From 6479bd411c09ff5e424a76957816402101365a8f Mon Sep 17 00:00:00 2001
From: Max Pietsch
Date: Wed, 18 Feb 2015 19:25:26 +0000
Subject: [PATCH 0001/1471] colorized gcc output

---
 build | 30 ++++++++++++++++++++++++++++--
 1 file changed, 28 insertions(+), 2 deletions(-)

diff --git a/build b/build
index 191b2855bd..104f29463d 100755
--- a/build
+++ b/build
@@ -6,6 +6,32 @@ import platform, sys, os, time, threading, subprocess, copy, codecs, glob, atexi
 
 config_file = 'config.default'
 
+bcolors = {
+ "HEADER" : '\033[95m',
+ "BLUE" : '\033[94m',
+ "candidate" : '\033[94m',
+ "GREEN" : '\033[92m',
+ "Linking" : '\033[92m',
+ "In function" : '\033[92m',
+ "WARNING" : '\033[93m',
+ "Warning" : '\033[93m',
+ "warning" : '\033[93m',
+ "required from" : '\033[93m',
+ "In instantiation of" : '\033[93m',
+ "In member" : '\033[93m',
+ "ERROR" : '\033[91m',
+ "error" : '\033[91m',
+ "failed" : '\033[91m',
+ "ENDC" : '\033[0m',
+ "BOLD" : '\033[1m',
+ "note" : '\033[4m',
+ "UNDERLINE" : '\033[4m'}
+
+def colorize(s):
+  for st in bcolors.keys():
+    s = (bcolors[st]+st+bcolors["ENDC"]).join(s.split(st))
+  return s
+
 system = None
 dependencies = False
 verbose = False
@@ -30,9 +56,9 @@ def pipe_errors_to_less_handler():
     [ fid, name ] = tempfile.mkstemp()
     try:
       fid = os.fdopen (fid, 'wb')
-      fid.write (error_stream.encode ('utf-8', 'ignore'))
+      fid.write (colorize(error_stream.encode ('utf-8', 'ignore')))
       fid.close()
-      os.system ("less " + name)
+      os.system ("less -R " + name)
     except Exception as e:
       sys.stderr.write (str (e))
       os.unlink (name)

From 534e879c591495a2c423e25a606ce55f918b6691 Mon Sep 17 00:00:00 2001
From: Max Pietsch
Date: Mon, 2 Mar 2015 18:01:50 +0000
Subject: [PATCH 0002/1471] batch parameter overlay.colourmap added to mrview

---
 cmd/mrview.cpp                  |  1 +
 src/gui/mrview/tool/overlay.cpp | 12 ++++++++++++
 2 files changed, 13 insertions(+)

diff --git a/cmd/mrview.cpp b/cmd/mrview.cpp
index 402636e576..89ea12f142 100644
--- a/cmd/mrview.cpp
+++ b/cmd/mrview.cpp
@@ -43,6 +43,7 @@ void usage ()
   + "exit\n quit MRView."
   + "overlay.load path\n Loads the specified image on the overlay tool."
   + "overlay.opacity value\n Sets the overlay opacity to floating value [0-1]."
+  + "overlay.colourmap index\n Sets the colourmap of the overlay as indexed in the colourmap dropdown menu."
+ "tractography.load path\n Load the specified tracks file into the tractography tool" + "capture.folder path\n Set the output folder for the screen capture tool" + "capture.prefix path\n Set the output file prefix for the screen capture tool" diff --git a/src/gui/mrview/tool/overlay.cpp b/src/gui/mrview/tool/overlay.cpp index d37d7515ee..abf1cef0b8 100644 --- a/src/gui/mrview/tool/overlay.cpp +++ b/src/gui/mrview/tool/overlay.cpp @@ -511,6 +511,18 @@ namespace MR return true; } + else if (cmd == "overlay.colourmap") { + try { + int n = to (args); + if (n < 0 || !ColourMap::maps[n].name) + throw Exception ("invalid overlay colourmap index \"" + args + "\" requested in batch command"); + colourmap_combobox->setCurrentIndex (n); + colourmap_changed(n); + } + catch (Exception& e) { e.display(); } + return true; + } + return false; } From 9566e4ba8a6e43969dee5af9069f8d17242e91d7 Mon Sep 17 00:00:00 2001 From: Max Pietsch Date: Tue, 3 Mar 2015 12:26:00 +0000 Subject: [PATCH 0003/1471] format par added to image types --- lib/image/format/list.cpp | 3 ++ lib/image/format/list.h | 1 + lib/image/format/par.cpp | 83 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 87 insertions(+) create mode 100644 lib/image/format/par.cpp diff --git a/lib/image/format/list.cpp b/lib/image/format/list.cpp index 45febdcbd7..14378e82c1 100644 --- a/lib/image/format/list.cpp +++ b/lib/image/format/list.cpp @@ -38,6 +38,7 @@ namespace MR Format::MRtrix mrtrix_handler; Format::MRtrix_GZ mrtrix_gz_handler; Format::MRI mri_handler; + Format::PAR par_handler; Format::NIfTI nifti_handler; Format::NIfTI_GZ nifti_gz_handler; Format::Analyse analyse_handler; @@ -60,6 +61,7 @@ namespace MR &nifti_gz_handler, &analyse_handler, &mri_handler, + &par_handler, &xds_handler, &mgh_handler, &mgz_handler, @@ -79,6 +81,7 @@ namespace MR ".bfloat", ".bshort", ".mri", + ".par", ".mgh", ".mgz", ".mgh.gz", diff --git a/lib/image/format/list.h b/lib/image/format/list.h index 3ec13274df..73c5e58208 100644 --- a/lib/image/format/list.h +++ b/lib/image/format/list.h @@ -108,6 +108,7 @@ namespace MR DECLARE_IMAGEFORMAT (NIfTI, "NIfTI-1.1"); DECLARE_IMAGEFORMAT (NIfTI_GZ, "NIfTI-1.1 (GZip compressed)"); DECLARE_IMAGEFORMAT (Analyse, "AnalyseAVW / NIfTI-1.1"); + DECLARE_IMAGEFORMAT (PAR, "Philips PAR/REG"); DECLARE_IMAGEFORMAT (MRI, "MRTools (legacy format)"); DECLARE_IMAGEFORMAT (XDS, "XDS"); DECLARE_IMAGEFORMAT (MGH, "MGH"); diff --git a/lib/image/format/par.cpp b/lib/image/format/par.cpp new file mode 100644 index 0000000000..c6b0f2c457 --- /dev/null +++ b/lib/image/format/par.cpp @@ -0,0 +1,83 @@ +/* + Copyright 2008 Brain Research Institute, Melbourne, Australia + + Written by Maximilian Pietsch, 03/03/15. + + This file is part of MRtrix. + + MRtrix is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + MRtrix is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with MRtrix. If not, see . 
+ +*/ + + +#include "file/config.h" +#include "file/ofstream.h" +#include "file/path.h" +#include "file/utils.h" +#include "file/mmap.h" +#include "image/utils.h" +#include "image/format/list.h" +#include "image/header.h" +#include "image/handler/default.h" +#include "get_set.h" + + + +namespace MR +{ + namespace Image + { + namespace Format + { + + RefPtr PAR::read (Header& H) const + { + if (!Path::has_suffix (H.name(), ".PAR")){ + return RefPtr(); + } + + File::MMap fmap (H.name()); + + // if (memcmp (fmap.address(), "PAR#", 4)) + // throw Exception ("file \"" + H.name() + "\" is not in PAR format (unrecognised magic number)"); + + size_t data_offset = 0; + + if (!data_offset) + throw Exception ("no data field found in PAR image \"" + H.name() + "\""); + + RefPtr handler (new Handler::Default (H)); + handler->files.push_back (File::Entry (H.name(), data_offset)); + + return handler; + } + + + bool PAR::check (Header& H, size_t num_axes) const + { + return false; + } + + RefPtr PAR::create (Header& H) const + { + assert (0); + return RefPtr(); + } + + } + } +} + + + From b14bd66f6824d14aed862c2fb40711a6bb68e8ca Mon Sep 17 00:00:00 2001 From: Max Pietsch Date: Tue, 3 Mar 2015 13:28:55 +0000 Subject: [PATCH 0004/1471] lower and uppercase file extension for .par --- lib/image/format/par.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/image/format/par.cpp b/lib/image/format/par.cpp index c6b0f2c457..37bbbce91c 100644 --- a/lib/image/format/par.cpp +++ b/lib/image/format/par.cpp @@ -43,7 +43,7 @@ namespace MR RefPtr PAR::read (Header& H) const { - if (!Path::has_suffix (H.name(), ".PAR")){ + if (!Path::has_suffix (H.name(), ".PAR") && !Path::has_suffix (H.name(), ".par")){ return RefPtr(); } From dbc6e6982582181465e0d6a1cbff4f7bb0af1b0c Mon Sep 17 00:00:00 2001 From: Max Pietsch Date: Tue, 3 Mar 2015 20:15:46 +0000 Subject: [PATCH 0005/1471] added par_utils for header parsing --- lib/file/par_utils.cpp | 149 +++++++++++++++++++++++++++++++++++++++ lib/file/par_utils.h | 95 +++++++++++++++++++++++++ lib/image/format/par.cpp | 29 ++++++-- 3 files changed, 267 insertions(+), 6 deletions(-) create mode 100644 lib/file/par_utils.cpp create mode 100644 lib/file/par_utils.h diff --git a/lib/file/par_utils.cpp b/lib/file/par_utils.cpp new file mode 100644 index 0000000000..d60ff6272b --- /dev/null +++ b/lib/file/par_utils.cpp @@ -0,0 +1,149 @@ +/* + Copyright 2009 Brain Research Institute, Melbourne, Australia + + Written by Maximilian Pietsch, 03/03/15. + + This file is part of MRtrix. + + MRtrix is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + MRtrix is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with MRtrix. If not, see . 
+ +*/ + +// #include "image/stride.h" +// #include "get_set.h" +// #include "file/config.h" +#include "file/par_utils.h" +// #include "math/LU.h" +// #include "math/permutation.h" +// #include "math/versor.h" +// #include "image/header.h" + +namespace MR +{ + namespace File + { + namespace PAR + { + std::string KeyValue::trim(std::string const& str) + { + if (str.empty()) + return str; + if (str[0] == '.') + return trim(str.substr(1, str.find_last_not_of(' '))); + std::string whitespaces (" \t\f\v\n\r"); + size_t first = str.find_first_not_of(whitespaces); + size_t last = str.find_last_not_of(whitespaces); + return str.substr(first, last-first+1); + } + + + void KeyValue::open (const std::string& file, const char* first_line) + { + filename.clear(); + DEBUG ("reading key/value file \"" + file + "\"..."); + + in.open (file.c_str(), std::ios::in | std::ios::binary); + if (!in) + throw Exception ("failed to open key/value file \"" + file + "\": " + strerror (errno)); + if (first_line) { + std::string sbuf; + getline (in, sbuf); + if (sbuf.compare (0, strlen (first_line), first_line)) { + in.close(); + throw Exception ("invalid first line for key/value file \"" + file + "\" (expected \"" + first_line + "\")"); + } + } + filename = file; + } + + bool KeyValue::next_general () + { + while (in.good() && general_information ) { + std::string sbuf; + getline (in, sbuf); + if (in.bad()) + throw Exception ("error reading PAR file \"" + filename + "\": " + strerror (errno)); + + if (sbuf.find("IMAGE INFORMATION") != std::string::npos){ + DEBUG ("general_information = false: after line (\"" + sbuf + "\") in file \"" + filename + "\" - ignored"); + general_information = false; + return true; + } + if (!ver.size() && (sbuf.find("image export tool")!= std::string::npos) ){ + ver = KeyValue::trim(sbuf.substr (sbuf.find_last_of("image export tool") ) ); + DEBUG("par/rec version: " + ver); + return true; + } + + sbuf = strip (sbuf.substr (0, sbuf.find_first_of ('#'))); + if (sbuf.size()) { + size_t colon = sbuf.find_first_of (':'); + if (colon == std::string::npos) { + INFO ("malformed key/value entry (\"" + sbuf + "\") in file \"" + filename + "\" - ignored"); + return true; + } + else { + K = KeyValue::trim(strip (sbuf.substr (0, colon))); + V = KeyValue::trim(strip (sbuf.substr (colon+1))); + if (K.empty()) { + INFO ("malformed key/value entry (\"" + sbuf + "\") in file \"" + filename + "\" - ignored"); + } + else + return true; + } + } + } + return false; + } + + bool KeyValue::next_image () + { + while (in.good() && !general_information ) { + std::string sbuf; + getline (in, sbuf); + if (in.bad()) + throw Exception ("error reading PAR file \"" + filename + "\": " + strerror (errno)); + + if (sbuf.find("END OF DATA DESCRIPTION FILE") != std::string::npos) { + DEBUG ("END OF DATA DESCRIPTION FILE"); + in.setstate (std::ios::eofbit); + return false; + } + sbuf = strip (sbuf.substr (0, sbuf.find_first_of ('#'))); + if (sbuf.size()) { + K = "image"; + V = sbuf; + return true; + } + } + return false; + } + + + + // else if (sbuf.size()){ + // WARN(sbuf); + // } + + // size_t read (Image::Header& H, const par_header& NH) { + // bool is_BE = false; + // size_t data_offset; // = (size_t) get (&NH.vox_offset, is_BE); + // return data_offset; + // } + + } + } +} + + diff --git a/lib/file/par_utils.h b/lib/file/par_utils.h new file mode 100644 index 0000000000..6ee9703091 --- /dev/null +++ b/lib/file/par_utils.h @@ -0,0 +1,95 @@ +/* + Copyright 2009 Brain Research Institute, Melbourne, Australia + + Written by 
Maximilian Pietsch, 03/03/15. + + This file is part of MRtrix. + + MRtrix is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + MRtrix is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with MRtrix. If not, see . + +*/ + +#ifndef __file_par_utils_h__ +#define __file_par_utils_h__ + +#include "file/par.h" +#include "math/matrix.h" + +#include +#include "mrtrix.h" + +namespace MR +{ + namespace Image + { + class Header; + } + namespace File + { + namespace PAR + { + + class KeyValue + { + public: + KeyValue () { } + KeyValue (const std::string& file, const char* first_line = NULL) { + open (file, first_line); + } + + void open (const std::string& file, const char* first_line = NULL); + bool next_general (); + bool next_image (); + void close () { + in.close(); + } + + const std::string& version() const throw () { + return (ver); + } + const std::string& key () const throw () { + return (K); + } + const std::string& value () const throw () { + return (V); + } + const std::string& name () const throw () { + return (filename); + } + const bool& is_general () const throw () { + return (general_information); + } + + private: + std::string trim(std::string const& str); + bool general_information = true; + + protected: + std::string K, V, filename, ver; + std::ifstream in; + }; + + // Math::Matrix adjust_transform (const Image::Header& H, std::vector& order); + + // void check (Image::Header& H, bool single_file); + // size_t read (Image::Header& H, const par_header& PH); + // void check (Image::Header& H, bool single_file); + // void write (par_header& PH, const Image::Header& H, bool single_file); + + } + } +} + +#endif + diff --git a/lib/image/format/par.cpp b/lib/image/format/par.cpp index 37bbbce91c..4154514ce6 100644 --- a/lib/image/format/par.cpp +++ b/lib/image/format/par.cpp @@ -31,7 +31,11 @@ #include "image/header.h" #include "image/handler/default.h" #include "get_set.h" + +#include "file/par_utils.h" +#include "image/format/mrtrix_utils.h" +// #include "file/key_value.h" namespace MR @@ -40,6 +44,7 @@ namespace MR { namespace Format { + // File::MMap fmap (H.name().substr (0, H.name().size()-4) + ".REC"); RefPtr PAR::read (Header& H) const { @@ -47,15 +52,27 @@ namespace MR return RefPtr(); } - File::MMap fmap (H.name()); + MR::File::PAR::KeyValue kv (H.name()); - // if (memcmp (fmap.address(), "PAR#", 4)) - // throw Exception ("file \"" + H.name() + "\" is not in PAR format (unrecognised magic number)"); + while (kv.next_general()){ + DEBUG(kv.key() + ":\t" + kv.value()); + } - size_t data_offset = 0; + if (kv.version() != "V4.2"){ + WARN("par/rec version " + kv.version() + " not supported"); + return RefPtr(); + } + INFO("par/rec version: " + kv.version()); - if (!data_offset) - throw Exception ("no data field found in PAR image \"" + H.name() + "\""); + while (kv.next_image()){ + DEBUG(kv.key() + ":\t" + kv.value()); + } + + File::MMap fmap (H.name()); + std::cerr << fmap << std::endl; + size_t data_offset = 0; // File::PAR::read (H, * ( (const par_header*) fmap.address())); + // size_t data_offset = 0; + throw Exception ("par/rec not yet implemented... 
\"" + H.name() + "\""); RefPtr handler (new Handler::Default (H)); handler->files.push_back (File::Entry (H.name(), data_offset)); From 9262d3ea1fd5a4e9594a2dc14cdbbc0b227ef315 Mon Sep 17 00:00:00 2001 From: Max Pietsch Date: Tue, 3 Mar 2015 20:35:48 +0000 Subject: [PATCH 0006/1471] version check --- lib/file/par_utils.cpp | 4 ---- lib/file/par_utils.h | 9 ++++++++- lib/image/format/par.cpp | 3 ++- 3 files changed, 10 insertions(+), 6 deletions(-) diff --git a/lib/file/par_utils.cpp b/lib/file/par_utils.cpp index d60ff6272b..414ef4b8c9 100644 --- a/lib/file/par_utils.cpp +++ b/lib/file/par_utils.cpp @@ -132,10 +132,6 @@ namespace MR - // else if (sbuf.size()){ - // WARN(sbuf); - // } - // size_t read (Image::Header& H, const par_header& NH) { // bool is_BE = false; // size_t data_offset; // = (size_t) get (&NH.vox_offset, is_BE); diff --git a/lib/file/par_utils.h b/lib/file/par_utils.h index 6ee9703091..9a268331b0 100644 --- a/lib/file/par_utils.h +++ b/lib/file/par_utils.h @@ -67,9 +67,12 @@ namespace MR const std::string& name () const throw () { return (filename); } - const bool& is_general () const throw () { + const bool is_general () const throw () { return (general_information); } + const bool valid_version() const throw () { + return (std::find(understood_versions.begin(), understood_versions.end(), ver) != understood_versions.end()); + } private: std::string trim(std::string const& str); @@ -78,6 +81,10 @@ namespace MR protected: std::string K, V, filename, ver; std::ifstream in; + const std::vector understood_versions { + "V4", + "V4.1", + "V4.2"}; }; // Math::Matrix adjust_transform (const Image::Header& H, std::vector& order); diff --git a/lib/image/format/par.cpp b/lib/image/format/par.cpp index 4154514ce6..3873b22d93 100644 --- a/lib/image/format/par.cpp +++ b/lib/image/format/par.cpp @@ -58,7 +58,8 @@ namespace MR DEBUG(kv.key() + ":\t" + kv.value()); } - if (kv.version() != "V4.2"){ + // if (kv.version() != "V4.2"){ + if (!kv.valid_version()){ WARN("par/rec version " + kv.version() + " not supported"); return RefPtr(); } From 50cd16a93c89b23a82b8b6c7219e386173494d5b Mon Sep 17 00:00:00 2001 From: Max Pietsch Date: Wed, 4 Mar 2015 20:06:13 +0000 Subject: [PATCH 0007/1471] parsing complete header --- lib/file/par_utils.cpp | 50 +++++++++++++++++---- lib/file/par_utils.h | 21 ++++++--- lib/image/format/par.cpp | 96 +++++++++++++++++++++++++++++++++++----- 3 files changed, 143 insertions(+), 24 deletions(-) diff --git a/lib/file/par_utils.cpp b/lib/file/par_utils.cpp index 414ef4b8c9..5ed275c525 100644 --- a/lib/file/par_utils.cpp +++ b/lib/file/par_utils.cpp @@ -35,13 +35,15 @@ namespace MR { namespace PAR { - std::string KeyValue::trim(std::string const& str) + + std::string KeyValue::trim(std::string const& str, char leading_char) { if (str.empty()) return str; - if (str[0] == '.') - return trim(str.substr(1, str.find_last_not_of(' '))); std::string whitespaces (" \t\f\v\n\r"); + if (str[0] == leading_char){ + return trim(str.substr(1, str.find_last_not_of(whitespaces))); + } size_t first = str.find_first_not_of(whitespaces); size_t last = str.find_last_not_of(whitespaces); return str.substr(first, last-first+1); @@ -76,14 +78,12 @@ namespace MR throw Exception ("error reading PAR file \"" + filename + "\": " + strerror (errno)); if (sbuf.find("IMAGE INFORMATION") != std::string::npos){ - DEBUG ("general_information = false: after line (\"" + sbuf + "\") in file \"" + filename + "\" - ignored"); general_information = false; - return true; + return false; } if (!ver.size() && 
(sbuf.find("image export tool")!= std::string::npos) ){ ver = KeyValue::trim(sbuf.substr (sbuf.find_last_of("image export tool") ) ); - DEBUG("par/rec version: " + ver); - return true; + continue; } sbuf = strip (sbuf.substr (0, sbuf.find_first_of ('#'))); @@ -107,9 +107,41 @@ namespace MR return false; } - bool KeyValue::next_image () + bool KeyValue::next_image_information () { - while (in.good() && !general_information ) { + if (general_information) + return false; + + while (in.good()){ + std::string sbuf; + getline (in, sbuf); + + if (sbuf.find("IMAGE INFORMATION") != std::string::npos) + return false; + + sbuf = KeyValue::trim(sbuf,'#'); + + size_t l_bracket = sbuf.find_last_of("("); + size_t r_bracket = sbuf.find_last_of(")"); + if (!sbuf.size() || r_bracket== std::string::npos || l_bracket== std::string::npos || r_bracket != sbuf.size()-1) + continue; + if (l_bracket-r_bracket == 0){ + INFO("malformed key/value entry(\"" + sbuf + "\") in file \"" + filename + "\" - ignored"); + continue; + } + K = KeyValue::trim( sbuf.substr(0,l_bracket-1)); + V = KeyValue::trim( sbuf.substr(l_bracket+1,r_bracket-1-l_bracket)); + return true; + } + return false; + } + + bool KeyValue::next_image () + { + if (general_information) + return false; + + while (in.good()) { std::string sbuf; getline (in, sbuf); if (in.bad()) diff --git a/lib/file/par_utils.h b/lib/file/par_utils.h index 9a268331b0..3dc12a58ae 100644 --- a/lib/file/par_utils.h +++ b/lib/file/par_utils.h @@ -29,6 +29,12 @@ #include #include "mrtrix.h" +#include +#include +#include + +// #include + namespace MR { namespace Image @@ -38,22 +44,24 @@ namespace MR namespace File { namespace PAR - { - + { class KeyValue { + // KeyValue: use KeyValue.next_general() to extract general information followed by KeyValue.next_image() for image lines public: KeyValue () { } - KeyValue (const std::string& file, const char* first_line = NULL) { + KeyValue (const std::string& file, const char* first_line = nullptr) { open (file, first_line); } - void open (const std::string& file, const char* first_line = NULL); + void open (const std::string& file, const char* first_line = nullptr); bool next_general (); bool next_image (); + bool next_image_information (); void close () { in.close(); } + // std::vector split_image_line(const std::string&); const std::string& version() const throw () { return (ver); @@ -75,7 +83,8 @@ namespace MR } private: - std::string trim(std::string const& str); + std::string trim(std::string const& str, char leading_char = '.'); + bool general_information = true; protected: @@ -94,6 +103,8 @@ namespace MR // void check (Image::Header& H, bool single_file); // void write (par_header& PH, const Image::Header& H, bool single_file); + + } } } diff --git a/lib/image/format/par.cpp b/lib/image/format/par.cpp index 3873b22d93..d5c187f8ad 100644 --- a/lib/image/format/par.cpp +++ b/lib/image/format/par.cpp @@ -35,7 +35,12 @@ #include "file/par_utils.h" #include "image/format/mrtrix_utils.h" -// #include "file/key_value.h" +#include + +#include +#include +// #include + namespace MR @@ -44,30 +49,101 @@ namespace MR { namespace Format { - // File::MMap fmap (H.name().substr (0, H.name().size()-4) + ".REC"); + template + std::vector split_image_line(const std::string& line) { + std::istringstream is(line); + return std::vector(std::istream_iterator(is), std::istream_iterator()); + } + + typedef double ComputeType; + + // typedef std::vector::const_iterator Input_iterator; + typedef std::map ParHeader; + + typedef std::tuple ParCol; + typedef 
std::map ParImageInfo; + typedef std::map> ParImages; + // TOOD ParImages: use different types? + // TOOD ParImages: use array? RefPtr PAR::read (Header& H) const { if (!Path::has_suffix (H.name(), ".PAR") && !Path::has_suffix (H.name(), ".par")){ return RefPtr(); } - MR::File::PAR::KeyValue kv (H.name()); - + + ParHeader PH; // General information + ParImageInfo image_info; // image column info + ParImages images; // columns + while (kv.next_general()){ - DEBUG(kv.key() + ":\t" + kv.value()); + std::pair res = PH.insert(std::make_pair(kv.key(), kv.value())); + if ( ! res.second ) + WARN("ParHeader key " + kv.key() + " defined multiple times. Using: " + (res.first)->second); + } + PH.insert(std::make_pair("version", kv.version())); + + for (auto& item: PH) { + DEBUG(item.first + ":" + item.second); } - // if (kv.version() != "V4.2"){ if (!kv.valid_version()){ - WARN("par/rec version " + kv.version() + " not supported"); - return RefPtr(); + WARN("par/rec file " + H.name() + " claims to be of version '" + + kv.version() + "' which is not supported. You've got to ask yourself one question: Do I feel lucky?"); + } + + size_t cnt=0; + while (kv.next_image_information ()){ + int extent = 1; + std::string type = kv.value(); + { + size_t star = kv.value().find_first_of ('*'); + if (star != std::string::npos){ + extent = std::stoi(kv.value().substr(0, star)) ; + type = kv.value().substr(star+1); + } + } + image_info.insert(std::make_pair(kv.key(), std::make_tuple(cnt, cnt+extent, type))); + cnt++; + } + // image_info.insert(std::make_pair("volume number", std::make_tuple(-1, -1, "integer"))); + DEBUG("image column info:"); + for (auto& item: image_info) { + DEBUG(item.first + ": columns " + + str(std::get<0>(item.second)) + "-" + + str(std::get<1>(item.second)-1) + " " + + std::get<2>(item.second)); } - INFO("par/rec version: " + kv.version()); + std::vector vec; + std::map image_number_counter; while (kv.next_image()){ - DEBUG(kv.key() + ":\t" + kv.value()); + vec = split_image_line(kv.value()); + image_number_counter[vec[0]]++; + + for (auto& item: image_info) { + size_t start_col = std::get<0>(item.second); + size_t stop_col = std::get<1>(item.second); + // TODO how to handle multiple rows in a nicer way? + std::string s; + std::for_each(vec.begin()+start_col, vec.begin()+stop_col, [&](const std::string &piece){ s += piece; s += " "; }); + images[item.first].push_back(s); + } + // calculate volume numbers inferred from slice number + // std::string volume_number = std::count (images["slice number"].begin(), images["slice number"].end(), vec[0]); + images["volume number"].push_back(str(image_number_counter[vec[0]])); } + // TODO check slice_numbers against "Max. number of slices/locations" ... 
see _truncation_checks + + // for (auto& item: images) + // DEBUG(item.first + ": " +str(item.second)); + + // convert strVec to float: + // std::vector flVect(strVect.size()); + // std::transform(strVect.begin(), strVect.end(), flVect.begin(), + // [](const std::string &arg) { return std::stof(arg); }); + File::MMap fmap (H.name()); std::cerr << fmap << std::endl; From 175be744241f705ab771fa22e74130b8baa0dc2f Mon Sep 17 00:00:00 2001 From: Max Pietsch Date: Wed, 4 Mar 2015 20:24:38 +0000 Subject: [PATCH 0008/1471] info --- lib/image/format/par.cpp | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/lib/image/format/par.cpp b/lib/image/format/par.cpp index d5c187f8ad..700e2a71db 100644 --- a/lib/image/format/par.cpp +++ b/lib/image/format/par.cpp @@ -144,6 +144,18 @@ namespace MR // std::transform(strVect.begin(), strVect.end(), flVect.begin(), // [](const std::string &arg) { return std::stof(arg); }); + INFO("Patient position: " + PH["Patient position"]); + INFO("Preparation direction: " + PH["Preparation direction"]); + INFO("FOV (ap,fh,rl) [mm]: " + PH["FOV (ap,fh,rl) [mm]"]); + + + // NIBABEL: + // "It seems that everyone agrees that Philips stores REC data in little-endian + // format - see https://github.com/nipy/nibabel/issues/274 + + // Philips XML header files, and some previous experience, suggest that the REC + // data is always stored as 8 or 16 bit unsigned integers - see + // https://github.com/nipy/nibabel/issues/275" File::MMap fmap (H.name()); std::cerr << fmap << std::endl; From 1e3dd32e43794e76be9e23890d24157e5ca1cbab Mon Sep 17 00:00:00 2001 From: Max Pietsch Date: Sat, 7 Mar 2015 20:24:27 +0000 Subject: [PATCH 0009/1471] volume slicing uid --- lib/image/format/par.cpp | 140 +++++++++++++++++++++++++++++++-------- 1 file changed, 111 insertions(+), 29 deletions(-) diff --git a/lib/image/format/par.cpp b/lib/image/format/par.cpp index 700e2a71db..a73e6a08bf 100644 --- a/lib/image/format/par.cpp +++ b/lib/image/format/par.cpp @@ -33,6 +33,7 @@ #include "get_set.h" #include "file/par_utils.h" +#include "file/par.h" // not used yet #include "image/format/mrtrix_utils.h" #include @@ -49,6 +50,13 @@ namespace MR { namespace Format { + + + // if (map.count(key) == 1) + // return map.at(key); + // else + // return nullptr; + template std::vector split_image_line(const std::string& line) { std::istringstream is(line); @@ -82,17 +90,11 @@ namespace MR if ( ! res.second ) WARN("ParHeader key " + kv.key() + " defined multiple times. Using: " + (res.first)->second); } - PH.insert(std::make_pair("version", kv.version())); for (auto& item: PH) { DEBUG(item.first + ":" + item.second); } - if (!kv.valid_version()){ - WARN("par/rec file " + H.name() + " claims to be of version '" + - kv.version() + "' which is not supported. You've got to ask yourself one question: Do I feel lucky?"); - } - size_t cnt=0; while (kv.next_image_information ()){ int extent = 1; @@ -105,49 +107,123 @@ namespace MR } } image_info.insert(std::make_pair(kv.key(), std::make_tuple(cnt, cnt+extent, type))); - cnt++; + cnt+=extent; } - // image_info.insert(std::make_pair("volume number", std::make_tuple(-1, -1, "integer"))); - DEBUG("image column info:"); - for (auto& item: image_info) { - DEBUG(item.first + ": columns " + - str(std::get<0>(item.second)) + "-" + - str(std::get<1>(item.second)-1) + " " + - std::get<2>(item.second)); + + // check version + { + if (!kv.valid_version()) + WARN("par/rec file " + H.name() + " claims to be of version '" + + kv.version() + "' which is not supported. 
You've got to ask yourself one question: Do I feel lucky?"); + size_t number_of_columns=0; + for (auto& item:image_info) + number_of_columns = std::max(number_of_columns,std::get<1>(item.second)); + std::string version; + if (number_of_columns <= 41) + version = "V4"; + else if (number_of_columns <= 48) + version = "V4.1"; + else + version = "V4.2"; + if (kv.version() != version) + WARN("number of columns in " + H.name() + " does not match version number: " + kv.version() + " (" + version + ")"); } + PH.insert(std::make_pair("version", kv.version())); + + // define what information we need for unique identifier + std::vector vUID; + if (std::stoi(PH["Max. number of echoes"]) > 1) + vUID.push_back("echo number"); + if (std::stoi(PH["Max. number of slices/locations"]) > 1) + vUID.push_back("slice number"); + if (std::stoi(PH["Max. number of cardiac phases"]) > 1) + vUID.push_back("cardiac phase number"); + if (std::stoi(PH["Max. number of dynamics"]) > 1) + vUID.push_back("dynamic scan number"); + // 4.1 + if ((kv.version()=="V4.1" || kv.version()=="V4.2") && (std::stoi(PH["Max. number of diffusion values"]) > 1)) + vUID.push_back("gradient orientation number (imagekey!)"); + if ((kv.version()=="V4.1" || kv.version()=="V4.2") && (std::stoi(PH["Max. number of diffusion values"]) > 1)) + vUID.push_back("diffusion b value number (imagekey!)"); + // 4.2 + if (kv.version()=="V4.2" && (std::stoi(PH["Number of label types <0=no ASL>"]) > 1)) + vUID.push_back("label type (ASL) (imagekey!)"); + vUID.push_back("image_type_mr"); + + if (vUID.size()>1) + WARN("Multiple volumes in file " + H.name() + ". Uid category indices required."); + + // parse image information and save it in images + std::map uid_tester; std::vector vec; - std::map image_number_counter; + std::string uid; // minimum unique identifier for each slice + std::string uid_cat; + std::for_each(vUID.begin(), vUID.end(), [&](const std::string &piece){ uid_cat += piece; uid_cat += ";"; }); + uid_cat.pop_back(); + std::vector > uid_indices; while (kv.next_image()){ vec = split_image_line(kv.value()); - image_number_counter[vec[0]]++; for (auto& item: image_info) { + // stop_col - start_col >1 --> item spans multiple columns size_t start_col = std::get<0>(item.second); size_t stop_col = std::get<1>(item.second); - // TODO how to handle multiple rows in a nicer way? std::string s; std::for_each(vec.begin()+start_col, vec.begin()+stop_col, [&](const std::string &piece){ s += piece; s += " "; }); - images[item.first].push_back(s); + images[item.first].push_back(s.substr(0, s.size()-1)); + } + uid.clear(); + for (auto& k : vUID) + uid += images[k].back() + " "; + uid.pop_back(); + INFO(uid); + ++uid_tester[uid]; + if (uid_tester[uid] > 1){ + WARN("uid not unique: " + uid_cat + ": " + uid); } - // calculate volume numbers inferred from slice number - // std::string volume_number = std::count (images["slice number"].begin(), images["slice number"].end(), vec[0]); - images["volume number"].push_back(str(image_number_counter[vec[0]])); + uid_indices.push_back(split_image_line(uid)); } - // TODO check slice_numbers against "Max. number of slices/locations" ... see _truncation_checks + + INFO("uid categories: " + uid_cat); + + + // TODO: user defined volume slicing via uid + + // + // #include "math/matrix.h" + // Math::Matrix mat (nrows, vUID.size()); + + // TODO truncation_checks: check slice_numbers against "Max. number of slices/locations" + // TODO dynamics with arbitrary start? 
+ // bool sorted_by_slice = false; - // for (auto& item: images) - // DEBUG(item.first + ": " +str(item.second)); // convert strVec to float: // std::vector flVect(strVect.size()); // std::transform(strVect.begin(), strVect.end(), flVect.begin(), // [](const std::string &arg) { return std::stof(arg); }); - INFO("Patient position: " + PH["Patient position"]); - INFO("Preparation direction: " + PH["Preparation direction"]); - INFO("FOV (ap,fh,rl) [mm]: " + PH["FOV (ap,fh,rl) [mm]"]); - + // show some info about the data + { + std::vector v = {"Patient position", + "Preparation direction", + "FOV (ap,fh,rl) [mm]", + "Technique", + "Protocol name", + "Dynamic scan <0=no 1=yes> ?", + "Diffusion <0=no 1=yes> ?"}; + auto it = std::max_element(v.begin(), v.end(), [](const std::string& x, const std::string& y) { return x.size() < y.size(); }); + size_t padding = (*it).size(); + for (auto& k : v){ + if (PH.find(k) != PH.end()){ + INFO(k.insert(k.size(), padding - k.size(), ' ') + ": " + PH[k]); + } + else{ + WARN("PAR header lacks '" + k +"' field." ); + } + } + } // NIBABEL: // "It seems that everyone agrees that Philips stores REC data in little-endian @@ -157,8 +233,14 @@ namespace MR // data is always stored as 8 or 16 bit unsigned integers - see // https://github.com/nipy/nibabel/issues/275" + + // PV = pixel value in REC file, FP = floating point value, DV = displayed value on console + // RS = rescale slope, RI = rescale intercept, SS = scale slope + // DV = PV * RS + RI FP = DV / (RS * SS) + + File::MMap fmap (H.name()); - std::cerr << fmap << std::endl; + // std::cerr << fmap << std::endl; size_t data_offset = 0; // File::PAR::read (H, * ( (const par_header*) fmap.address())); // size_t data_offset = 0; throw Exception ("par/rec not yet implemented... 
\"" + H.name() + "\""); From d38dd09b68724d1870e1e20b057208c9e4454288 Mon Sep 17 00:00:00 2001 From: Max Pietsch Date: Sat, 7 Mar 2015 21:27:33 +0000 Subject: [PATCH 0010/1471] volume slicing uid 2 --- lib/image/format/par.cpp | 31 ++++++++++++++++++++++++------- 1 file changed, 24 insertions(+), 7 deletions(-) diff --git a/lib/image/format/par.cpp b/lib/image/format/par.cpp index a73e6a08bf..e11f728250 100644 --- a/lib/image/format/par.cpp +++ b/lib/image/format/par.cpp @@ -39,6 +39,7 @@ #include #include +#include #include // #include @@ -56,6 +57,17 @@ namespace MR // return map.at(key); // else // return nullptr; + template + std::set get_matching_indices(const std::vector& v, const T& criterion){ + std::set indices; + auto it = std::find_if(std::begin(v), std::end(v), [&](T i){return i == criterion;}); + while (it != std::end(v)) { + // indices.emplace_back(std::distance(std::begin(v), it)); + indices.insert(std::distance(std::begin(v), it)); + it = std::find_if(std::next(it), std::end(v), [&](T i){return i == criterion;}); + } + return indices; + } template std::vector split_image_line(const std::string& line) { @@ -161,6 +173,7 @@ namespace MR std::string uid_cat; std::for_each(vUID.begin(), vUID.end(), [&](const std::string &piece){ uid_cat += piece; uid_cat += ";"; }); uid_cat.pop_back(); + INFO("uid categories: " + uid_cat); std::vector > uid_indices; while (kv.next_image()){ vec = split_image_line(kv.value()); @@ -177,22 +190,26 @@ namespace MR for (auto& k : vUID) uid += images[k].back() + " "; uid.pop_back(); - INFO(uid); + INFO("uid: " + uid); ++uid_tester[uid]; if (uid_tester[uid] > 1){ WARN("uid not unique: " + uid_cat + ": " + uid); } uid_indices.push_back(split_image_line(uid)); } - INFO("uid categories: " + uid_cat); - // TODO: user defined volume slicing via uid - - // - // #include "math/matrix.h" - // Math::Matrix mat (nrows, vUID.size()); + std::set a = get_matching_indices(images[vUID[2]],std::string(images[vUID[2]][0])); + std::set b = get_matching_indices(images[vUID[1]],std::string(images[vUID[1]][0])); + + std::set uni; + std::set_intersection (a.begin(), a.end(), + b.begin(), b.end(), + std::inserter(uni, uni.begin())); + std::cerr << vUID[2] + "=" + images[vUID[2]][0] + ", " + vUID[1] + "=" + images[vUID[1]][0] + " has indices "; + std::copy(uni.begin(), uni.end(), std::ostream_iterator(std::cerr, " ")); + std::cerr << std::endl; // TODO truncation_checks: check slice_numbers against "Max. number of slices/locations" // TODO dynamics with arbitrary start? From 3924a8f4020315e5311235e6bd4614aad567af51 Mon Sep 17 00:00:00 2001 From: Max Pietsch Date: Mon, 9 Mar 2015 17:23:22 +0000 Subject: [PATCH 0011/1471] data blocks --- lib/image/format/par.cpp | 171 ++++++++++++++++++++++++++++++--------- 1 file changed, 132 insertions(+), 39 deletions(-) diff --git a/lib/image/format/par.cpp b/lib/image/format/par.cpp index e11f728250..941b5c855e 100644 --- a/lib/image/format/par.cpp +++ b/lib/image/format/par.cpp @@ -41,6 +41,8 @@ #include #include #include +#include +// #include // #include @@ -51,7 +53,20 @@ namespace MR { namespace Format { - + // template + // T intersect(T first, Args... 
args) { + // return first + intersect(args...); + // } + // std::set a = get_matching_indices(images[vUID[2]],std::string(images[vUID[2]][0])); + // std::set b = get_matching_indices(images[vUID[1]],std::string(images[vUID[1]][0])); + + // std::set uni; + // std::set_intersection (a.begin(), a.end(), + // b.begin(), b.end(), + // std::inserter(uni, uni.begin())); + // std::cerr << vUID[2] + "=" + images[vUID[2]][0] + ", " + vUID[1] + "=" + images[vUID[1]][0] + " has indices "; + // std::copy(uni.begin(), uni.end(), std::ostream_iterator(std::cerr, " ")); + // std::cerr << std::endl; // if (map.count(key) == 1) // return map.at(key); @@ -86,16 +101,20 @@ namespace MR // TOOD ParImages: use different types? // TOOD ParImages: use array? + // todo check file existence RefPtr PAR::read (Header& H) const { if (!Path::has_suffix (H.name(), ".PAR") && !Path::has_suffix (H.name(), ".par")){ return RefPtr(); } + std::string rec_file = H.name().substr(0,H.name().size()-4)+".REC"; + MR::File::PAR::KeyValue kv (H.name()); ParHeader PH; // General information ParImageInfo image_info; // image column info ParImages images; // columns + std::vector> slice_data_block_positions; while (kv.next_general()){ std::pair res = PH.insert(std::make_pair(kv.key(), kv.value())); @@ -122,6 +141,27 @@ namespace MR cnt+=extent; } + // show some info about the data + { + std::vector v = {"Patient position", + "Preparation direction", + "FOV (ap,fh,rl) [mm]", + "Technique", + "Protocol name", + "Dynamic scan <0=no 1=yes> ?", + "Diffusion <0=no 1=yes> ?"}; + auto it = std::max_element(v.begin(), v.end(), [](const std::string& x, const std::string& y) { return x.size() < y.size(); }); + size_t padding = (*it).size(); + for (auto& k : v){ + if (PH.find(k) != PH.end()){ + INFO(k.insert(k.size(), padding - k.size(), ' ') + ": " + PH[k]); + } + else{ + WARN("PAR header lacks '" + k +"' field." 
); + } + } + } + // check version { if (!kv.valid_version()) @@ -173,8 +213,9 @@ namespace MR std::string uid_cat; std::for_each(vUID.begin(), vUID.end(), [&](const std::string &piece){ uid_cat += piece; uid_cat += ";"; }); uid_cat.pop_back(); - INFO("uid categories: " + uid_cat); + // INFO("uid categories: " + uid_cat); std::vector > uid_indices; + size_t slice_data_block_start=0; while (kv.next_image()){ vec = split_image_line(kv.value()); @@ -186,34 +227,96 @@ namespace MR std::for_each(vec.begin()+start_col, vec.begin()+stop_col, [&](const std::string &piece){ s += piece; s += " "; }); images[item.first].push_back(s.substr(0, s.size()-1)); } + // slice_data_block_positions + { + // image = reshape(data(1+idx.*(x.*y):(idx+1).*x.*y),x,y); + // size_t idx = std::stoi(images["index in REC file (in images)"].back()); + std::vector xy = split_image_line(images["recon resolution (x y)"].back()); + // size_t start = idx*(xy[0] *xy[1]); + // size_t stop = (idx+1)*(xy[0] *xy[1]); + size_t stop = slice_data_block_start + (xy[0] *xy[1]); + slice_data_block_positions.push_back(std::pair(slice_data_block_start,stop)); + slice_data_block_start = stop; + } + + uid.clear(); for (auto& k : vUID) uid += images[k].back() + " "; uid.pop_back(); - INFO("uid: " + uid); + // INFO("uid: " + uid); ++uid_tester[uid]; if (uid_tester[uid] > 1){ WARN("uid not unique: " + uid_cat + ": " + uid); } uid_indices.push_back(split_image_line(uid)); } + kv.close(); INFO("uid categories: " + uid_cat); - // TODO: user defined volume slicing via uid - std::set a = get_matching_indices(images[vUID[2]],std::string(images[vUID[2]][0])); - std::set b = get_matching_indices(images[vUID[1]],std::string(images[vUID[1]][0])); + // TODO separate uid code from image parsing - std::set uni; - std::set_intersection (a.begin(), a.end(), - b.begin(), b.end(), - std::inserter(uni, uni.begin())); - std::cerr << vUID[2] + "=" + images[vUID[2]][0] + ", " + vUID[1] + "=" + images[vUID[1]][0] + " has indices "; - std::copy(uni.begin(), uni.end(), std::ostream_iterator(std::cerr, " ")); - std::cerr << std::endl; + // TODO: combine type values + + // TODO: user defined volume slicing via \a categories and \a values. 
here we choose an arbitrary volume + std::vector chosen_slices; + if (vUID.size() == 1) { + chosen_slices.reserve(images[vUID[0]].size()); + std::iota(chosen_slices.begin(), chosen_slices.end(), 0); + } + else { + std::vector categories; // which categories in have to be unique (the remaining one can have any value) + std::vector values; // the value corresponding to categories + + // TODO: change this to user defined values + categories.insert(categories.end(), vUID.begin()+1, vUID.end()); + for (auto cat : categories) + values.push_back(images[cat][0]); + VAR(categories); + VAR(values); + + + std::vector> matching_lines_per_categ; + for (size_t i=0; i< categories.size(); i++) { + matching_lines_per_categ.push_back(std::set(get_matching_indices(images[categories[i]],values[i]))); // TODO use pointers + } + std::map intersect_counter; + for (auto& lines: matching_lines_per_categ){ + for (auto& line :lines) + intersect_counter[line] += 1; + } + + for (auto& item: intersect_counter) { + if (item.second == matching_lines_per_categ.size()) + chosen_slices.push_back(item.first); + } + + + // std::set uni; + // std::set a = get_matching_indices(images[vUID[0]],std::string(images[vUID[0]][0])); + // std::set b = get_matching_indices(images[vUID[1]],std::string(images[vUID[1]][0])); + // std::set_intersection (a.begin(), a.end(), + // b.begin(), b.end(), + // std::inserter(uni, uni.begin())); + // std::cerr << vUID[2] + "=" + images[vUID[2]][0] + ", " + vUID[1] + "=" + images[vUID[1]][0] + " has indices "; + // std::copy(uni.begin(), uni.end(), std::ostream_iterator(std::cerr, " ")); + // std::cerr << std::endl; + } + + { INFO("selected slices:"); + std::vector image_info = {"image offcentre (ap,fh,rl in mm )"}; + image_info.insert(image_info.end(),vUID.begin(),vUID.end()); + // ,"slice number","echo number","dynamic scan number","image_type_mr"}; + for (auto& slice : chosen_slices){ + std::string s; + for (auto& cat: image_info) + s += cat +": " + images[cat][slice] + "\t"; + INFO(s + " (" + str(slice_data_block_positions[slice].first) + "," +str(slice_data_block_positions[slice].second) +")"); + } + } // TODO truncation_checks: check slice_numbers against "Max. number of slices/locations" // TODO dynamics with arbitrary start? - // bool sorted_by_slice = false; // convert strVec to float: @@ -221,26 +324,6 @@ namespace MR // std::transform(strVect.begin(), strVect.end(), flVect.begin(), // [](const std::string &arg) { return std::stof(arg); }); - // show some info about the data - { - std::vector v = {"Patient position", - "Preparation direction", - "FOV (ap,fh,rl) [mm]", - "Technique", - "Protocol name", - "Dynamic scan <0=no 1=yes> ?", - "Diffusion <0=no 1=yes> ?"}; - auto it = std::max_element(v.begin(), v.end(), [](const std::string& x, const std::string& y) { return x.size() < y.size(); }); - size_t padding = (*it).size(); - for (auto& k : v){ - if (PH.find(k) != PH.end()){ - INFO(k.insert(k.size(), padding - k.size(), ' ') + ": " + PH[k]); - } - else{ - WARN("PAR header lacks '" + k +"' field." 
); - } - } - } // NIBABEL: // "It seems that everyone agrees that Philips stores REC data in little-endian @@ -255,14 +338,24 @@ namespace MR // RS = rescale slope, RI = rescale intercept, SS = scale slope // DV = PV * RS + RI FP = DV / (RS * SS) - - File::MMap fmap (H.name()); + File::MMap fmap (rec_file); // std::cerr << fmap << std::endl; - size_t data_offset = 0; // File::PAR::read (H, * ( (const par_header*) fmap.address())); - // size_t data_offset = 0; - throw Exception ("par/rec not yet implemented... \"" + H.name() + "\""); + + // File::PAR::read (H, * ( (const par_header*) fmap.address())); + + // How to load the image data for non-contiguous data blocks? + // e.g. (0,4096), (36864,40960), (73728,77824), ... + // lib/image/handler/mosaic.cpp + // Mosaic::load RefPtr handler (new Handler::Default (H)); + + throw Exception ("par/rec not yet implemented... \"" + H.name() + "\""); + size_t data_offset = 0; + // for (size_t n = 0; n < chosen_slices.size(); ++n) + // handler->files.push_back (File::Entry (frames[n]->filename, frames[n]->data)) + + handler->files.push_back (File::Entry (H.name(), data_offset)); return handler; From 4ff4dffc611d4da8aa95791cbe34f95bda7bffbc Mon Sep 17 00:00:00 2001 From: Max Pietsch Date: Tue, 10 Mar 2015 13:20:36 +0000 Subject: [PATCH 0012/1471] load one volume --- lib/image/format/par.cpp | 45 +++++++++++++++++++++++++++++++++++----- 1 file changed, 40 insertions(+), 5 deletions(-) diff --git a/lib/image/format/par.cpp b/lib/image/format/par.cpp index 941b5c855e..88c33e985a 100644 --- a/lib/image/format/par.cpp +++ b/lib/image/format/par.cpp @@ -254,6 +254,11 @@ namespace MR kv.close(); INFO("uid categories: " + uid_cat); + if (!std::all_of(images["recon resolution (x y)"].begin()+1,images["recon resolution (x y)"].end(), + [&](const std::string & r) {return r==images["recon resolution (x y)"].front();})) + throw Exception ("recon resolution (x y) not the same for all slices"); + + // TODO separate uid code from image parsing // TODO: combine type values @@ -274,6 +279,8 @@ namespace MR values.push_back(images[cat][0]); VAR(categories); VAR(values); + H.comments().push_back("categories:" + str(categories)); + H.comments().push_back("values:" + str(values)); std::vector> matching_lines_per_categ; @@ -291,6 +298,28 @@ namespace MR chosen_slices.push_back(item.first); } + H.set_ndim (3); + H.dim(0) = split_image_line(images["recon resolution (x y)"].back())[1]; + H.dim(1) = split_image_line(images["recon resolution (x y)"].back())[0]; + H.dim(2) = chosen_slices.size(); + H.vox(0) = 1; + H.vox(1) = 1; + H.vox(2) = 1; + H.datatype() = DataType::UInt16; + H.datatype().set_byte_order_native(); + for (auto& item: PH) + H[item.first] = item.second; + + // H.intensity_offset(); + // H.intensity_scale(); + + // H.transform().allocate (4,4); + // H.transform()(3,0) = H.transform()(3,1) = H.transform()(3,2) = 0.0; + // H.transform()(3,3) = 1.0; + // int count = 0; + // for (int row = 0; row < 3; ++row) + // for (int col = 0; col < 4; ++col) + // H.transform() (row,col) = transform[count++]; // std::set uni; // std::set a = get_matching_indices(images[vUID[0]],std::string(images[vUID[0]][0])); @@ -303,6 +332,8 @@ namespace MR // std::cerr << std::endl; } + RefPtr handler (new Handler::Default (H)); + { INFO("selected slices:"); std::vector image_info = {"image offcentre (ap,fh,rl in mm )"}; image_info.insert(image_info.end(),vUID.begin(),vUID.end()); @@ -312,6 +343,7 @@ namespace MR for (auto& cat: image_info) s += cat +": " + images[cat][slice] + "\t"; INFO(s + " (" 
+ str(slice_data_block_positions[slice].first) + "," +str(slice_data_block_positions[slice].second) +")"); + handler->files.push_back (File::Entry (rec_file, 2*slice_data_block_positions[slice].first)); } } @@ -338,7 +370,7 @@ namespace MR // RS = rescale slope, RI = rescale intercept, SS = scale slope // DV = PV * RS + RI FP = DV / (RS * SS) - File::MMap fmap (rec_file); + // File::MMap fmap (rec_file); // std::cerr << fmap << std::endl; // File::PAR::read (H, * ( (const par_header*) fmap.address())); @@ -348,15 +380,18 @@ namespace MR // lib/image/handler/mosaic.cpp // Mosaic::load - RefPtr handler (new Handler::Default (H)); + // RefPtr handler (new Handler::Default (H)); + + // for (auto& data_block_position : slice_data_block_positions){ + // size_t data_offset = data_block_position.first; + // handler->files.push_back (File::Entry (rec_file, data_offset)); + // } - throw Exception ("par/rec not yet implemented... \"" + H.name() + "\""); - size_t data_offset = 0; + // throw Exception ("par/rec not yet implemented... \"" + H.name() + "\""); // for (size_t n = 0; n < chosen_slices.size(); ++n) // handler->files.push_back (File::Entry (frames[n]->filename, frames[n]->data)) - handler->files.push_back (File::Entry (H.name(), data_offset)); return handler; } From 9c512a64d189094449a3423ffb295d26d3836eae Mon Sep 17 00:00:00 2001 From: Max Pietsch Date: Tue, 10 Mar 2015 16:12:31 +0000 Subject: [PATCH 0013/1471] tmp --- lib/image/format/par.cpp | 121 ++++++++++++++++++++++----------------- 1 file changed, 69 insertions(+), 52 deletions(-) diff --git a/lib/image/format/par.cpp b/lib/image/format/par.cpp index 88c33e985a..22e03c7d05 100644 --- a/lib/image/format/par.cpp +++ b/lib/image/format/par.cpp @@ -98,6 +98,8 @@ namespace MR typedef std::tuple ParCol; typedef std::map ParImageInfo; typedef std::map> ParImages; + size_t data_type_size = 2; + // TOOD ParImages: use different types? // TOOD ParImages: use array? @@ -114,7 +116,7 @@ namespace MR ParHeader PH; // General information ParImageInfo image_info; // image column info ParImages images; // columns - std::vector> slice_data_block_positions; + std::vector slice_data_block_positions; while (kv.next_general()){ std::pair res = PH.insert(std::make_pair(kv.key(), kv.value())); @@ -204,7 +206,7 @@ namespace MR vUID.push_back("image_type_mr"); if (vUID.size()>1) - WARN("Multiple volumes in file " + H.name() + ". 
Uid category indices required."); + WARN("Multiple volumes in file " + H.name() + "All but one discarded."); // parse image information and save it in images std::map uid_tester; @@ -227,19 +229,16 @@ namespace MR std::for_each(vec.begin()+start_col, vec.begin()+stop_col, [&](const std::string &piece){ s += piece; s += " "; }); images[item.first].push_back(s.substr(0, s.size()-1)); } - // slice_data_block_positions { - // image = reshape(data(1+idx.*(x.*y):(idx+1).*x.*y),x,y); - // size_t idx = std::stoi(images["index in REC file (in images)"].back()); std::vector xy = split_image_line(images["recon resolution (x y)"].back()); + // size_t idx = std::stoi(images["index in REC file (in images)"].back()); // size_t start = idx*(xy[0] *xy[1]); // size_t stop = (idx+1)*(xy[0] *xy[1]); - size_t stop = slice_data_block_start + (xy[0] *xy[1]); - slice_data_block_positions.push_back(std::pair(slice_data_block_start,stop)); - slice_data_block_start = stop; + // size_t stop = slice_data_block_start + (xy[0] *xy[1]); + slice_data_block_positions.push_back(slice_data_block_start); + slice_data_block_start += data_type_size * (xy[0] *xy[1]); } - uid.clear(); for (auto& k : vUID) uid += images[k].back() + " "; @@ -251,41 +250,63 @@ namespace MR } uid_indices.push_back(split_image_line(uid)); } + // finished reading image lines kv.close(); INFO("uid categories: " + uid_cat); + // sanity checks if (!std::all_of(images["recon resolution (x y)"].begin()+1,images["recon resolution (x y)"].end(), [&](const std::string & r) {return r==images["recon resolution (x y)"].front();})) throw Exception ("recon resolution (x y) not the same for all slices"); + // TODO truncation_checks: check slice_numbers against "Max. number of slices/locations" + // TODO dynamics with arbitrary start? // TODO separate uid code from image parsing - - // TODO: combine type values + // TODO: combine type values: complex float ? // TODO: user defined volume slicing via \a categories and \a values. 
here we choose an arbitrary volume std::vector chosen_slices; + std::vector dimension_size; if (vUID.size() == 1) { chosen_slices.reserve(images[vUID[0]].size()); std::iota(chosen_slices.begin(), chosen_slices.end(), 0); + dimension_size.push_back(chosen_slices.size()); } else { - std::vector categories; // which categories in have to be unique (the remaining one can have any value) - std::vector values; // the value corresponding to categories + // TODO: loop over all volumes + + std::vector categories; // which categories have to be unique (the remaining one can have any value) + std::vector chosen_values; // the value corresponding to categories + std::vector> possible_values; - // TODO: change this to user defined values - categories.insert(categories.end(), vUID.begin()+1, vUID.end()); - for (auto cat : categories) - values.push_back(images[cat][0]); + for (auto& cat : vUID){ + possible_values.push_back(std::set (images[cat].begin(),images[cat].end())); + dimension_size.push_back(possible_values.back().size()); + INFO(cat + " dim: " + str(dimension_size.back())); + } + + // loop over iDim + size_t iDim = 0; + for (int i=0; i (images[cat].begin(),images[cat].end())); // TODO remove me + // } + for (auto& val: values) + chosen_values.push_back(*val.begin()); + VAR(categories); - VAR(values); + VAR(chosen_values); H.comments().push_back("categories:" + str(categories)); - H.comments().push_back("values:" + str(values)); + H.comments().push_back("values:" + str(chosen_values)); std::vector> matching_lines_per_categ; for (size_t i=0; i< categories.size(); i++) { - matching_lines_per_categ.push_back(std::set(get_matching_indices(images[categories[i]],values[i]))); // TODO use pointers + matching_lines_per_categ.push_back(std::set(get_matching_indices(images[categories[i]],chosen_values[i]))); // TODO use pointers } std::map intersect_counter; for (auto& lines: matching_lines_per_categ){ @@ -297,30 +318,6 @@ namespace MR if (item.second == matching_lines_per_categ.size()) chosen_slices.push_back(item.first); } - - H.set_ndim (3); - H.dim(0) = split_image_line(images["recon resolution (x y)"].back())[1]; - H.dim(1) = split_image_line(images["recon resolution (x y)"].back())[0]; - H.dim(2) = chosen_slices.size(); - H.vox(0) = 1; - H.vox(1) = 1; - H.vox(2) = 1; - H.datatype() = DataType::UInt16; - H.datatype().set_byte_order_native(); - for (auto& item: PH) - H[item.first] = item.second; - - // H.intensity_offset(); - // H.intensity_scale(); - - // H.transform().allocate (4,4); - // H.transform()(3,0) = H.transform()(3,1) = H.transform()(3,2) = 0.0; - // H.transform()(3,3) = 1.0; - // int count = 0; - // for (int row = 0; row < 3; ++row) - // for (int col = 0; col < 4; ++col) - // H.transform() (row,col) = transform[count++]; - // std::set uni; // std::set a = get_matching_indices(images[vUID[0]],std::string(images[vUID[0]][0])); // std::set b = get_matching_indices(images[vUID[1]],std::string(images[vUID[1]][0])); @@ -332,9 +329,33 @@ namespace MR // std::cerr << std::endl; } + H.set_ndim (3); + H.dim(0) = split_image_line(images["recon resolution (x y)"].back())[1]; + H.dim(1) = split_image_line(images["recon resolution (x y)"].back())[0]; + H.dim(2) = chosen_slices.size(); + H.vox(0) = 1; + H.vox(1) = 1; + H.vox(2) = 1; + H.datatype() = DataType::UInt16; + H.datatype().set_byte_order_native(); + for (auto& item: PH) + H[item.first] = item.second; + + // H.intensity_offset(); + // H.intensity_scale(); + + // H.transform().allocate (4,4); + // H.transform()(3,0) = H.transform()(3,1) = 
H.transform()(3,2) = 0.0; + // H.transform()(3,3) = 1.0; + // int count = 0; + // for (int row = 0; row < 3; ++row) + // for (int col = 0; col < 4; ++col) + // H.transform() (row,col) = transform[count++]; + RefPtr handler (new Handler::Default (H)); - { INFO("selected slices:"); + { + INFO("selected slices:"); std::vector image_info = {"image offcentre (ap,fh,rl in mm )"}; image_info.insert(image_info.end(),vUID.begin(),vUID.end()); // ,"slice number","echo number","dynamic scan number","image_type_mr"}; @@ -342,15 +363,11 @@ namespace MR std::string s; for (auto& cat: image_info) s += cat +": " + images[cat][slice] + "\t"; - INFO(s + " (" + str(slice_data_block_positions[slice].first) + "," +str(slice_data_block_positions[slice].second) +")"); - handler->files.push_back (File::Entry (rec_file, 2*slice_data_block_positions[slice].first)); + INFO(s + " (" + str(slice_data_block_positions[slice]) + ")"); + handler->files.push_back (File::Entry (rec_file, slice_data_block_positions[slice])); } } - // TODO truncation_checks: check slice_numbers against "Max. number of slices/locations" - // TODO dynamics with arbitrary start? - - // convert strVec to float: // std::vector flVect(strVect.size()); // std::transform(strVect.begin(), strVect.end(), flVect.begin(), @@ -392,7 +409,7 @@ namespace MR // handler->files.push_back (File::Entry (frames[n]->filename, frames[n]->data)) - + WARN("PAR/REC voxel size, scaling, intercept and image transformation not yet implemented."); return handler; } From 332dc745287a3c17b371fe78de17320786633b96 Mon Sep 17 00:00:00 2001 From: Max Pietsch Date: Tue, 10 Mar 2015 16:17:59 +0000 Subject: [PATCH 0014/1471] tmp --- lib/image/format/par.cpp | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/lib/image/format/par.cpp b/lib/image/format/par.cpp index 22e03c7d05..e44c21fc84 100644 --- a/lib/image/format/par.cpp +++ b/lib/image/format/par.cpp @@ -287,16 +287,22 @@ namespace MR } // loop over iDim + // for iDi size_t iDim = 0; - for (int i=0; i (images[cat].begin(),images[cat].end())); // TODO remove me // } - for (auto& val: values) - chosen_values.push_back(*val.begin()); + // for (auto& val: values) + // chosen_values.push_back(*val.begin()); VAR(categories); VAR(chosen_values); From 22d755c038ee2e0ae12eb608ba49273fd4e64091 Mon Sep 17 00:00:00 2001 From: Max Pietsch Date: Tue, 10 Mar 2015 20:25:06 +0000 Subject: [PATCH 0015/1471] dense n-D volume --- lib/image/format/par.cpp | 215 +++++++++++++++++++++++++++++---------- 1 file changed, 159 insertions(+), 56 deletions(-) diff --git a/lib/image/format/par.cpp b/lib/image/format/par.cpp index e44c21fc84..62432e54bc 100644 --- a/lib/image/format/par.cpp +++ b/lib/image/format/par.cpp @@ -89,6 +89,66 @@ namespace MR std::istringstream is(line); return std::vector(std::istream_iterator(is), std::istream_iterator()); } + struct SortStringByLengthFirst + { + bool operator () (const std::string & p_lhs, const std::string & p_rhs) + { + const size_t lhsLength = p_lhs.length() ; + const size_t rhsLength = p_rhs.length() ; + + if(lhsLength == rhsLength) + { + return (p_lhs < p_rhs) ; // when two strings have the same + // length, defaults to the normal + // string comparison + } + + return (lhsLength < rhsLength) ; // compares with the length + } + } ; + + // loop over /a nDim range loops with size /a size + class NestedLoop { + size_t nDim; + size_t p; + bool first_iteration = true; + std::vector size; + std::vector max; + std::vector indx; + public: + std::vector get_indices() { + return 
std::vector (indx.begin(),indx.end()-1); + } + size_t operator() () { + if (first_iteration){ + first_iteration = false; + return true; + } + if(indx[nDim]!=0) return false; + // increment index + indx[0]++; + while(indx[p]==max[p]) { + indx[p]=0; + indx[++p]++; //increase p by 1, and increase the next (p+1)th index + if (nDim == 1 && p == 1) { return false; } + if(indx[p]!=max[p]) { + p=0; // break + } + if (p == nDim-1) { + return false; + } + } + return true; + } + void init (const size_t& x, const std::vector& s) { + nDim = x; + size = s; + size.push_back(0); + p = 0; //Used to increment all of the indicies correctly, at the end of each loop. + indx.assign(nDim+1,0); + max = size; + } + }; typedef double ComputeType; @@ -206,7 +266,7 @@ namespace MR vUID.push_back("image_type_mr"); if (vUID.size()>1) - WARN("Multiple volumes in file " + H.name() + "All but one discarded."); + INFO("Multiple volumes in file " + H.name() ); // parse image information and save it in images std::map uid_tester; @@ -272,76 +332,118 @@ namespace MR chosen_slices.reserve(images[vUID[0]].size()); std::iota(chosen_slices.begin(), chosen_slices.end(), 0); dimension_size.push_back(chosen_slices.size()); + + H.set_ndim (3); + H.dim(0) = split_image_line(images["recon resolution (x y)"].back())[0]; + H.dim(1) = split_image_line(images["recon resolution (x y)"].back())[1]; + H.dim(2) = chosen_slices.size(); + // untested } else { - // TODO: loop over all volumes - - std::vector categories; // which categories have to be unique (the remaining one can have any value) - std::vector chosen_values; // the value corresponding to categories - std::vector> possible_values; + // std::vector categories; // which categories have to be unique (the remaining one can have any value) + // std::vector chosen_values; // the value corresponding to categories + // std::vector> possible_values; + std::vector> possible_values; + // std::vector possible_value_index(vUID.size(),0); for (auto& cat : vUID){ - possible_values.push_back(std::set (images[cat].begin(),images[cat].end())); - dimension_size.push_back(possible_values.back().size()); + // auto comp = [](const std::string& a, const std::string& b) -> bool { + // if (a.length() == b.length()) + // return (a < b); + // else + // return a.length() < b.length(); }; + // auto tmp_set = std::set (images[cat].begin(),images[cat].end()); + auto tmp_set = std::set (images[cat].begin(),images[cat].end()); + possible_values.push_back(std::vector(tmp_set.begin(),tmp_set.end())); + dimension_size.push_back(tmp_set.size()); INFO(cat + " dim: " + str(dimension_size.back())); } - - // loop over iDim - // for iDi - size_t iDim = 0; - categories.clear(); - chosen_values.clear(); - - for (size_t i=0; i (images[cat].begin(),images[cat].end())); // TODO remove me - // } - // for (auto& val: values) - // chosen_values.push_back(*val.begin()); - - VAR(categories); - VAR(chosen_values); - H.comments().push_back("categories:" + str(categories)); - H.comments().push_back("values:" + str(chosen_values)); - - std::vector> matching_lines_per_categ; - for (size_t i=0; i< categories.size(); i++) { - matching_lines_per_categ.push_back(std::set(get_matching_indices(images[categories[i]],chosen_values[i]))); // TODO use pointers + H.set_ndim (2+dimension_size.size()); + H.dim(0) = split_image_line(images["recon resolution (x y)"].back())[0]; + H.dim(1) = split_image_line(images["recon resolution (x y)"].back())[1]; + for (size_t iDim = 0; iDim intersect_counter; - for (auto& lines: matching_lines_per_categ){ - for 
(auto& line :lines) - intersect_counter[line] += 1; + std::map line_lookup; + { + for (size_t i=0; i(nested_loop.get_indices())) + t += possible_values[idx][nested_loop.get_indices()[idx]] + "," ; + if (line_lookup.find(t) == line_lookup.end()){ + // throw Exception("we assumed that the n-D volume is dense"); + WARN("we assumed that the n-D volume is dense. hack: we use slice 0 instead"); //fixme TODO + chosen_slices.push_back(0); + } else { + chosen_slices.push_back(line_lookup[t]); + } } - // std::set uni; - // std::set a = get_matching_indices(images[vUID[0]],std::string(images[vUID[0]][0])); - // std::set b = get_matching_indices(images[vUID[1]],std::string(images[vUID[1]][0])); - // std::set_intersection (a.begin(), a.end(), - // b.begin(), b.end(), - // std::inserter(uni, uni.begin())); - // std::cerr << vUID[2] + "=" + images[vUID[2]][0] + ", " + vUID[1] + "=" + images[vUID[1]][0] + " has indices "; - // std::copy(uni.begin(), uni.end(), std::ostream_iterator(std::cerr, " ")); - // std::cerr << std::endl; + + // // loop over iDim + // for (size_t iDim = 0; iDim> matching_lines_per_categ; + // for (size_t i=0; i< categories.size(); i++) { + // matching_lines_per_categ.push_back(std::set(get_matching_indices(images[categories[i]],chosen_values[i]))); // TODO use pointers + // } + // std::map intersect_counter; + // for (auto& lines: matching_lines_per_categ){ + // for (auto& line :lines) + // intersect_counter[line] += 1; + // } + + // for (auto& item: intersect_counter) { + // if (item.second == matching_lines_per_categ.size()) + // chosen_slices.push_back(item.first); + // } + // } + + + // std::set uni; + // std::set a = get_matching_indices(images[vUID[0]],std::string(images[vUID[0]][0])); + // std::set b = get_matching_indices(images[vUID[1]],std::string(images[vUID[1]][0])); + // std::set_intersection (a.begin(), a.end(), + // b.begin(), b.end(), + // std::inserter(uni, uni.begin())); + // std::cerr << vUID[2] + "=" + images[vUID[2]][0] + ", " + vUID[1] + "=" + images[vUID[1]][0] + " has indices "; + // std::copy(uni.begin(), uni.end(), std::ostream_iterator(std::cerr, " ")); + // std::cerr << std::endl; + + // H.dim(iDim+2) = dimension_size[iDim]; + + // } } - H.set_ndim (3); - H.dim(0) = split_image_line(images["recon resolution (x y)"].back())[1]; - H.dim(1) = split_image_line(images["recon resolution (x y)"].back())[0]; - H.dim(2) = chosen_slices.size(); H.vox(0) = 1; H.vox(1) = 1; - H.vox(2) = 1; H.datatype() = DataType::UInt16; H.datatype().set_byte_order_native(); for (auto& item: PH) @@ -367,6 +469,7 @@ namespace MR // ,"slice number","echo number","dynamic scan number","image_type_mr"}; for (auto& slice : chosen_slices){ std::string s; + // s += images for (auto& cat: image_info) s += cat +": " + images[cat][slice] + "\t"; INFO(s + " (" + str(slice_data_block_positions[slice]) + ")"); From ef369880febd89db0359041b051ed9cdfa3110c7 Mon Sep 17 00:00:00 2001 From: Max Pietsch Date: Wed, 11 Mar 2015 17:49:00 +0000 Subject: [PATCH 0016/1471] eigen added to cflags --- configure | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/configure b/configure index c6cecd5b5c..276fad214d 100755 --- a/configure +++ b/configure @@ -10,6 +10,7 @@ noshared = False static = False verbose = False R_module = False +eigen = True profile_name = None sh_basis_def = None @@ -1018,6 +1019,14 @@ WARNING: no OpenGL implementation found. 
if '-Wall' in qt_cflags: qt_cflags.remove ('-Wall') if '-W' in qt_cflags: qt_cflags.remove ('-W') +if eigen: + report('checking for Eigen 3 library: ') + try: + eigen_cflags = shlex.split (execute ([ 'pkg-config', '--cflags', 'eigen3' ], RuntimeError)[1]) + cpp_flags += eigen_cflags + report(eigen_cflags[0]) + except: + log('error running on pkg-config --cflags eigen3\n\n') # output R module: From 8c7aef8081bc1780d70f2f96c4aa8ead3b55efce Mon Sep 17 00:00:00 2001 From: Max Pietsch Date: Wed, 11 Mar 2015 17:51:19 +0000 Subject: [PATCH 0017/1471] - --- lib/image/format/par.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/image/format/par.cpp b/lib/image/format/par.cpp index 62432e54bc..89ffb86142 100644 --- a/lib/image/format/par.cpp +++ b/lib/image/format/par.cpp @@ -263,7 +263,7 @@ namespace MR // 4.2 if (kv.version()=="V4.2" && (std::stoi(PH["Number of label types <0=no ASL>"]) > 1)) vUID.push_back("label type (ASL) (imagekey!)"); - vUID.push_back("image_type_mr"); + vUID.push_back("image_type_mr"); // TODO: process and remove last dimension if (vUID.size()>1) INFO("Multiple volumes in file " + H.name() ); From 02745cf061c66a180b1489382b4d43c1e04a59de Mon Sep 17 00:00:00 2001 From: Max Pietsch Date: Wed, 11 Mar 2015 19:19:45 +0000 Subject: [PATCH 0018/1471] unnecessary header removed --- lib/image/format/par.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/image/format/par.cpp b/lib/image/format/par.cpp index 89ffb86142..60c3564111 100644 --- a/lib/image/format/par.cpp +++ b/lib/image/format/par.cpp @@ -33,7 +33,7 @@ #include "get_set.h" #include "file/par_utils.h" -#include "file/par.h" // not used yet +// #include "file/par.h" // not used yet #include "image/format/mrtrix_utils.h" #include From b9c2e5ca3c9720f4f549a8989a80e0292b513bf4 Mon Sep 17 00:00:00 2001 From: Max Pietsch Date: Wed, 11 Mar 2015 19:36:38 +0000 Subject: [PATCH 0019/1471] file/par.h removed --- lib/file/par_utils.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/file/par_utils.h b/lib/file/par_utils.h index 3dc12a58ae..192842ee26 100644 --- a/lib/file/par_utils.h +++ b/lib/file/par_utils.h @@ -23,7 +23,7 @@ #ifndef __file_par_utils_h__ #define __file_par_utils_h__ -#include "file/par.h" +// #include "file/par.h" #include "math/matrix.h" #include From 374ff3f93456068c1137161b0b7a28bbff16ba13 Mon Sep 17 00:00:00 2001 From: Max Pietsch Date: Wed, 11 Mar 2015 21:13:44 +0000 Subject: [PATCH 0020/1471] fix missing header for std::iota --- lib/image/format/par.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/image/format/par.cpp b/lib/image/format/par.cpp index 60c3564111..cbe1e3c80e 100644 --- a/lib/image/format/par.cpp +++ b/lib/image/format/par.cpp @@ -20,7 +20,7 @@ */ - +#include #include "file/config.h" #include "file/ofstream.h" #include "file/path.h" From 7609198adb3d711df5c3d3fc2fdfc6d9594068dc Mon Sep 17 00:00:00 2001 From: J-Donald Tournier Date: Tue, 21 Jul 2015 11:58:09 +0100 Subject: [PATCH 0021/1471] build: output per-command execution times --- build | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/build b/build index b2d2d73c79..8784563380 100755 --- a/build +++ b/build @@ -1,6 +1,7 @@ #!/usr/bin/env python import platform, sys, os, time, threading, subprocess, copy, codecs, glob, atexit, tempfile +from timeit import default_timer as timer config_file = 'config.default' @@ -25,6 +26,7 @@ bcolors = { "failed" : '\033[91m', "note" : '\033[94m'} + def colorize(s): out = [] for 
l in s.splitlines(): @@ -49,6 +51,7 @@ lock = threading.Lock() print_lock = threading.Lock() stop = False error_stream = None +current_line = 0 logfile = open ('build.log', 'wb') @@ -81,11 +84,23 @@ if sys.stderr.isatty(): -def disp (msg): +def disp (msg, line = -1): + global current_line print_lock.acquire() + if line < 0: + this_line = current_line + else: + this_line = line logfile.write (msg.encode (errors='ignore')) + if line >= 0: + sys.stdout.write ('\033[s\r\033['+str(current_line - line)+'A') sys.stdout.write (msg) + if line < 0: + current_line += len (msg.splitlines()) + else: + sys.stdout.write ('\033[u') print_lock.release() + return this_line def log (msg): print_lock.acquire() @@ -494,12 +509,15 @@ def fillin (template, keyvalue): def execute (message, cmd): - disp (message + os.linesep) + line = disp (message + os.linesep) log (' '.join(cmd) + os.linesep) try: + start = timer() process = subprocess.Popen (cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=environ) ( stdout, stderr ) = process.communicate() + end = timer() + disp (message + ' [' + ("{:.3f}".format(end-start)) + 's]' + os.linesep, line) if process.returncode != 0: if error_stream is not None: disp ('ERROR: ' + message + os.linesep) From f8ba2b247ce813426f0e32e64d5645c4b362b16c Mon Sep 17 00:00:00 2001 From: Daan Christiaens Date: Tue, 25 Oct 2016 14:32:58 +0100 Subject: [PATCH 0022/1471] dwidenoise: cosmetic changes. --- cmd/dwidenoise.cpp | 12 ++++++------ testing/tests/dwidenoise | 12 ++++++------ 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/cmd/dwidenoise.cpp b/cmd/dwidenoise.cpp index e4a6adeba3..a278356902 100644 --- a/cmd/dwidenoise.cpp +++ b/cmd/dwidenoise.cpp @@ -118,13 +118,13 @@ class DenoisingFunctor // Compute Eigendecomposition: Eigen::MatrixXf XtX (r,r); if (m <= n) - XtX.template triangularView() = X * X.transpose(); + XtX.template triangularView() = X * X.adjoint(); else - XtX.template triangularView() = X.transpose() * X; + XtX.template triangularView() = X.adjoint() * X; Eigen::SelfAdjointEigenSolver eig (XtX); - // eigenvalues provide squared singular values: + // eigenvalues provide squared singular values, sorted in increasing order: Eigen::VectorXf s = eig.eigenvalues(); - + // Marchenko-Pastur optimal threshold const double lam_r = s[0] / n; double clam = 0.0; @@ -135,8 +135,8 @@ class DenoisingFunctor double lam = s[p] / n; clam += lam; double gam = double(m-r+p+1) / double(n); - double sigsq1 = clam / (p+1) / std::max (gam, 1.0); - double sigsq2 = (lam - lam_r) / 4 / std::sqrt(gam); + double sigsq1 = clam / ((p+1) * std::max (gam, 1.0)); + double sigsq2 = (lam - lam_r) / (4.0 * std::sqrt(gam)); // sigsq2 > sigsq1 if signal else noise if (sigsq2 < sigsq1) { sigma2 = sigsq1; diff --git a/testing/tests/dwidenoise b/testing/tests/dwidenoise index f934ebc84a..62947dc227 100644 --- a/testing/tests/dwidenoise +++ b/testing/tests/dwidenoise @@ -1,6 +1,6 @@ -dwidenoise dwi.mif - | testing_diff_data - dwidenoise/dwi.mif -voxel 1e-4 -dwidenoise dwi.mif -mask mask.mif - | testing_diff_data - dwidenoise/masked.mif -voxel 1e-4 -dwidenoise dwi.mif -extent 3 - | testing_diff_data - dwidenoise/extent3.mif -voxel 1e-4 -dwidenoise dwi.mif -extent 5,3,1 - | testing_diff_data - dwidenoise/extent531.mif -voxel 1e-4 -dwidenoise dwi.mif -noise tmp-noise.mif - | testing_diff_data - dwidenoise/dwi.mif -voxel 1e-4 && testing_diff_data tmp-noise.mif dwidenoise/noise.mif -frac 1e-4 -dwidenoise dwi.mif -extent 3 -noise tmp-noise3.mif - | testing_diff_data - dwidenoise/extent3.mif -voxel 
1e-4 && testing_diff_data tmp-noise3.mif dwidenoise/noise3.mif -frac 1e-4 +dwidenoise dwi.mif - | testing_diff_data - dwidenoise/dwi.mif -voxel 1e-3 +dwidenoise dwi.mif -mask mask.mif - | testing_diff_data - dwidenoise/masked.mif -voxel 1e-3 +dwidenoise dwi.mif -extent 3 - | testing_diff_data - dwidenoise/extent3.mif -voxel 1e-3 +dwidenoise dwi.mif -extent 5,3,1 - | testing_diff_data - dwidenoise/extent531.mif -voxel 1e-3 +dwidenoise dwi.mif -noise tmp-noise.mif - | testing_diff_data - dwidenoise/dwi.mif -voxel 1e-3 && testing_diff_data tmp-noise.mif dwidenoise/noise.mif -frac 1e-2 +dwidenoise dwi.mif -extent 3 -noise tmp-noise3.mif - | testing_diff_data - dwidenoise/extent3.mif -voxel 1e-3 && testing_diff_data tmp-noise3.mif dwidenoise/noise3.mif -frac 1e-2 From d351550c1266e53f12959e60dc51a45054bf9fdf Mon Sep 17 00:00:00 2001 From: Daan Christiaens Date: Tue, 25 Oct 2016 17:26:44 +0100 Subject: [PATCH 0023/1471] dwidenoise: switch to double precision and check for negative eigenvalues. --- cmd/dwidenoise.cpp | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/cmd/dwidenoise.cpp b/cmd/dwidenoise.cpp index a278356902..573ac816c4 100644 --- a/cmd/dwidenoise.cpp +++ b/cmd/dwidenoise.cpp @@ -98,7 +98,7 @@ class DenoisingFunctor m (dwi.size(3)), n (extent[0]*extent[1]*extent[2]), r ((m() = X * X.adjoint(); else XtX.template triangularView() = X.adjoint() * X; - Eigen::SelfAdjointEigenSolver eig (XtX); + Eigen::SelfAdjointEigenSolver eig (XtX); // eigenvalues provide squared singular values, sorted in increasing order: - Eigen::VectorXf s = eig.eigenvalues(); + Eigen::VectorXd s = eig.eigenvalues(); // Marchenko-Pastur optimal threshold - const double lam_r = s[0] / n; + const double lam_r = std::max(s[0], 0.0) / n; double clam = 0.0; sigma2 = NaN; ssize_t cutoff_p = 0; for (ssize_t p = 0; p < r; ++p) { - double lam = s[p] / n; + double lam = std::max(s[p], 0.0) / n; clam += lam; double gam = double(m-r+p+1) / double(n); double sigsq1 = clam / ((p+1) * std::max (gam, 1.0)); @@ -141,7 +141,7 @@ class DenoisingFunctor if (sigsq2 < sigsq1) { sigma2 = sigsq1; cutoff_p = p+1; - } + } } if (cutoff_p > 0) { @@ -176,7 +176,7 @@ class DenoisingFunctor for (dwi.index(1) = pos[1]-extent[1]; dwi.index(1) <= pos[1]+extent[1]; ++dwi.index(1)) for (dwi.index(0) = pos[0]-extent[0]; dwi.index(0) <= pos[0]+extent[0]; ++dwi.index(0), ++k) if (! is_out_of_bounds(dwi)) - X.col(k) = dwi.row(3).template cast(); + X.col(k) = dwi.row(3).template cast(); // reset image position dwi.index(0) = pos[0]; dwi.index(1) = pos[1]; @@ -186,7 +186,7 @@ class DenoisingFunctor private: const std::array extent; const ssize_t m, n, r; - Eigen::MatrixXf X; + Eigen::MatrixXd X; std::array pos; double sigma2; Image mask; From 0016f7b089a8f136d9764e395fd3267c8c3753f8 Mon Sep 17 00:00:00 2001 From: Daan Christiaens Date: Tue, 13 Dec 2016 18:53:24 +0000 Subject: [PATCH 0024/1471] dwidenoise: template datatype for EVD. 
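For context on this run of dwidenoise patches: as far as can be read from the hunks above, the Marchenko-Pastur threshold being refined here works on the eigenvalues \lambda_0 \le \ldots \le \lambda_{r-1} of the patch covariance divided by n (with m volumes, n voxels per patch, and r the smaller of the two), and for each candidate cut-off p evaluates

    \gamma_p          = (m - r + p + 1) / n
    \hat\sigma_1^2(p) = ( \sum_{q \le p} \lambda_q ) / ( (p + 1) \max(\gamma_p, 1) )
    \hat\sigma_2^2(p) = ( \lambda_p - \lambda_0 ) / ( 4 \sqrt{\gamma_p} )

Components are classified as noise while \hat\sigma_2^2(p) < \hat\sigma_1^2(p); the last p satisfying this condition sets the cut-off, and \hat\sigma_1^2 at that point is taken as the noise variance sigma2 used in the subsequent reconstruction.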
--- cmd/dwidenoise.cpp | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/cmd/dwidenoise.cpp b/cmd/dwidenoise.cpp index 76c879ab35..5786749fde 100644 --- a/cmd/dwidenoise.cpp +++ b/cmd/dwidenoise.cpp @@ -89,10 +89,14 @@ void usage () typedef float value_type; -template +template class DenoisingFunctor { public: + + typedef Eigen::Matrix MatrixX; + typedef Eigen::Matrix VectorX; + DenoisingFunctor (ImageType& dwi, std::vector extent, Image& mask, ImageType& noise) : extent {{extent[0]/2, extent[1]/2, extent[2]/2}}, m (dwi.size(3)), @@ -116,23 +120,23 @@ class DenoisingFunctor load_data (dwi); // Compute Eigendecomposition: - Eigen::MatrixXd XtX (r,r); + MatrixX XtX (r,r); if (m <= n) XtX.template triangularView() = X * X.adjoint(); else XtX.template triangularView() = X.adjoint() * X; - Eigen::SelfAdjointEigenSolver eig (XtX); + Eigen::SelfAdjointEigenSolver eig (XtX); // eigenvalues provide squared singular values, sorted in increasing order: - Eigen::VectorXd s = eig.eigenvalues(); + VectorX s = eig.eigenvalues(); // Marchenko-Pastur optimal threshold - const double lam_r = std::max(s[0], 0.0) / n; + const double lam_r = std::max(double(s[0]), 0.0) / n; double clam = 0.0; sigma2 = NaN; ssize_t cutoff_p = 0; for (ssize_t p = 0; p < r; ++p) { - double lam = std::max(s[p], 0.0) / n; + double lam = std::max(double(s[p]), 0.0) / n; clam += lam; double gam = double(m-r+p+1) / double(n); double sigsq1 = clam / ((p+1) * std::max (gam, 1.0)); @@ -157,7 +161,7 @@ class DenoisingFunctor // Store output assign_pos_of(dwi).to(out); for (auto l = Loop (3) (out); l; ++l) - out.value() = X(out.index(3), n/2); + out.value() = value_type (X(out.index(3), n/2)); // store noise map if requested: if (noise.valid()) { @@ -176,7 +180,7 @@ class DenoisingFunctor for (dwi.index(1) = pos[1]-extent[1]; dwi.index(1) <= pos[1]+extent[1]; ++dwi.index(1)) for (dwi.index(0) = pos[0]-extent[0]; dwi.index(0) <= pos[0]+extent[0]; ++dwi.index(0), ++k) if (! is_out_of_bounds(dwi)) - X.col(k) = dwi.row(3).template cast(); + X.col(k) = dwi.row(3).template cast(); // reset image position dwi.index(0) = pos[0]; dwi.index(1) = pos[1]; @@ -186,7 +190,7 @@ class DenoisingFunctor private: const std::array extent; const ssize_t m, n, r; - Eigen::MatrixXd X; + MatrixX X; std::array pos; double sigma2; Image mask; From 5235cece7d0f6978d57acae927990fdfa0598d58 Mon Sep 17 00:00:00 2001 From: Daan Christiaens Date: Thu, 15 Dec 2016 12:02:49 +0000 Subject: [PATCH 0025/1471] dwidenoise: user option for SVD datatype. 
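This patch wires a run-time -datatype choice (float32 or float64) through to the templated DenoisingFunctor, so the eigendecomposition can be carried out in either precision. A minimal stand-alone sketch of that dispatch pattern, with purely illustrative names (not the MRtrix3 option-parsing API):

    #include <iostream>
    #include <string>

    template <class F>
    void run_denoising ()
    {
      // the precision of the eigendecomposition is fixed at compile time,
      // selected from the run-time option parsed below
      std::cout << "eigendecomposition in " << 8 * sizeof (F) << "-bit floats\n";
    }

    int main (int argc, char** argv)
    {
      const std::string dtype = (argc > 1) ? argv[1] : "float32";
      if (dtype == "float64")
        run_denoising<double> ();
      else
        run_denoising<float> ();   // float32: faster, usually sufficient
    }

With the option in place the command would presumably be invoked along the lines of "dwidenoise dwi.mif out.mif -datatype float64" when double precision is wanted; note that the handling of the default (no -datatype given) is only finalised in patch 0027 below.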
--- cmd/dwidenoise.cpp | 25 +++++++++++++++++++++---- 1 file changed, 21 insertions(+), 4 deletions(-) diff --git a/cmd/dwidenoise.cpp b/cmd/dwidenoise.cpp index 5786749fde..80f3fe17c4 100644 --- a/cmd/dwidenoise.cpp +++ b/cmd/dwidenoise.cpp @@ -24,6 +24,7 @@ using namespace MR; using namespace App; +const char* const dtypes[] = { "float32", "float64", NULL }; void usage () { @@ -64,7 +65,11 @@ void usage () + Argument ("window").type_sequence_int () + Option ("noise", "the output noise map.") - + Argument ("level").type_image_out(); + + Argument ("level").type_image_out() + + + Option ("datatype", "datatype for SVD (float32 or float64).") + + Argument ("spec").type_choice(dtypes); + COPYRIGHT = "Copyright (c) 2016 New York University, University of Antwerp, and the MRtrix3 contributors \n \n" "Permission is hereby granted, free of charge, to any non-commercial entity ('Recipient') obtaining a copy of this software and " @@ -235,9 +240,21 @@ void run () noise = Image::create (opt[0][0], header); } - DenoisingFunctor< Image > func (dwi_in, extent, mask, noise); - ThreadedLoop ("running MP-PCA denoising", dwi_in, 0, 3) - .run (func, dwi_in, dwi_out); + opt = get_options("datatype"); + if (opt.size() && (int(opt[0][0]) == 0)) { + DenoisingFunctor< Image , float > func (dwi_in, extent, mask, noise); + ThreadedLoop ("running MP-PCA denoising", dwi_in, 0, 3) + .run (func, dwi_in, dwi_out); + } + else if (int(opt[0][0]) == 1) { + DenoisingFunctor< Image , double > func (dwi_in, extent, mask, noise); + ThreadedLoop ("running MP-PCA denoising", dwi_in, 0, 3) + .run (func, dwi_in, dwi_out); + } + else { + assert(0); + } + } From f6b56d67b75c1af31858eb168dd18a57c153efa9 Mon Sep 17 00:00:00 2001 From: Daan Christiaens Date: Thu, 15 Dec 2016 15:07:14 +0000 Subject: [PATCH 0026/1471] dwidenoise: data centring for numerical stability. --- cmd/dwidenoise.cpp | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/cmd/dwidenoise.cpp b/cmd/dwidenoise.cpp index 80f3fe17c4..5748fa0661 100644 --- a/cmd/dwidenoise.cpp +++ b/cmd/dwidenoise.cpp @@ -108,6 +108,7 @@ class DenoisingFunctor n (extent[0]*extent[1]*extent[2]), r ((m sigsq1 if signal else noise if (sigsq2 < sigsq1) { @@ -166,7 +167,7 @@ class DenoisingFunctor // Store output assign_pos_of(dwi).to(out); for (auto l = Loop (3) (out); l; ++l) - out.value() = value_type (X(out.index(3), n/2)); + out.value() = value_type (X(out.index(3), n/2) + Xm(out.index(3))); // store noise map if requested: if (noise.valid()) { @@ -186,6 +187,9 @@ class DenoisingFunctor for (dwi.index(0) = pos[0]-extent[0]; dwi.index(0) <= pos[0]+extent[0]; ++dwi.index(0), ++k) if (! is_out_of_bounds(dwi)) X.col(k) = dwi.row(3).template cast(); + // data centring + Xm = X.rowwise().mean(); + X.colwise() -= Xm; // reset image position dwi.index(0) = pos[0]; dwi.index(1) = pos[1]; @@ -196,6 +200,7 @@ class DenoisingFunctor const std::array extent; const ssize_t m, n, r; MatrixX X; + VectorX Xm; std::array pos; double sigma2; Image mask; From ccffde75614bf12ee793b3de8f456bef81b25be9 Mon Sep 17 00:00:00 2001 From: Daan Christiaens Date: Thu, 15 Dec 2016 16:46:31 +0000 Subject: [PATCH 0027/1471] dwidenoise: fix default datatype. 
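As committed two patches earlier, the -datatype dispatch only took the single-precision branch when the option was given explicitly, and its double-precision branch went on to read opt[0][0] even when no -datatype option had been supplied at all. This change makes float32 the default whenever the option is absent, only inspects the option value when it actually exists, and adds DEBUG messages recording which precision was selected.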
--- cmd/dwidenoise.cpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cmd/dwidenoise.cpp b/cmd/dwidenoise.cpp index 5748fa0661..bfdd565e36 100644 --- a/cmd/dwidenoise.cpp +++ b/cmd/dwidenoise.cpp @@ -246,12 +246,14 @@ void run () } opt = get_options("datatype"); - if (opt.size() && (int(opt[0][0]) == 0)) { + if (!opt.size() || (int(opt[0][0]) == 0)) { + DEBUG("Computing SVD with single precision."); DenoisingFunctor< Image , float > func (dwi_in, extent, mask, noise); ThreadedLoop ("running MP-PCA denoising", dwi_in, 0, 3) .run (func, dwi_in, dwi_out); } else if (int(opt[0][0]) == 1) { + DEBUG("Computing SVD with double precision."); DenoisingFunctor< Image , double > func (dwi_in, extent, mask, noise); ThreadedLoop ("running MP-PCA denoising", dwi_in, 0, 3) .run (func, dwi_in, dwi_out); From 6ee275c374290bc61887da3c7aa7ff05bcb08219 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Mon, 3 Apr 2017 14:35:01 +1000 Subject: [PATCH 0028/1471] Stats: First steps toward element-wise design matrices Manually ported from fixel_twi_0.3.15 branch. --- cmd/connectomestats.cpp | 2 +- cmd/fixelcfestats.cpp | 2 +- cmd/mrclusterstats.cpp | 2 +- cmd/vectorstats.cpp | 2 +- core/math/stats/glm.cpp | 151 +++++++++++++++++++++++++++----- core/math/stats/glm.h | 174 ++++++++++++++++++++++++++++++++++--- core/math/stats/import.cpp | 43 +++++++++ core/math/stats/import.h | 138 +++++++++++++++++++++++++++++ 8 files changed, 476 insertions(+), 38 deletions(-) create mode 100644 core/math/stats/import.cpp create mode 100644 core/math/stats/import.h diff --git a/cmd/connectomestats.cpp b/cmd/connectomestats.cpp index 0ff774015a..ed1293c301 100644 --- a/cmd/connectomestats.cpp +++ b/cmd/connectomestats.cpp @@ -262,7 +262,7 @@ void run() save_vector (stdevs.col(0), output_prefix + "_std_dev.csv"); } - Math::Stats::GLMTTest glm_ttest (data, design, contrast); + Math::Stats::GLMTTestFixed glm_ttest (data, design, contrast); // If performing non-stationarity adjustment we need to pre-compute the empirical statistic vector_type empirical_statistic; diff --git a/cmd/fixelcfestats.cpp b/cmd/fixelcfestats.cpp index 63a0fd29b2..1960fbdbe7 100644 --- a/cmd/fixelcfestats.cpp +++ b/cmd/fixelcfestats.cpp @@ -370,7 +370,7 @@ void run() { write_fixel_output (Path::join (output_fixel_directory, "std_dev.mif"), temp.row(0), output_header); } - Math::Stats::GLMTTest glm_ttest (data, design, contrast); + Math::Stats::GLMTTestFixed glm_ttest (data, design, contrast); std::shared_ptr cfe_integrator; cfe_integrator.reset (new Stats::CFE::Enhancer (connectivity_matrix, cfe_dh, cfe_e, cfe_h)); vector_type empirical_cfe_statistic; diff --git a/cmd/mrclusterstats.cpp b/cmd/mrclusterstats.cpp index 0ef0cc5872..fa404a7c25 100644 --- a/cmd/mrclusterstats.cpp +++ b/cmd/mrclusterstats.cpp @@ -210,7 +210,7 @@ void run() { if (compute_negative_contrast) default_cluster_output_neg.reset (new vector_type (num_vox)); - Math::Stats::GLMTTest glm (data, design, contrast); + Math::Stats::GLMTTestFixed glm (data, design, contrast); std::shared_ptr enhancer; if (use_tfce) { diff --git a/cmd/vectorstats.cpp b/cmd/vectorstats.cpp index 102d7469f6..07dc7c9b3b 100644 --- a/cmd/vectorstats.cpp +++ b/cmd/vectorstats.cpp @@ -158,7 +158,7 @@ void run() save_vector (stdevs.col(0), output_prefix + "_std_dev.csv"); } - Math::Stats::GLMTTest glm_ttest (data, design, contrast); + Math::Stats::GLMTTestFixed glm_ttest (data, design, contrast); // Precompute default statistic // Don't use convenience function: No enhancer! 
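The core change in this patch follows in core/math/stats/glm.cpp and glm.h below: alongside the renamed GLMTTestFixed, a generic GLM::ttest() is added that works without pre-scaled contrasts or a pre-computed pseudo-inverse. A minimal stand-alone Eigen sketch of the quantity that function evaluates, using toy data, a single contrast row and illustrative variable names (and assuming a full-rank design, so the degrees of freedom are simply rows minus columns):

    #include <cmath>
    #include <iostream>
    #include <Eigen/Dense>

    int main ()
    {
      // toy design: intercept plus one regressor, four "subjects"
      Eigen::MatrixXd X (4, 2);
      X << 1, 0,
           1, 1,
           1, 2,
           1, 3;
      Eigen::VectorXd y (4);            // measurements for a single element
      y << 1.1, 2.0, 2.9, 4.2;
      Eigen::RowVectorXd c (2);         // contrast: test the slope
      c << 0, 1;

      const Eigen::VectorXd b = X.colPivHouseholderQr().solve (y);   // betas
      const Eigen::VectorXd e = y - X * b;                           // residuals
      const double dof = X.rows() - X.cols();                        // full-rank assumption
      const double variance = e.squaredNorm() / dof;
      const Eigen::MatrixXd XtX = X.transpose() * X;
      const double t = (c * b).value()
                       / std::sqrt (variance * (c * XtX.inverse() * c.transpose()).value());

      std::cout << "beta = " << b.transpose() << ", t = " << t << "\n";
    }

This only illustrates the formula t = c.b / sqrt(variance * c.(X^T.X)^-1.c^T); the production code below batches elements, guards against non-finite values, and uses the rank of the design rather than assuming it is full.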
diff --git a/core/math/stats/glm.cpp b/core/math/stats/glm.cpp index 389c277894..21fccc93fa 100644 --- a/core/math/stats/glm.cpp +++ b/core/math/stats/glm.cpp @@ -32,7 +32,7 @@ namespace MR { assert (contrasts.cols() == design.cols()); const matrix_type XtX = design.transpose() * design; - const matrix_type pinv_XtX = (XtX.transpose() * XtX).fullPivLu().solve (XtX.transpose()); + const matrix_type pinv_XtX = (XtX.transpose() * XtX).fullPivLu().solve (XtX.transpose()); matrix_type scaled_contrasts (contrasts); for (size_t n = 0; n < size_t(contrasts.rows()); ++n) { @@ -44,23 +44,46 @@ namespace MR + void ttest_prescaled (matrix_type& tvalues, + const matrix_type& design, + const matrix_type& pinv_design, + const matrix_type& measurements, + const matrix_type& scaled_contrasts, + matrix_type& betas, + matrix_type& residuals) + { + betas.noalias() = measurements * pinv_design; + residuals.noalias() = measurements - betas * design; + tvalues.noalias() = betas * scaled_contrasts; + for (size_t n = 0; n < size_t(tvalues.rows()); ++n) + tvalues.row(n).array() /= residuals.row(n).norm(); + } + + + void ttest (matrix_type& tvalues, const matrix_type& design, - const matrix_type& pinv_design, const matrix_type& measurements, - const matrix_type& scaled_contrasts, + const matrix_type& contrasts, matrix_type& betas, matrix_type& residuals) { + const matrix_type pinv_design = Math::pinv (design); betas.noalias() = measurements * pinv_design; residuals.noalias() = measurements - betas * design; - tvalues.noalias() = betas * scaled_contrasts; - for (size_t n = 0; n < size_t(tvalues.rows()); ++n) - tvalues.row(n).array() /= residuals.row(n).norm(); + const matrix_type XtX = design.transpose() * design; + const matrix_type pinv_XtX = (XtX.transpose() * XtX).fullPivLu().solve (XtX.transpose()); + const size_t degrees_of_freedom = design.rows() - rank(design); + tvalues.noalias() = betas * contrasts; + for (size_t n = 0; n != size_t(tvalues.rows()); ++n) { + const default_type variance = residuals.row(n).squaredNorm() / degrees_of_freedom; + tvalues.row(n).array() /= sqrt(variance * contrasts.row(n).dot (pinv_XtX * contrasts.row(n).transpose())); + } } + matrix_type solve_betas (const matrix_type& measurements, const matrix_type& design) { return design.jacobiSvd(Eigen::ComputeThinU | Eigen::ComputeThinV).solve(measurements.transpose()); @@ -68,9 +91,9 @@ namespace MR - matrix_type abs_effect_size (const matrix_type& measurements, const matrix_type& design, const matrix_type& contrast) + matrix_type abs_effect_size (const matrix_type& measurements, const matrix_type& design, const matrix_type& contrasts) { - return contrast * solve_betas (measurements, design); + return contrasts * solve_betas (measurements, design); } @@ -84,9 +107,9 @@ namespace MR } - matrix_type std_effect_size (const matrix_type& measurements, const matrix_type& design, const matrix_type& contrast) + matrix_type std_effect_size (const matrix_type& measurements, const matrix_type& design, const matrix_type& contrasts) { - return abs_effect_size (measurements, design, contrast).array() / stdev (measurements, design).array(); + return abs_effect_size (measurements, design, contrasts).array() / stdev (measurements, design).array(); } } @@ -97,21 +120,24 @@ namespace MR - GLMTTest::GLMTTest (const matrix_type& measurements, const matrix_type& design, const matrix_type& contrast) : - y (measurements), - X (design), - scaled_contrasts (GLM::scale_contrasts (contrast, X, X.rows()-rank(X)).transpose()) - { - pinvX = Math::pinv (X); - } + 
GLMTTestFixed::GLMTTestFixed (const matrix_type& measurements, const matrix_type& design, const matrix_type& contrast) : + GLMTestBase (measurements, design, contrast), + pinvX (Math::pinv (X)), + scaled_contrasts (GLM::scale_contrasts (contrast, X, X.rows()-rank(X)).transpose()) { } - void GLMTTest::operator() (const vector& perm_labelling, vector_type& stats) const + void GLMTTestFixed::operator() (const vector& perm_labelling, vector_type& output) const { - stats = vector_type::Zero (y.rows()); + output = vector_type::Zero (y.rows()); matrix_type tvalues, betas, residuals, SX, pinvSX; + // TODO Currently the entire design matrix is permuted; + // we may instead prefer Freedman-Lane + // This however would be different for each row in the contrasts matrix, + // since the columns that correspond to nuisance variables + // varies between rows + SX.resize (X.rows(), X.cols()); pinvSX.resize (pinvX.rows(), pinvX.cols()); for (ssize_t i = 0; i < X.rows(); ++i) { @@ -119,22 +145,103 @@ namespace MR pinvSX.col(i) = pinvX.col (perm_labelling[i]); } - pinvSX.transposeInPlace(); SX.transposeInPlace(); + pinvSX.transposeInPlace(); for (ssize_t i = 0; i < y.rows(); i += GLM_BATCH_SIZE) { const matrix_type tmp = y.block (i, 0, std::min (GLM_BATCH_SIZE, (int)(y.rows()-i)), y.cols()); - GLM::ttest (tvalues, SX, pinvSX, tmp, scaled_contrasts, betas, residuals); + GLM::ttest_prescaled (tvalues, SX, pinvSX, tmp, scaled_contrasts, betas, residuals); for (ssize_t n = 0; n < tvalues.rows(); ++n) { value_type val = tvalues(n,0); if (!std::isfinite (val)) val = value_type(0); - stats[i+n] = val; + output[i+n] = val; } } } + GLMTTestVariable::GLMTTestVariable (vector& importers, const matrix_type& measurements, const matrix_type& design, const matrix_type& contrasts) : + GLMTestBase (measurements, design, contrasts), + importers (importers) + { + // Make sure that the specified contrasts reflect the full design matrix (with additional + // data loaded) + assert (contrasts.cols() == X.cols() + importers.size()); + } + + + + void GLMTTestVariable::operator() (const vector& perm_labelling, vector_type& output) const + { + output = vector_type::Zero (y.rows()); + matrix_type tvalues, betas, residuals; + + // Set the size of the permuted design matrix to include the additional columns + // that will be imported from external files + matrix_type SX (X.rows(), X.cols() + importers.size()); + + // Pre-permute the fixed contents of the design matrix + for (ssize_t row = 0; row != X.rows(); ++row) + SX.block(row, 0, 1, X.cols()) = X.row (perm_labelling[row]); + + // Loop over all elements in the input image + for (ssize_t element = 0; element != y.rows(); ++element) { + + // For each element (row in y), need to load the additional data for that element + // for all subjects in order to construct the design matrix + // Would it be preferable to pre-calculate and store these per-element design matrices, + // rather than re-generating them each time? (More RAM, less CPU) + // No, most of the time that subject data will be memory-mapped, so pre-loading (in + // addition to the duplication of the fixed design matrix contents) would hurt bad + matrix_type extra_data (X.rows(), importers.size()); + for (ssize_t col = 0; col != ssize_t(importers.size()); ++col) + extra_data.col(col) = importers[col] (element); + + // Make sure the data from the additional columns is appropriately permuted + // (i.e. 
in the same way as what the fixed portion of the design matrix experienced) + for (ssize_t row = 0; row != X.rows(); ++row) + SX.block(row, X.cols(), 1, importers.size()) = extra_data.row(perm_labelling[row]); + + // Need to pre-scale contrasts if we want to use the ttest() function; + // otherwise, need to define a different function that doesn't rely on pre-scaling + // Went for the latter option; this call doesn't need pre-scaling of contrasts, + // nor does it need a pre-computed pseudo-inverse of the design matrix + GLM::ttest (tvalues, SX.transpose(), y.row(element), c, betas, residuals); + + // FIXME + // Currently output only the first contrast, as is done in GLMTTestFixed + // tvalues should have one row only (since we're only testing a single row), and + // number of columns equal to the number of contrasts + value_type val = tvalues (element, 0); + if (!std::isfinite (val)) + val = value_type(0); + output[element] = val; + + } + } + + + + + GLMFTestFixed::GLMFTestFixed (const matrix_type& measurements, const matrix_type& design, const matrix_type& contrasts, const matrix_type& ftests) : + GLMTestBase (measurements, design, contrasts), + ftests (ftests) { } + + + + void GLMFTestFixed::operator() (const vector& perm_labelling, vector_type& output) const + { + + } + + + + + + + + } } diff --git a/core/math/stats/glm.h b/core/math/stats/glm.h index 9ecc048e3b..ad00a62cf8 100644 --- a/core/math/stats/glm.h +++ b/core/math/stats/glm.h @@ -16,6 +16,7 @@ #define __math_stats_glm_h__ #include "math/least_squares.h" +#include "math/stats/import.h" #include "math/stats/typedefs.h" namespace MR @@ -30,8 +31,35 @@ namespace MR namespace GLM { + + // TODO With the upcoming changes, many of these 'loose' functions become specific to the GLMTTestFixed class + // Therefore they should be moved + + //! scale contrasts for use in t-test - /*! Note each row of the contrast matrix will be treated as an independent contrast. The number + /*! This function pre-scales a contrast matrix in order to make conversion from GLM betas + * to t-values more computationally efficient. + * + * For design matrix X, contrast matrix c, beta vector b and variance o^2, the t-value is calculated as: + * c^T.b + * t = -------------------------- + * sqrt(o^2.c^T.(X^T.X)^-1.c) + * + * Definition of variance (for vector of residuals e): + * e^T.e + * o^2 = ------ + * DOF(X) + * + * This function will generate scaled contrasts c' from c, such that: + * DOF(X) + * c' = c.sqrt(------------------) + * c^T.(X^T.X)^-1.c + * + * c'^T.b + * t = ----------- + * sqrt(e^T.e) + * + * Note each row of the contrast matrix will still be treated as an independent contrast. The number * of elements in each contrast vector must equal the number of columns in the design matrix */ matrix_type scale_contrasts (const matrix_type& contrasts, const matrix_type& design, const size_t degrees_of_freedom); @@ -44,11 +72,29 @@ namespace MR * * Note also that the contrast matrix should already have been scaled * using the GLM::scale_contrasts() function. */ + void ttest_prescaled (matrix_type& tvalues, + const matrix_type& design, + const matrix_type& pinv_design, + const matrix_type& measurements, + const matrix_type& scaled_contrasts, + matrix_type& betas, + matrix_type& residuals); + + + //! generic GLM t-test + /*! note that the data, effects, and residual matrices are transposed. + * This is to take advantage of Eigen's convention of storing + * matrices in column-major format by default. 
+ * + * This version does not require, or take advantage of, pre-calculation + * of the pseudo-inverse of the design matrix. + * + * Note that for this version the contrast matrix should NOT have been scaled + * using the GLM::scale_contrasts() function. */ void ttest (matrix_type& tvalues, const matrix_type& design, - const matrix_type& pinv_design, const matrix_type& measurements, - const matrix_type& scaled_contrasts, + const matrix_type& contrasts, matrix_type& betas, matrix_type& residuals); @@ -97,17 +143,57 @@ namespace MR + // Define a base class for GLM tests + // Should support both T-tests and F-tests + // The latter will always produce 1 column only, whereas the former will produce the same number of columns as there are contrasts + class GLMTestBase { MEMALIGN(GLMTestBase) + public: + GLMTestBase (const matrix_type& measurements, const matrix_type& design, const matrix_type& contrasts) : + y (measurements), + X (design), + c (contrasts), + dim (c.rows()) + { + assert (y.cols() == X.rows()); + assert (c.cols() == X.cols()); + } + + /*! Compute the statistics + * @param perm_labelling a vector to shuffle the rows in the design matrix (for permutation testing) + * @param stats the vector containing the output statistics + */ + virtual void operator() (const vector& perm_labelling, vector_type& output) const = 0; + + size_t num_subjects () const { return y.cols(); } + size_t num_elements () const { return y.rows(); } + size_t num_outputs () const { return dim; } + + protected: + const matrix_type& y, X, c; + size_t dim; + + }; + + + + /** \addtogroup Statistics @{ */ - /*! A class to compute t-statistics using a General Linear Model. */ - class GLMTTest { NOMEMALIGN + /*! A class to compute t-statistics using a fixed General Linear Model. + * This class produces a t-statistic per contrast of interest. It should be used in + * cases where the same design matrix is to be applied for all image elements being + * tested; able to pre-compute a number of matrices before testing, improving + * execution speed. + */ + // TODO Currently this appears to only support a single contrast, since the output is a vector_type + class GLMTTestFixed : public GLMTestBase { MEMALIGN(GLMTTestFixed) public: /*! - * @param measurements a matrix storing the measured data for each subject in a column //TODO + * @param measurements a matrix storing the measured data for each subject in a column * @param design the design matrix (unlike other packages a column of ones is NOT automatically added for correlation analysis) * @param contrast a matrix containing the contrast of interest. */ - GLMTTest (const matrix_type& measurements, const matrix_type& design, const matrix_type& contrast); + GLMTTestFixed (const matrix_type& measurements, const matrix_type& design, const matrix_type& contrast); /*! Compute the t-statistics * @param perm_labelling a vector to shuffle the rows in the design matrix (for permutation testing) @@ -115,17 +201,81 @@ namespace MR * @param max_stat the maximum t-statistic * @param min_stat the minimum t-statistic */ - void operator() (const vector& perm_labelling, vector_type& stats) const; + void operator() (const vector& perm_labelling, vector_type& output) const override; - size_t num_subjects () const { return y.cols(); } - size_t num_elements () const { return y.rows(); } + protected: + const matrix_type pinvX, scaled_contrasts; + }; + //! @} + + + + /** \addtogroup Statistics + @{ */ + /*! A class to compute t-statistics using a 'variable' General Linear Model. 
+ * This class produces a t-statistic per contrast of interest. It should be used in + * cases where additional subject data must be imported into the design matrix before + * computing t-values; the design matrix therefore does not remain fixed for all + * elements being tested, but varies depending on the particular element being tested. + * + * How additional data is imported into the design matrix will depend on the + * particular type of data being tested. Therefore an Importer class must be + * defined that is responsible for acquiring and vectorising these data. + */ + // TODO Define a "standard" interface for data import: Receives as input a + // text string corresponding to a file, and writes the result to a + // vector / block vector + // If this could be defined using a base class, it would remove the templating here... + // The same class would also be used in the cmd/ files to do the initial measurement matrix fill + class GLMTTestVariable : public GLMTestBase { NOMEMALIGN + public: + GLMTTestVariable (vector& importers, const matrix_type& measurements, const matrix_type& design, const matrix_type& contrasts); + + /*! Compute the t-statistics + * @param perm_labelling a vector to shuffle the rows in the design matrix (for permutation testing) + * @param stats the vector containing the output t-statistics + * + * TODO In GLMTTestVariable, this function will additionally need to import the + * extra external data individually for each element tested. + */ + void operator() (const vector& perm_labelling, vector_type& stats) const override; protected: - const matrix_type& y; - matrix_type X, pinvX, scaled_contrasts; + const vector importers; + }; + + + /** \addtogroup Statistics + @{ */ + /*! A class to compute F-statistics using a fixed General Linear Model. + * This class produces a single F-statistic across all contrasts of interest. + */ + class GLMFTestFixed : public GLMTestBase { MEMALIGN(GLMFTestFixed) + public: + /*! + * @param measurements a matrix storing the measured data for each subject in a column + * @param design the design matrix (unlike other packages a column of ones is NOT automatically added for correlation analysis) + * @param contrast a matrix containing the contrast of interest. + */ + GLMFTestFixed (const matrix_type& measurements, const matrix_type& design, const matrix_type& contrasts, const matrix_type& ftests); + + /*! Compute the F-statistics + * @param perm_labelling a vector to shuffle the rows in the design matrix (for permutation testing) + * @param stats the vector containing the output f-statistics + */ + void operator() (const vector& perm_labelling, vector_type& stats) const override; + + protected: + // TODO How to deal with contrast scaling? + // TODO How to deal with f-tests that apply to specific contrasts only? + const matrix_type ftests; }; //! @} + + + + } } } diff --git a/core/math/stats/import.cpp b/core/math/stats/import.cpp new file mode 100644 index 0000000000..d36f4181e4 --- /dev/null +++ b/core/math/stats/import.cpp @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2008-2016 the MRtrix3 contributors + * + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/ + * + * MRtrix is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+ * + * For more details, see www.mrtrix.org + * + */ + +#include "math/stats/import.h" + +namespace MR +{ + namespace Math + { + namespace Stats + { + + + + + + vector_type CohortDataImport::operator() (const size_t element) const + { + vector_type result (files.size()); + for (size_t i = 0; i != files.size(); ++i) + (*files[i]) [element]; // Get the intensity for just a particular element from this input data file + return result; + } + + + + + } + } +} + diff --git a/core/math/stats/import.h b/core/math/stats/import.h new file mode 100644 index 0000000000..e0ad9d8a0c --- /dev/null +++ b/core/math/stats/import.h @@ -0,0 +1,138 @@ +/* + * Copyright (c) 2008-2016 the MRtrix3 contributors + * + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/ + * + * MRtrix is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * For more details, see www.mrtrix.org + * + */ +#ifndef __math_stats_import_h__ +#define __math_stats_import_h__ + +#include +#include +#include +#include + +#include "file/path.h" + +#include "math/stats/typedefs.h" + + +namespace MR +{ + namespace Math + { + namespace Stats + { + + + + /** \addtogroup Statistics + @{ */ + /*! A base class defining the interface for importing subject data + * This class defines the interface for how subject data is imported + * into a GLM measurement matrix. Exactly how the subject data is + * 'vectorised' will depend on the particular type of data being + * tested; nevertheless, the data for each subject should be stored + * in a single column within the measurement matrix (or in some + * cases, within the design matrix). + */ + class SubjectDataImportBase + { + public: + SubjectDataImportBase (const std::string& path) : + path (path) { } + + /*! + * @param column the column of a matrix into which the data from this + * particular file should be loaded + */ + virtual void operator() (Eigen::Block& column) = 0; + + /*! + * @param index extract the data from this file corresponding to a particular + * row in the measurements vector + */ + virtual void operator[] (const size_t index) = 0; + + protected: + const std::string path; + + }; + //! @} + + + + // TODO Implementation of the above class would be more difficult for 0.3.15 version of + // fixelcfestats because it would need to keep track of a mapping between template + // fixel index, and subject voxel / fixel index. Would it be preferable to do the + // 0.3.16 merge first? + + + + // During the initial import, the above class can simply be fed one subject at a time + // according to per-file path + // However for use in GLMTTestVariable, a class is needed that stores a list of text files, + // where each text file contains a list of file names (one for each subject), and + // for each subject a mechanism of data access is spawned & remains open throughout + // processing. + class CohortDataImport + { + public: + CohortDataImport() { } + + // Needs to be its own function rather than the constructor + // so that the correct template type can be invoked explicitly + template + void initialise (const std::string&); + + /*! 
+ * @param index for a particular element being tested (data will be acquired for + * all subjects for that element) + */ + vector_type operator() (const size_t index) const; + + bool valid() const { return files.size(); } + + protected: + std::vector> files; + }; + + + + template + void CohortDataImport::initialise (const std::string& path) + { + // TODO Read the provided text file one at a time + // For each file, create an instance of SubjectDataImport + // (which must derive from SubjectDataImportBase) + const std::string directory = Path::dirname (path); + std::ifstream ifs (path.c_str()); + std::string line; + while (getline (ifs, line)) { + std::string filename (Path::join (directory, line)); + size_t p = filename.find_last_not_of(" \t"); + if (std::string::npos != p) + filename.erase(p+1); + if (!Path::exists (filename)) + throw Exception ("Reading text file \"" + Path::basename (path) + "\": input data file not found: \"" + filename + "\""); + files.push_back (std::make_shared (filename)); + } + } + + + + + } + } +} + + +#endif From be04e4070e260d8c917819801a6566ffa7be5621 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Mon, 3 Apr 2017 15:47:09 +1000 Subject: [PATCH 0029/1471] fixelcfestats: First compilable version using data import class --- cmd/fixelcfestats.cpp | 124 ++++++++++++++++++++++--------------- core/math/stats/import.cpp | 2 +- core/math/stats/import.h | 14 ++++- 3 files changed, 85 insertions(+), 55 deletions(-) diff --git a/cmd/fixelcfestats.cpp b/cmd/fixelcfestats.cpp index 1960fbdbe7..8ce5dc1e3a 100644 --- a/cmd/fixelcfestats.cpp +++ b/cmd/fixelcfestats.cpp @@ -129,7 +129,50 @@ void write_fixel_output (const std::string& filename, -void run() { +// TODO Define data importer class that willl obtain fixel data for a +// specific subject based on the string path to the image file for +// that subject +class SubjectFixelImport : public SubjectDataImportBase +{ NOMEMALIGN + public: + SubjectFixelImport (const std::string& path) : + SubjectDataImportBase (path), + H (Header::open (path)), + data (H.get_image()) + { + for (size_t axis = 1; axis < data.ndim(); ++axis) { + if (data.size(axis) > 1) + throw Exception ("Image file \"" + path + "\" does not contain fixel data (wrong dimensions)"); + } + } + + void operator() (matrix_type::ColXpr column) const override + { + assert (column.rows() == data.size(0)); + Image temp (data); // For thread-safety + column = temp.row(0); + } + + default_type operator[] (const size_t index) const override + { + assert (index < data.size(0)); + Image temp (data); // For thread-safety + temp.index(0) = index; + return default_type(temp.value()); + } + + const Header& header() const { return H; } + + private: + Header H; + const Image data; + +}; + + + +void run() +{ auto opt = get_options ("negative"); bool compute_negative_contrast = opt.size() ? 
true : false; @@ -173,30 +216,18 @@ void run() { } } } + // Read identifiers and check files exist - vector identifiers; - Header header; - { - ProgressBar progress ("validating input files..."); - std::ifstream ifs (argument[1].c_str()); - std::string temp; - while (getline (ifs, temp)) { - std::string filename (Path::join (input_fixel_directory, temp)); - size_t p = filename.find_last_not_of(" \t"); - if (std::string::npos != p) - filename.erase(p+1); - if (!MR::Path::exists (filename)) - throw Exception ("input fixel image not found: " + filename); - header = Header::open (filename); - Fixel::fixels_match (index_header, header); - identifiers.push_back (filename); - progress++; - } + CohortDataImport importer; + importer.initialise (argument[1]); + for (size_t i = 0; i != importer.size(); ++i) { + if (!Fixel::fixels_match (index_header, dynamic_cast(importer[i].get())->header())) + throw Exception ("Fixel data file \"" + importer[i]->name() + "\" does not match template fixel image"); } // Load design matrix: const matrix_type design = load_matrix (argument[2]); - if (design.rows() != (ssize_t)identifiers.size()) + if (design.rows() != (ssize_t)importer.size()) throw Exception ("number of input files does not match number of rows in design matrix"); // Load permutations file if supplied @@ -213,10 +244,14 @@ void run() { opt = get_options("permutations_nonstationary"); vector > permutations_nonstationary; if (opt.size()) { - permutations_nonstationary = Math::Stats::Permutation::load_permutations_file (opt[0][0]); - nperms_nonstationary = permutations_nonstationary.size(); - if (permutations_nonstationary[0].size() != (size_t)design.rows()) - throw Exception ("number of rows in the nonstationary permutations file (" + str(opt[0][0]) + ") does not match number of rows in design matrix"); + if (do_nonstationary_adjustment) { + permutations_nonstationary = Math::Stats::Permutation::load_permutations_file (opt[0][0]); + nperms_nonstationary = permutations_nonstationary.size(); + if (permutations_nonstationary[0].size() != (size_t)design.rows()) + throw Exception ("number of rows in the nonstationary permutations file (" + str(opt[0][0]) + ") does not match number of rows in design matrix"); + } else { + WARN ("-permutations_nonstationary option ignored: nonstationarity correction is not being performed (-nonstationary option)"); + } } // Load contrast matrix: @@ -306,7 +341,7 @@ void run() { } } - Header output_header (header); + Header output_header (dynamic_cast(importer[0].get())->header()); output_header.keyval()["num permutations"] = str(num_perms); output_header.keyval()["dh"] = str(cfe_dh); output_header.keyval()["cfe_e"] = str(cfe_e); @@ -318,42 +353,29 @@ void run() { // Load input data - matrix_type data (num_fixels, identifiers.size()); + matrix_type data (num_fixels, importer.size()); data.setZero(); - { - ProgressBar progress ("loading input images", identifiers.size()); - for (size_t subject = 0; subject < identifiers.size(); subject++) { - LogLevelLatch log_level (0); - - auto subject_data = Image::open (identifiers[subject]).with_direct_io(); - vector subject_data_vector (num_fixels, 0.0); - for (auto i = Loop (index_image, 0, 3)(index_image); i; ++i) { - index_image.index(3) = 1; - uint32_t offset = index_image.value(); - uint32_t fixel_index = 0; - for (auto f = Fixel::Loop (index_image) (subject_data); f; ++f, ++fixel_index) { - if (!std::isfinite(static_cast(subject_data.value()))) - throw Exception ("subject data file " + identifiers[subject] + " contains non-finite 
value: " + str(subject_data.value())); - subject_data_vector[offset + fixel_index] = subject_data.value(); - } - } + { + ProgressBar progress ("loading input images", importer.size()); + for (size_t subject = 0; subject < importer.size(); subject++) { + (*importer[subject]) (data.col (subject)); // Smooth the data + vector_type smoothed_data (vector_type::Zero (num_fixels)); for (size_t fixel = 0; fixel < num_fixels; ++fixel) { value_type value = 0.0; std::map::const_iterator it = smoothing_weights[fixel].begin(); - for (; it != smoothing_weights[fixel].end(); ++it) { - value += subject_data_vector[it->first] * it->second; - } - data (fixel, subject) = value; + for (; it != smoothing_weights[fixel].end(); ++it) + value += data (it->first, subject) * it->second; + smoothed_data (fixel) = value; } - progress++; + if (!smoothed_data.allFinite()) + throw Exception ("Input fixel data \"" + importer[subject]->name() + "\" contains at least one non-finite value"); + data.col (subject) = smoothed_data; } + progress++; } - - if (!data.allFinite()) - throw Exception ("input data contains non-finite value(s)"); { ProgressBar progress ("outputting beta coefficients, effect size and standard deviation"); auto temp = Math::Stats::GLM::solve_betas (data, design); diff --git a/core/math/stats/import.cpp b/core/math/stats/import.cpp index d36f4181e4..815f6e89df 100644 --- a/core/math/stats/import.cpp +++ b/core/math/stats/import.cpp @@ -30,7 +30,7 @@ namespace MR { vector_type result (files.size()); for (size_t i = 0; i != files.size(); ++i) - (*files[i]) [element]; // Get the intensity for just a particular element from this input data file + result[i] = (*files[i]) [element]; // Get the intensity for just a particular element from this input data file return result; } diff --git a/core/math/stats/import.h b/core/math/stats/import.h index e0ad9d8a0c..d6953e1bc7 100644 --- a/core/math/stats/import.h +++ b/core/math/stats/import.h @@ -54,13 +54,15 @@ namespace MR * @param column the column of a matrix into which the data from this * particular file should be loaded */ - virtual void operator() (Eigen::Block& column) = 0; + virtual void operator() (matrix_type::ColXpr column) const = 0; /*! 
* @param index extract the data from this file corresponding to a particular * row in the measurements vector */ - virtual void operator[] (const size_t index) = 0; + virtual default_type operator[] (const size_t index) const = 0; + + const std::string& name() const { return path; } protected: const std::string path; @@ -99,7 +101,13 @@ namespace MR */ vector_type operator() (const size_t index) const; - bool valid() const { return files.size(); } + size_t size() const { return files.size(); } + + std::shared_ptr operator[] (const size_t i) const + { + assert (i < files.size()); + return files[i]; + } protected: std::vector> files; From bafaf70bcde6e4e674a87b90c10004172401bf87 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Mon, 3 Apr 2017 20:50:59 +1000 Subject: [PATCH 0030/1471] fixelcfestats: First compilable version with per-fixel design matrix --- cmd/connectomestats.cpp | 2 +- cmd/fixelcfestats.cpp | 52 ++++-- cmd/mrclusterstats.cpp | 2 +- cmd/vectorstats.cpp | 4 +- core/math/stats/glm.cpp | 13 +- core/math/stats/glm.h | 8 +- src/stats/permstack.cpp | 2 +- src/stats/permstack.h | 2 +- src/stats/permtest.cpp | 261 ++++++++++++++++++++++++++ src/stats/permtest.h | 401 +++++++++++++--------------------------- 10 files changed, 448 insertions(+), 299 deletions(-) diff --git a/cmd/connectomestats.cpp b/cmd/connectomestats.cpp index ed1293c301..694fde1d17 100644 --- a/cmd/connectomestats.cpp +++ b/cmd/connectomestats.cpp @@ -262,7 +262,7 @@ void run() save_vector (stdevs.col(0), output_prefix + "_std_dev.csv"); } - Math::Stats::GLMTTestFixed glm_ttest (data, design, contrast); + std::shared_ptr glm_ttest (new Math::Stats::GLMTTestFixed (data, design, contrast)); // If performing non-stationarity adjustment we need to pre-compute the empirical statistic vector_type empirical_statistic; diff --git a/cmd/fixelcfestats.cpp b/cmd/fixelcfestats.cpp index 8ce5dc1e3a..e5676e2ab9 100644 --- a/cmd/fixelcfestats.cpp +++ b/cmd/fixelcfestats.cpp @@ -102,6 +102,10 @@ void usage () + Option ("negative", "automatically test the negative (opposite) contrast. 
By computing the opposite contrast simultaneously " "the computation time is reduced.") + + Option ("column", "add a column to the design matrix corresponding to subject fixel-wise values " + "(the contrast vector length must include columns for these additions)").allow_multiple() + + Argument ("path").type_file_in() + + Option ("smooth", "smooth the fixel value along the fibre tracts using a Gaussian kernel with the supplied FWHM (default: " + str(DEFAULT_SMOOTHING_STD, 2) + "mm)") + Argument ("FWHM").type_float (0.0, 200.0) @@ -129,7 +133,7 @@ void write_fixel_output (const std::string& filename, -// TODO Define data importer class that willl obtain fixel data for a +// Define data importer class that willl obtain fixel data for a // specific subject based on the string path to the image file for // that subject class SubjectFixelImport : public SubjectDataImportBase @@ -254,11 +258,22 @@ void run() } } - // Load contrast matrix: + // Load contrast matrix const matrix_type contrast = load_matrix (argument[3]); - if (contrast.cols() != design.cols()) - throw Exception ("the number of columns per contrast does not equal the number of columns in the design matrix"); + // Before validating the contrast matrix, we first need to see if there are any + // additional design matrix columns coming from fixel-wise subject data + vector extra_columns; + opt = get_options ("column"); + for (size_t i = 0; i != opt.size(); ++i) { + extra_columns.push_back (CohortDataImport()); + extra_columns[i].initialise (opt[i][0]); + } + + if (contrast.cols() + ssize_t(extra_columns.size()) != design.cols()) + throw Exception ("the number of columns per contrast (" + str(contrast.cols()) + ")" + + (extra_columns.size() ? " (in addition to the " + str(extra_columns.size()) + " uses of -column)" : "") + + " does not equal the number of columns in the design matrix (" + str(design.cols()) + ")"); if (contrast.rows() > 1) throw Exception ("only a single contrast vector (defined as a row) is currently supported"); @@ -376,7 +391,9 @@ void run() progress++; } - { + if (extra_columns.size()) { + WARN ("Beta coefficients, effect size and standard deviation outputs not yet implemented for fixel-wise extra columns"); + } else { ProgressBar progress ("outputting beta coefficients, effect size and standard deviation"); auto temp = Math::Stats::GLM::solve_betas (data, design); @@ -392,20 +409,27 @@ void run() write_fixel_output (Path::join (output_fixel_directory, "std_dev.mif"), temp.row(0), output_header); } - Math::Stats::GLMTTestFixed glm_ttest (data, design, contrast); - std::shared_ptr cfe_integrator; - cfe_integrator.reset (new Stats::CFE::Enhancer (connectivity_matrix, cfe_dh, cfe_e, cfe_h)); - vector_type empirical_cfe_statistic; + // Construct the class for performing the initial statistical tests + std::shared_ptr glm_test; + if (extra_columns.size()) { + glm_test.reset (new GLMTTestVariable (extra_columns, data, design, contrast)); + } else { + glm_test.reset (new GLMTTestFixed (data, design, contrast)); + } + + // Construct the class for performing fixel-based statistical enhancement + std::shared_ptr cfe_integrator (new Stats::CFE::Enhancer (connectivity_matrix, cfe_dh, cfe_e, cfe_h)); // If performing non-stationarity adjustment we need to pre-compute the empirical CFE statistic + vector_type empirical_cfe_statistic; if (do_nonstationary_adjustment) { if (permutations_nonstationary.size()) { Stats::PermTest::PermutationStack permutations (permutations_nonstationary, "precomputing empirical statistic for 
non-stationarity adjustment"); - Stats::PermTest::precompute_empirical_stat (glm_ttest, cfe_integrator, permutations, empirical_cfe_statistic); + Stats::PermTest::precompute_empirical_stat (glm_test, cfe_integrator, permutations, empirical_cfe_statistic); } else { Stats::PermTest::PermutationStack permutations (nperms_nonstationary, design.rows(), "precomputing empirical statistic for non-stationarity adjustment", false); - Stats::PermTest::precompute_empirical_stat (glm_ttest, cfe_integrator, permutations, empirical_cfe_statistic); + Stats::PermTest::precompute_empirical_stat (glm_test, cfe_integrator, permutations, empirical_cfe_statistic); } output_header.keyval()["nonstationary adjustment"] = str(true); write_fixel_output (Path::join (output_fixel_directory, "cfe_empirical.mif"), empirical_cfe_statistic, output_header); @@ -420,7 +444,7 @@ void run() if (compute_negative_contrast) cfe_output_neg.reset (new vector_type (num_fixels)); - Stats::PermTest::precompute_default_permutation (glm_ttest, cfe_integrator, empirical_cfe_statistic, cfe_output, cfe_output_neg, tvalue_output); + Stats::PermTest::precompute_default_permutation (glm_test, cfe_integrator, empirical_cfe_statistic, cfe_output, cfe_output_neg, tvalue_output); write_fixel_output (Path::join (output_fixel_directory, "cfe.mif"), cfe_output, output_header); write_fixel_output (Path::join (output_fixel_directory, "tvalue.mif"), tvalue_output, output_header); @@ -440,12 +464,12 @@ void run() } if (permutations.size()) { - Stats::PermTest::run_permutations (permutations, glm_ttest, cfe_integrator, empirical_cfe_statistic, + Stats::PermTest::run_permutations (permutations, glm_test, cfe_integrator, empirical_cfe_statistic, cfe_output, cfe_output_neg, perm_distribution, perm_distribution_neg, uncorrected_pvalues, uncorrected_pvalues_neg); } else { - Stats::PermTest::run_permutations (num_perms, glm_ttest, cfe_integrator, empirical_cfe_statistic, + Stats::PermTest::run_permutations (num_perms, glm_test, cfe_integrator, empirical_cfe_statistic, cfe_output, cfe_output_neg, perm_distribution, perm_distribution_neg, uncorrected_pvalues, uncorrected_pvalues_neg); diff --git a/cmd/mrclusterstats.cpp b/cmd/mrclusterstats.cpp index fa404a7c25..7f12c25e24 100644 --- a/cmd/mrclusterstats.cpp +++ b/cmd/mrclusterstats.cpp @@ -210,7 +210,7 @@ void run() { if (compute_negative_contrast) default_cluster_output_neg.reset (new vector_type (num_vox)); - Math::Stats::GLMTTestFixed glm (data, design, contrast); + std::shared_ptr glm (new Math::Stats::GLMTTestFixed (data, design, contrast)); std::shared_ptr enhancer; if (use_tfce) { diff --git a/cmd/vectorstats.cpp b/cmd/vectorstats.cpp index 07dc7c9b3b..a3f2983d4c 100644 --- a/cmd/vectorstats.cpp +++ b/cmd/vectorstats.cpp @@ -158,7 +158,7 @@ void run() save_vector (stdevs.col(0), output_prefix + "_std_dev.csv"); } - Math::Stats::GLMTTestFixed glm_ttest (data, design, contrast); + std::shared_ptr glm_ttest (new Math::Stats::GLMTTestFixed (data, design, contrast)); // Precompute default statistic // Don't use convenience function: No enhancer! 
@@ -167,7 +167,7 @@ void run() for (size_t i = 0; i != filenames.size(); ++i) default_permutation[i] = i; vector_type default_tvalues; - glm_ttest (default_permutation, default_tvalues); + (*glm_ttest) (default_permutation, default_tvalues); save_vector (default_tvalues, output_prefix + "_tvalue.csv"); // Perform permutation testing diff --git a/core/math/stats/glm.cpp b/core/math/stats/glm.cpp index 21fccc93fa..916701903a 100644 --- a/core/math/stats/glm.cpp +++ b/core/math/stats/glm.cpp @@ -161,7 +161,7 @@ namespace MR - GLMTTestVariable::GLMTTestVariable (vector& importers, const matrix_type& measurements, const matrix_type& design, const matrix_type& contrasts) : + GLMTTestVariable::GLMTTestVariable (const vector& importers, const matrix_type& measurements, const matrix_type& design, const matrix_type& contrasts) : GLMTestBase (measurements, design, contrasts), importers (importers) { @@ -223,6 +223,17 @@ namespace MR + matrix_type GLMTTestVariable::default_design (const matrix_type& design, const size_t index) const + { + matrix_type output (design.rows(), design.cols() + importers.size()); + output.block (0, 0, design.rows(), design.cols()) = design; + for (size_t i = 0; i != importers.size(); ++i) + output.col (design.cols() + i) = importers[i] (index); + return output; + } + + + GLMFTestFixed::GLMFTestFixed (const matrix_type& measurements, const matrix_type& design, const matrix_type& contrasts, const matrix_type& ftests) : GLMTestBase (measurements, design, contrasts), diff --git a/core/math/stats/glm.h b/core/math/stats/glm.h index ad00a62cf8..035fc919cb 100644 --- a/core/math/stats/glm.h +++ b/core/math/stats/glm.h @@ -229,7 +229,7 @@ namespace MR // The same class would also be used in the cmd/ files to do the initial measurement matrix fill class GLMTTestVariable : public GLMTestBase { NOMEMALIGN public: - GLMTTestVariable (vector& importers, const matrix_type& measurements, const matrix_type& design, const matrix_type& contrasts); + GLMTTestVariable (const vector& importers, const matrix_type& measurements, const matrix_type& design, const matrix_type& contrasts); /*! 
Compute the t-statistics * @param perm_labelling a vector to shuffle the rows in the design matrix (for permutation testing) @@ -240,8 +240,12 @@ namespace MR */ void operator() (const vector& perm_labelling, vector_type& stats) const override; + // TODO A function to acquire the design matrix for the default permutation + // (note that this needs to be re-run for each element being tested) + matrix_type default_design (const matrix_type& design, const size_t index) const; + protected: - const vector importers; + const vector& importers; }; diff --git a/src/stats/permstack.cpp b/src/stats/permstack.cpp index e6ddde239a..89b53bd963 100644 --- a/src/stats/permstack.cpp +++ b/src/stats/permstack.cpp @@ -31,7 +31,7 @@ namespace MR Math::Stats::Permutation::generate (num_permutations, num_samples, permutations, include_default); } - PermutationStack::PermutationStack (vector >& permutations, const std::string msg) : + PermutationStack::PermutationStack (const vector< vector >& permutations, const std::string msg) : num_permutations (permutations.size()), permutations (permutations), counter (0), diff --git a/src/stats/permstack.h b/src/stats/permstack.h index 1c1629ba1d..252398f44b 100644 --- a/src/stats/permstack.h +++ b/src/stats/permstack.h @@ -43,7 +43,7 @@ namespace MR public: PermutationStack (const size_t num_permutations, const size_t num_samples, const std::string msg, const bool include_default = true); - PermutationStack (vector >& permutations, const std::string msg); + PermutationStack (const vector< vector >& permutations, const std::string msg); bool operator() (Permutation&); diff --git a/src/stats/permtest.cpp b/src/stats/permtest.cpp index 292999835b..a62ce6ff2c 100644 --- a/src/stats/permtest.cpp +++ b/src/stats/permtest.cpp @@ -56,6 +56,267 @@ namespace MR + PreProcessor::PreProcessor (const std::shared_ptr stats_calculator, + const std::shared_ptr enhancer, + vector_type& global_enhanced_sum, + vector& global_enhanced_count) : + stats_calculator (stats_calculator), + enhancer (enhancer), global_enhanced_sum (global_enhanced_sum), + global_enhanced_count (global_enhanced_count), enhanced_sum (vector_type::Zero (global_enhanced_sum.size())), + enhanced_count (global_enhanced_sum.size(), 0.0), stats (global_enhanced_sum.size()), + enhanced_stats (global_enhanced_sum.size()), mutex (new std::mutex()) + { + assert (stats_calculator); + assert (enhancer); + } + + + + PreProcessor::~PreProcessor () + { + std::lock_guard lock (*mutex); + for (ssize_t i = 0; i < global_enhanced_sum.size(); ++i) { + global_enhanced_sum[i] += enhanced_sum[i]; + global_enhanced_count[i] += enhanced_count[i]; + } + } + + + + bool PreProcessor::operator() (const Permutation& permutation) + { + (*stats_calculator) (permutation.data, stats); + (*enhancer) (stats, enhanced_stats); + for (ssize_t i = 0; i < enhanced_stats.size(); ++i) { + if (enhanced_stats[i] > 0.0) { + enhanced_sum[i] += enhanced_stats[i]; + enhanced_count[i]++; + } + } + return true; + } + + + + + + + + Processor::Processor (const std::shared_ptr stats_calculator, + const std::shared_ptr enhancer, + const vector_type& empirical_enhanced_statistics, + const vector_type& default_enhanced_statistics, + const std::shared_ptr default_enhanced_statistics_neg, + vector_type& perm_dist_pos, + std::shared_ptr perm_dist_neg, + vector& global_uncorrected_pvalue_counter, + std::shared_ptr< vector > global_uncorrected_pvalue_counter_neg) : + stats_calculator (stats_calculator), + enhancer (enhancer), empirical_enhanced_statistics 
(empirical_enhanced_statistics), + default_enhanced_statistics (default_enhanced_statistics), default_enhanced_statistics_neg (default_enhanced_statistics_neg), + statistics (stats_calculator->num_elements()), enhanced_statistics (stats_calculator->num_elements()), + uncorrected_pvalue_counter (stats_calculator->num_elements(), 0), + perm_dist_pos (perm_dist_pos), perm_dist_neg (perm_dist_neg), + global_uncorrected_pvalue_counter (global_uncorrected_pvalue_counter), + global_uncorrected_pvalue_counter_neg (global_uncorrected_pvalue_counter_neg), + mutex (new std::mutex()) + { + assert (stats_calculator); + if (global_uncorrected_pvalue_counter_neg) + uncorrected_pvalue_counter_neg.reset (new vector(stats_calculator->num_elements(), 0)); + } + + + + Processor::~Processor () + { + std::lock_guard lock (*mutex); + for (size_t i = 0; i < stats_calculator->num_elements(); ++i) { + global_uncorrected_pvalue_counter[i] += uncorrected_pvalue_counter[i]; + if (global_uncorrected_pvalue_counter_neg) + (*global_uncorrected_pvalue_counter_neg)[i] = (*uncorrected_pvalue_counter_neg)[i]; + } + } + + + + bool Processor::operator() (const Permutation& permutation) + { + (*stats_calculator) (permutation.data, statistics); + if (enhancer) { + perm_dist_pos[permutation.index] = (*enhancer) (statistics, enhanced_statistics); + } else { + enhanced_statistics = statistics; + perm_dist_pos[permutation.index] = enhanced_statistics.maxCoeff(); + } + + if (empirical_enhanced_statistics.size()) { + perm_dist_pos[permutation.index] = 0.0; + for (ssize_t i = 0; i < enhanced_statistics.size(); ++i) { + enhanced_statistics[i] /= empirical_enhanced_statistics[i]; + perm_dist_pos[permutation.index] = std::max(perm_dist_pos[permutation.index], enhanced_statistics[i]); + } + } + + for (ssize_t i = 0; i < enhanced_statistics.size(); ++i) { + if (default_enhanced_statistics[i] > enhanced_statistics[i]) + uncorrected_pvalue_counter[i]++; + } + + // Compute the opposite contrast + if (perm_dist_neg) { + statistics = -statistics; + + (*perm_dist_neg)[permutation.index] = (*enhancer) (statistics, enhanced_statistics); + + if (empirical_enhanced_statistics.size()) { + (*perm_dist_neg)[permutation.index] = 0.0; + for (ssize_t i = 0; i < enhanced_statistics.size(); ++i) { + enhanced_statistics[i] /= empirical_enhanced_statistics[i]; + (*perm_dist_neg)[permutation.index] = std::max ((*perm_dist_neg)[permutation.index], enhanced_statistics[i]); + } + } + + for (ssize_t i = 0; i < enhanced_statistics.size(); ++i) { + if ((*default_enhanced_statistics_neg)[i] > enhanced_statistics[i]) + (*uncorrected_pvalue_counter_neg)[i]++; + } + } + return true; + } + + + + + + + + void precompute_empirical_stat (const std::shared_ptr stats_calculator, + const std::shared_ptr enhancer, + PermutationStack& perm_stack, vector_type& empirical_statistic) + { + vector global_enhanced_count (empirical_statistic.size(), 0); + { + PreProcessor preprocessor (stats_calculator, enhancer, empirical_statistic, global_enhanced_count); + Thread::run_queue (perm_stack, Permutation(), Thread::multi (preprocessor)); + } + for (ssize_t i = 0; i < empirical_statistic.size(); ++i) { + if (global_enhanced_count[i] > 0) + empirical_statistic[i] /= static_cast (global_enhanced_count[i]); + } + } + + + + + void precompute_default_permutation (const std::shared_ptr stats_calculator, + const std::shared_ptr enhancer, + const vector_type& empirical_enhanced_statistic, + vector_type& default_enhanced_statistics, + std::shared_ptr default_enhanced_statistics_neg, + vector_type& 
default_statistics) + { + vector default_labelling (stats_calculator->num_subjects()); + for (size_t i = 0; i < default_labelling.size(); ++i) + default_labelling[i] = i; + (*stats_calculator) (default_labelling, default_statistics); + (*enhancer) (default_statistics, default_enhanced_statistics); + + if (empirical_enhanced_statistic.size()) + default_enhanced_statistics /= empirical_enhanced_statistic; + + // Compute the opposite contrast + if (default_enhanced_statistics_neg) { + default_statistics = -default_statistics; + + (*enhancer) (default_statistics, *default_enhanced_statistics_neg); + + if (empirical_enhanced_statistic.size()) + (*default_enhanced_statistics_neg) /= empirical_enhanced_statistic; + + // revert default_statistics to positive contrast for output + default_statistics = -default_statistics; + } + } + + + + + void run_permutations (PermutationStack& perm_stack, + const std::shared_ptr stats_calculator, + const std::shared_ptr enhancer, + const vector_type& empirical_enhanced_statistic, + const vector_type& default_enhanced_statistics, + const std::shared_ptr default_enhanced_statistics_neg, + vector_type& perm_dist_pos, + std::shared_ptr perm_dist_neg, + vector_type& uncorrected_pvalues, + std::shared_ptr uncorrected_pvalues_neg) + { + vector global_uncorrected_pvalue_count (stats_calculator->num_elements(), 0); + std::shared_ptr< vector > global_uncorrected_pvalue_count_neg; + if (perm_dist_neg) + global_uncorrected_pvalue_count_neg.reset (new vector (stats_calculator->num_elements(), 0)); + + { + Processor processor (stats_calculator, enhancer, + empirical_enhanced_statistic, + default_enhanced_statistics, default_enhanced_statistics_neg, + perm_dist_pos, perm_dist_neg, + global_uncorrected_pvalue_count, global_uncorrected_pvalue_count_neg); + Thread::run_queue (perm_stack, Permutation(), Thread::multi (processor)); + } + + for (size_t i = 0; i < stats_calculator->num_elements(); ++i) { + uncorrected_pvalues[i] = global_uncorrected_pvalue_count[i] / default_type(perm_stack.num_permutations); + if (perm_dist_neg) + (*uncorrected_pvalues_neg)[i] = (*global_uncorrected_pvalue_count_neg)[i] / default_type(perm_stack.num_permutations); + } + } + + + + + void run_permutations (const vector>& permutations, + const std::shared_ptr stats_calculator, + const std::shared_ptr enhancer, + const vector_type& empirical_enhanced_statistic, + const vector_type& default_enhanced_statistics, + const std::shared_ptr default_enhanced_statistics_neg, + vector_type& perm_dist_pos, + std::shared_ptr perm_dist_neg, + vector_type& uncorrected_pvalues, + std::shared_ptr uncorrected_pvalues_neg) + { + PermutationStack perm_stack (permutations, "running " + str(permutations.size()) + " permutations"); + + run_permutations (perm_stack, stats_calculator, enhancer, empirical_enhanced_statistic, default_enhanced_statistics, default_enhanced_statistics_neg, + perm_dist_pos, perm_dist_neg, uncorrected_pvalues, uncorrected_pvalues_neg); + } + + + + + void run_permutations (const size_t num_permutations, + const std::shared_ptr stats_calculator, + const std::shared_ptr enhancer, + const vector_type& empirical_enhanced_statistic, + const vector_type& default_enhanced_statistics, + const std::shared_ptr default_enhanced_statistics_neg, + vector_type& perm_dist_pos, + std::shared_ptr perm_dist_neg, + vector_type& uncorrected_pvalues, + std::shared_ptr uncorrected_pvalues_neg) + { + PermutationStack perm_stack (num_permutations, stats_calculator->num_subjects(), "running " + str(num_permutations) + " 
permutations"); + + run_permutations (perm_stack, stats_calculator, enhancer, empirical_enhanced_statistic, default_enhanced_statistics, default_enhanced_statistics_neg, + perm_dist_pos, perm_dist_neg, uncorrected_pvalues, uncorrected_pvalues_neg); + } + + + + } } } diff --git a/src/stats/permtest.h b/src/stats/permtest.h index d28e06ed9b..0613a62e50 100644 --- a/src/stats/permtest.h +++ b/src/stats/permtest.h @@ -23,6 +23,7 @@ #include "thread.h" #include "thread_queue.h" #include "math/math.h" +#include "math/stats/glm.h" #include "math/stats/permutation.h" #include "math/stats/typedefs.h" @@ -52,282 +53,130 @@ namespace MR /*! A class to pre-compute the empirical enhanced statistic image for non-stationarity correction */ - template - class PreProcessor { MEMALIGN (PreProcessor) - public: - PreProcessor (const StatsType& stats_calculator, - const std::shared_ptr enhancer, - vector_type& global_enhanced_sum, - vector& global_enhanced_count) : - stats_calculator (stats_calculator), - enhancer (enhancer), global_enhanced_sum (global_enhanced_sum), - global_enhanced_count (global_enhanced_count), enhanced_sum (vector_type::Zero (global_enhanced_sum.size())), - enhanced_count (global_enhanced_sum.size(), 0.0), stats (global_enhanced_sum.size()), - enhanced_stats (global_enhanced_sum.size()), mutex (new std::mutex()) {} - - ~PreProcessor () - { - std::lock_guard lock (*mutex); - for (ssize_t i = 0; i < global_enhanced_sum.size(); ++i) { - global_enhanced_sum[i] += enhanced_sum[i]; - global_enhanced_count[i] += enhanced_count[i]; - } - } - - bool operator() (const Permutation& permutation) - { - stats_calculator (permutation.data, stats); - (*enhancer) (stats, enhanced_stats); - for (ssize_t i = 0; i < enhanced_stats.size(); ++i) { - if (enhanced_stats[i] > 0.0) { - enhanced_sum[i] += enhanced_stats[i]; - enhanced_count[i]++; - } - } - return true; - } - - protected: - StatsType stats_calculator; - std::shared_ptr enhancer; - vector_type& global_enhanced_sum; - vector& global_enhanced_count; - vector_type enhanced_sum; - vector enhanced_count; - vector_type stats; - vector_type enhanced_stats; - std::shared_ptr mutex; - }; - - - - - /*! 
A class to perform the permutation testing */ - template - class Processor { MEMALIGN (Processor) - public: - Processor (const StatsType& stats_calculator, - const std::shared_ptr enhancer, - const vector_type& empirical_enhanced_statistics, - const vector_type& default_enhanced_statistics, - const std::shared_ptr default_enhanced_statistics_neg, - vector_type& perm_dist_pos, - std::shared_ptr perm_dist_neg, - vector& global_uncorrected_pvalue_counter, - std::shared_ptr< vector > global_uncorrected_pvalue_counter_neg) : - stats_calculator (stats_calculator), - enhancer (enhancer), empirical_enhanced_statistics (empirical_enhanced_statistics), - default_enhanced_statistics (default_enhanced_statistics), default_enhanced_statistics_neg (default_enhanced_statistics_neg), - statistics (stats_calculator.num_elements()), enhanced_statistics (stats_calculator.num_elements()), - uncorrected_pvalue_counter (stats_calculator.num_elements(), 0), - perm_dist_pos (perm_dist_pos), perm_dist_neg (perm_dist_neg), - global_uncorrected_pvalue_counter (global_uncorrected_pvalue_counter), - global_uncorrected_pvalue_counter_neg (global_uncorrected_pvalue_counter_neg), - mutex (new std::mutex()) - { - if (global_uncorrected_pvalue_counter_neg) - uncorrected_pvalue_counter_neg.reset (new vector(stats_calculator.num_elements(), 0)); - } - - - ~Processor () { - std::lock_guard lock (*mutex); - for (size_t i = 0; i < stats_calculator.num_elements(); ++i) { - global_uncorrected_pvalue_counter[i] += uncorrected_pvalue_counter[i]; - if (global_uncorrected_pvalue_counter_neg) - (*global_uncorrected_pvalue_counter_neg)[i] = (*uncorrected_pvalue_counter_neg)[i]; - } - } - - - bool operator() (const Permutation& permutation) - { - stats_calculator (permutation.data, statistics); - if (enhancer) { - perm_dist_pos[permutation.index] = (*enhancer) (statistics, enhanced_statistics); - } else { - enhanced_statistics = statistics; - perm_dist_pos[permutation.index] = enhanced_statistics.maxCoeff(); - } - - if (empirical_enhanced_statistics.size()) { - perm_dist_pos[permutation.index] = 0.0; - for (ssize_t i = 0; i < enhanced_statistics.size(); ++i) { - enhanced_statistics[i] /= empirical_enhanced_statistics[i]; - perm_dist_pos[permutation.index] = std::max(perm_dist_pos[permutation.index], enhanced_statistics[i]); - } - } - - for (ssize_t i = 0; i < enhanced_statistics.size(); ++i) { - if (default_enhanced_statistics[i] > enhanced_statistics[i]) - uncorrected_pvalue_counter[i]++; - } - - // Compute the opposite contrast - if (perm_dist_neg) { - statistics = -statistics; - - (*perm_dist_neg)[permutation.index] = (*enhancer) (statistics, enhanced_statistics); - - if (empirical_enhanced_statistics.size()) { - (*perm_dist_neg)[permutation.index] = 0.0; - for (ssize_t i = 0; i < enhanced_statistics.size(); ++i) { - enhanced_statistics[i] /= empirical_enhanced_statistics[i]; - (*perm_dist_neg)[permutation.index] = std::max ((*perm_dist_neg)[permutation.index], enhanced_statistics[i]); - } - } - - for (ssize_t i = 0; i < enhanced_statistics.size(); ++i) { - if ((*default_enhanced_statistics_neg)[i] > enhanced_statistics[i]) - (*uncorrected_pvalue_counter_neg)[i]++; - } - } - return true; - } - - protected: - StatsType stats_calculator; - std::shared_ptr enhancer; - const vector_type& empirical_enhanced_statistics; - const vector_type& default_enhanced_statistics; - const std::shared_ptr default_enhanced_statistics_neg; - vector_type statistics; - vector_type enhanced_statistics; - vector uncorrected_pvalue_counter; - std::shared_ptr 
> uncorrected_pvalue_counter_neg; - vector_type& perm_dist_pos; - std::shared_ptr perm_dist_neg; - - vector& global_uncorrected_pvalue_counter; - std::shared_ptr > global_uncorrected_pvalue_counter_neg; - std::shared_ptr mutex; - }; - - - // Precompute the empircal test statistic for non-stationarity adjustment - template - void precompute_empirical_stat (const StatsType& stats_calculator, const std::shared_ptr enhancer, - PermutationStack& perm_stack, vector_type& empirical_statistic) - { - vector global_enhanced_count (empirical_statistic.size(), 0); - { - PreProcessor preprocessor (stats_calculator, enhancer, empirical_statistic, global_enhanced_count); - Thread::run_queue (perm_stack, Permutation(), Thread::multi (preprocessor)); - } - for (ssize_t i = 0; i < empirical_statistic.size(); ++i) { - if (global_enhanced_count[i] > 0) - empirical_statistic[i] /= static_cast (global_enhanced_count[i]); - } - } - - - - // Precompute the default statistic image and enhanced statistic. We need to precompute this for calculating the uncorrected p-values. - template - void precompute_default_permutation (const StatsType& stats_calculator, - const std::shared_ptr enhancer, - const vector_type& empirical_enhanced_statistic, - vector_type& default_enhanced_statistics, - std::shared_ptr default_enhanced_statistics_neg, - vector_type& default_statistics) - { - vector default_labelling (stats_calculator.num_subjects()); - for (size_t i = 0; i < default_labelling.size(); ++i) - default_labelling[i] = i; - stats_calculator (default_labelling, default_statistics); - (*enhancer) (default_statistics, default_enhanced_statistics); - - if (empirical_enhanced_statistic.size()) - default_enhanced_statistics /= empirical_enhanced_statistic; - - // Compute the opposite contrast - if (default_enhanced_statistics_neg) { - default_statistics = -default_statistics; - - (*enhancer) (default_statistics, *default_enhanced_statistics_neg); - - if (empirical_enhanced_statistic.size()) - (*default_enhanced_statistics_neg) /= empirical_enhanced_statistic; - - // revert default_statistics to positive contrast for output - default_statistics = -default_statistics; - } - } - - template - inline void run_permutations (PermutationStack& perm_stack, - const StatsType& stats_calculator, - const std::shared_ptr enhancer, - const vector_type& empirical_enhanced_statistic, - const vector_type& default_enhanced_statistics, - const std::shared_ptr default_enhanced_statistics_neg, - vector_type& perm_dist_pos, - std::shared_ptr perm_dist_neg, - vector_type& uncorrected_pvalues, - std::shared_ptr uncorrected_pvalues_neg) - { - vector global_uncorrected_pvalue_count (stats_calculator.num_elements(), 0); - std::shared_ptr< vector > global_uncorrected_pvalue_count_neg; - if (perm_dist_neg) - global_uncorrected_pvalue_count_neg.reset (new vector (stats_calculator.num_elements(), 0)); - - { - Processor processor (stats_calculator, enhancer, - empirical_enhanced_statistic, - default_enhanced_statistics, default_enhanced_statistics_neg, - perm_dist_pos, perm_dist_neg, - global_uncorrected_pvalue_count, global_uncorrected_pvalue_count_neg); - Thread::run_queue (perm_stack, Permutation(), Thread::multi (processor)); - } - - for (size_t i = 0; i < stats_calculator.num_elements(); ++i) { - uncorrected_pvalues[i] = global_uncorrected_pvalue_count[i] / default_type(perm_stack.num_permutations); - if (perm_dist_neg) - (*uncorrected_pvalues_neg)[i] = (*global_uncorrected_pvalue_count_neg)[i] / default_type(perm_stack.num_permutations); - } - - } - - - 
template - inline void run_permutations (vector>& permutations, - const StatsType& stats_calculator, - const std::shared_ptr enhancer, - const vector_type& empirical_enhanced_statistic, - const vector_type& default_enhanced_statistics, - const std::shared_ptr default_enhanced_statistics_neg, - vector_type& perm_dist_pos, - std::shared_ptr perm_dist_neg, - vector_type& uncorrected_pvalues, - std::shared_ptr uncorrected_pvalues_neg) - { - PermutationStack perm_stack (permutations, "running " + str(permutations.size()) + " permutations"); - - run_permutations (perm_stack, stats_calculator, enhancer, empirical_enhanced_statistic, default_enhanced_statistics, default_enhanced_statistics_neg, - perm_dist_pos, perm_dist_neg, uncorrected_pvalues, uncorrected_pvalues_neg); - } - - - template - inline void run_permutations (const size_t num_permutations, - const StatsType& stats_calculator, - const std::shared_ptr enhancer, - const vector_type& empirical_enhanced_statistic, - const vector_type& default_enhanced_statistics, - const std::shared_ptr default_enhanced_statistics_neg, - vector_type& perm_dist_pos, - std::shared_ptr perm_dist_neg, - vector_type& uncorrected_pvalues, - std::shared_ptr uncorrected_pvalues_neg) - { - PermutationStack perm_stack (num_permutations, stats_calculator.num_subjects(), "running " + str(num_permutations) + " permutations"); - - run_permutations (perm_stack, stats_calculator, enhancer, empirical_enhanced_statistic, default_enhanced_statistics, default_enhanced_statistics_neg, - perm_dist_pos, perm_dist_neg, uncorrected_pvalues, uncorrected_pvalues_neg); - } - - - //! @} + class PreProcessor { MEMALIGN (PreProcessor) + public: + PreProcessor (const std::shared_ptr stats_calculator, + const std::shared_ptr enhancer, + vector_type& global_enhanced_sum, + vector& global_enhanced_count); + + ~PreProcessor(); + + bool operator() (const Permutation&); + + protected: + std::shared_ptr stats_calculator; + std::shared_ptr enhancer; + vector_type& global_enhanced_sum; + vector& global_enhanced_count; + vector_type enhanced_sum; + vector enhanced_count; + vector_type stats; + vector_type enhanced_stats; + std::shared_ptr mutex; + }; + + + + + /*! 
A class to perform the permutation testing */ + class Processor { MEMALIGN (Processor) + public: + Processor (const std::shared_ptr stats_calculator, + const std::shared_ptr enhancer, + const vector_type& empirical_enhanced_statistics, + const vector_type& default_enhanced_statistics, + const std::shared_ptr default_enhanced_statistics_neg, + vector_type& perm_dist_pos, + std::shared_ptr perm_dist_neg, + vector& global_uncorrected_pvalue_counter, + std::shared_ptr< vector > global_uncorrected_pvalue_counter_neg); + + ~Processor(); + + bool operator() (const Permutation&); + + protected: + std::shared_ptr stats_calculator; + std::shared_ptr enhancer; + const vector_type& empirical_enhanced_statistics; + const vector_type& default_enhanced_statistics; + const std::shared_ptr default_enhanced_statistics_neg; + vector_type statistics; + vector_type enhanced_statistics; + vector uncorrected_pvalue_counter; + std::shared_ptr > uncorrected_pvalue_counter_neg; + vector_type& perm_dist_pos; + std::shared_ptr perm_dist_neg; + + vector& global_uncorrected_pvalue_counter; + std::shared_ptr > global_uncorrected_pvalue_counter_neg; + std::shared_ptr mutex; + }; + + + + + // Precompute the empircal test statistic for non-stationarity adjustment + void precompute_empirical_stat (const std::shared_ptr stats_calculator, + const std::shared_ptr enhancer, + PermutationStack& perm_stack, vector_type& empirical_statistic); + + + + + // Precompute the default statistic image and enhanced statistic. We need to precompute this for calculating the uncorrected p-values. + void precompute_default_permutation (const std::shared_ptr stats_calculator, + const std::shared_ptr enhancer, + const vector_type& empirical_enhanced_statistic, + vector_type& default_enhanced_statistics, + std::shared_ptr default_enhanced_statistics_neg, + vector_type& default_statistics); + + + + // Functions for running a large number of permutations + // Different interfaces depending on how the permutations themselves are constructed: + // - A pre-existing permutation stack class + // - Pre-defined permutations (likely provided via a command-line option) + // - A requested number of permutations + void run_permutations (PermutationStack& perm_stack, + const std::shared_ptr stats_calculator, + const std::shared_ptr enhancer, + const vector_type& empirical_enhanced_statistic, + const vector_type& default_enhanced_statistics, + const std::shared_ptr default_enhanced_statistics_neg, + vector_type& perm_dist_pos, + std::shared_ptr perm_dist_neg, + vector_type& uncorrected_pvalues, + std::shared_ptr uncorrected_pvalues_neg); + + + void run_permutations (const vector>& permutations, + const std::shared_ptr stats_calculator, + const std::shared_ptr enhancer, + const vector_type& empirical_enhanced_statistic, + const vector_type& default_enhanced_statistics, + const std::shared_ptr default_enhanced_statistics_neg, + vector_type& perm_dist_pos, + std::shared_ptr perm_dist_neg, + vector_type& uncorrected_pvalues, + std::shared_ptr uncorrected_pvalues_neg); + + + void run_permutations (const size_t num_permutations, + const std::shared_ptr stats_calculator, + const std::shared_ptr enhancer, + const vector_type& empirical_enhanced_statistic, + const vector_type& default_enhanced_statistics, + const std::shared_ptr default_enhanced_statistics_neg, + vector_type& perm_dist_pos, + std::shared_ptr perm_dist_neg, + vector_type& uncorrected_pvalues, + std::shared_ptr uncorrected_pvalues_neg); + + + //! 
@} } } From 49966ea9a54a832eba86f25b8d8e794612be5803 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Tue, 4 Apr 2017 10:54:30 +1000 Subject: [PATCH 0031/1471] mrconvert: Neaten template functions --- cmd/mrconvert.cpp | 50 ++++++++++++++++++++++------------------------- 1 file changed, 23 insertions(+), 27 deletions(-) diff --git a/cmd/mrconvert.cpp b/cmd/mrconvert.cpp index 605fca3081..48bf05b336 100644 --- a/cmd/mrconvert.cpp +++ b/cmd/mrconvert.cpp @@ -213,36 +213,32 @@ inline vector set_header (Header& header, const ImageType& input) -template -inline void copy_permute (Header& header_in, Header& header_out, const vector>& pos, const std::string& output_filename) +template +void copy_permute (const InputType& in, Header& header_out, const std::string& output_filename) { + const auto axes = set_header (header_out, in); + auto out = Image::create (output_filename, header_out); + DWI::export_grad_commandline (out); + PhaseEncoding::export_commandline (out); + auto perm = Adapter::make (in, axes); + threaded_copy_with_progress (perm, out, 0, std::numeric_limits::max(), 2); +} - auto in = header_in.get_image(); - if (pos.empty()) { - const auto axes = set_header (header_out, in); - auto out = Header::create (output_filename, header_out).get_image(); - DWI::export_grad_commandline (out); - PhaseEncoding::export_commandline (out); - auto perm = Adapter::make (in, axes); - threaded_copy_with_progress (perm, out, 0, std::numeric_limits::max(), 2); +template +void extract (Header& header_in, Header& header_out, const vector>& pos, const std::string& output_filename) +{ + auto in = header_in.get_image(); + if (pos.empty()) { + copy_permute (in, header_out, output_filename); } else { - - auto extract = Adapter::make (in, pos); - const auto axes = set_header (header_out, extract); - auto out = Image::create (output_filename, header_out); - DWI::export_grad_commandline (out); - PhaseEncoding::export_commandline (out); - - auto perm = Adapter::make (extract, axes); - threaded_copy_with_progress (perm, out, 0, std::numeric_limits::max(), 2); - + auto extract = Adapter::Extract> (in, pos); + copy_permute (extract, header_out, output_filename); } - } @@ -376,15 +372,15 @@ void run () case DataType::UInt16: case DataType::UInt32: if (header_out.datatype().is_signed()) - copy_permute (header_in, header_out, pos, argument[1]); + extract (header_in, header_out, pos, argument[1]); else - copy_permute (header_in, header_out, pos, argument[1]); + extract (header_in, header_out, pos, argument[1]); break; case DataType::UInt64: if (header_out.datatype().is_signed()) - copy_permute (header_in, header_out, pos, argument[1]); + extract (header_in, header_out, pos, argument[1]); else - copy_permute (header_in, header_out, pos, argument[1]); + extract (header_in, header_out, pos, argument[1]); break; case DataType::Undefined: throw Exception ("invalid output image data type"); break; @@ -392,9 +388,9 @@ void run () } else { if (header_out.datatype().is_complex()) - copy_permute (header_in, header_out, pos, argument[1]); + extract (header_in, header_out, pos, argument[1]); else - copy_permute (header_in, header_out, pos, argument[1]); + extract (header_in, header_out, pos, argument[1]); } From 567d9802409cf95ba11acddce0f824908123cd11 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Tue, 4 Apr 2017 11:02:06 +1000 Subject: [PATCH 0032/1471] Fixel::find_directions_header() changes Previously, all images in the fixel directory were opened, and then is_directions_file() was called, which checked both the image 
properties and the file name. Apart from being inefficient, this also clogs -info output as the directory is scanned. This change validates that the name of a file is a plausible directions file before instantiating a Header and checking the image properties. --- cmd/fixelcfestats.cpp | 7 ++++- core/fixel/helpers.h | 64 ++++++++++++++++++++++++------------------- core/math/stats/glm.h | 9 ++---- 3 files changed, 44 insertions(+), 36 deletions(-) diff --git a/cmd/fixelcfestats.cpp b/cmd/fixelcfestats.cpp index e5676e2ab9..64ab9300d6 100644 --- a/cmd/fixelcfestats.cpp +++ b/cmd/fixelcfestats.cpp @@ -228,9 +228,13 @@ void run() if (!Fixel::fixels_match (index_header, dynamic_cast(importer[i].get())->header())) throw Exception ("Fixel data file \"" + importer[i]->name() + "\" does not match template fixel image"); } + if (importer.size()) { + CONSOLE ("number of element-wise design matrix columns: " + str(importer.size())); + } // Load design matrix: const matrix_type design = load_matrix (argument[2]); + CONSOLE ("design matrix dimensions: " + str(design.rows()) + " x " + str(design.cols())); if (design.rows() != (ssize_t)importer.size()) throw Exception ("number of input files does not match number of rows in design matrix"); @@ -260,6 +264,7 @@ void run() // Load contrast matrix const matrix_type contrast = load_matrix (argument[3]); + CONSOLE ("Contrast matrix dimensions: " + str(contrast.rows()) + " x " + str(contrast.cols())); // Before validating the contrast matrix, we first need to see if there are any // additional design matrix columns coming from fixel-wise subject data @@ -393,10 +398,10 @@ void run() if (extra_columns.size()) { WARN ("Beta coefficients, effect size and standard deviation outputs not yet implemented for fixel-wise extra columns"); + // TODO } else { ProgressBar progress ("outputting beta coefficients, effect size and standard deviation"); auto temp = Math::Stats::GLM::solve_betas (data, design); - for (ssize_t i = 0; i < contrast.cols(); ++i) { write_fixel_output (Path::join (output_fixel_directory, "beta" + str(i) + ".mif"), temp.row(i), output_header); ++progress; diff --git a/core/fixel/helpers.h b/core/fixel/helpers.h index aec2c9ee59..fd519cd61c 100644 --- a/core/fixel/helpers.h +++ b/core/fixel/helpers.h @@ -32,19 +32,22 @@ namespace MR namespace Fixel { - FORCE_INLINE bool is_index_image (const Header& in) + FORCE_INLINE bool is_index_filename (const std::string& path) { - bool is_index = false; - if (in.ndim() == 4) { - if (in.size(3) == 2) { - for (std::initializer_list::iterator it = supported_sparse_formats.begin(); - it != supported_sparse_formats.end(); ++it) { - if (Path::basename (in.name()) == "index" + *it) - is_index = true; - } - } + for (std::initializer_list::iterator it = supported_sparse_formats.begin(); + it != supported_sparse_formats.end(); ++it) { + if (Path::basename (path) == "index" + *it) + return true; } - return is_index; + return false; + } + + + FORCE_INLINE bool is_index_image (const Header& in) + { + return is_index_filename (in.name()) + && in.ndim() == 4 + && in.size(3) == 2; } template @@ -61,22 +64,29 @@ namespace MR } - FORCE_INLINE bool is_directions_file (const Header& in) + FORCE_INLINE bool is_directions_filename (const std::string& path) { - bool is_directions = false; - if (in.ndim() == 3) { - if (in.size(1) == 3 && in.size(2) == 1) { - for (std::initializer_list::iterator it = supported_sparse_formats.begin(); - it != supported_sparse_formats.end(); ++it) { - if (Path::basename (in.name()) == "directions" + *it) - 
is_directions = true; - } - } + for (std::initializer_list::iterator it = supported_sparse_formats.begin(); + it != supported_sparse_formats.end(); ++it) { + if (Path::basename (path) == "directions" + *it) + return true; } - return is_directions; + return false; + } + + + FORCE_INLINE bool is_directions_file (const Header& in) + { + return is_directions_filename (in.name()) + && in.ndim() == 3 + && in.size(1) == 3 + && in.size(2) == 1; } + + + FORCE_INLINE void check_data_file (const Header& in) { if (!is_data_file (in)) @@ -242,17 +252,15 @@ namespace MR auto dir_walker = Path::Dir (fixel_directory_path); std::string fname; - while ((fname = dir_walker.read_name ()).size ()) { - Header tmp_header; - auto full_path = Path::join (fixel_directory_path, fname); - if (Path::has_suffix (fname, supported_sparse_formats) - && is_directions_file (tmp_header = Header::open (full_path))) { + while ((fname = dir_walker.read_name()).size()) { + if (is_directions_filename (fname)) { + const Header tmp_header = Header::open (Path::join (fixel_directory_path, fname)); if (is_directions_file (tmp_header)) { if (fixels_match (index_header, tmp_header)) { if (directions_found == true) throw Exception ("multiple directions files found in fixel image directory: " + fixel_directory_path); directions_found = true; - header = std::move(tmp_header); + header = std::move (tmp_header); } else { WARN ("fixel directions file (" + fname + ") does not contain the same number of elements as fixels in the index file" ); } diff --git a/core/math/stats/glm.h b/core/math/stats/glm.h index 035fc919cb..481256cb71 100644 --- a/core/math/stats/glm.h +++ b/core/math/stats/glm.h @@ -222,11 +222,6 @@ namespace MR * particular type of data being tested. Therefore an Importer class must be * defined that is responsible for acquiring and vectorising these data. */ - // TODO Define a "standard" interface for data import: Receives as input a - // text string corresponding to a file, and writes the result to a - // vector / block vector - // If this could be defined using a base class, it would remove the templating here... - // The same class would also be used in the cmd/ files to do the initial measurement matrix fill class GLMTTestVariable : public GLMTestBase { NOMEMALIGN public: GLMTTestVariable (const vector& importers, const matrix_type& measurements, const matrix_type& design, const matrix_type& contrasts); @@ -235,7 +230,7 @@ namespace MR * @param perm_labelling a vector to shuffle the rows in the design matrix (for permutation testing) * @param stats the vector containing the output t-statistics * - * TODO In GLMTTestVariable, this function will additionally need to import the + * In GLMTTestVariable, this function has to import the * extra external data individually for each element tested. */ void operator() (const vector& perm_labelling, vector_type& stats) const override; @@ -253,6 +248,7 @@ namespace MR @{ */ /*! A class to compute F-statistics using a fixed General Linear Model. * This class produces a single F-statistic across all contrasts of interest. + * NOT YET IMPLEMENTED */ class GLMFTestFixed : public GLMTestBase { MEMALIGN(GLMFTestFixed) public: @@ -270,7 +266,6 @@ namespace MR void operator() (const vector& perm_labelling, vector_type& stats) const override; protected: - // TODO How to deal with contrast scaling? // TODO How to deal with f-tests that apply to specific contrasts only? 
const matrix_type ftests; }; From 69b0417884f22b5ec2d92570710b4d876dcfd45d Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Tue, 4 Apr 2017 11:36:14 +1000 Subject: [PATCH 0033/1471] fixelcfestats: Various fixes for per-fixel design matrices --- cmd/fixelcfestats.cpp | 8 ++++---- core/fixel/helpers.h | 2 +- core/math/stats/glm.cpp | 2 +- core/math/stats/import.h | 18 +++++++++++------- 4 files changed, 17 insertions(+), 13 deletions(-) diff --git a/cmd/fixelcfestats.cpp b/cmd/fixelcfestats.cpp index 64ab9300d6..5af55ecd12 100644 --- a/cmd/fixelcfestats.cpp +++ b/cmd/fixelcfestats.cpp @@ -159,7 +159,7 @@ class SubjectFixelImport : public SubjectDataImportBase default_type operator[] (const size_t index) const override { - assert (index < data.size(0)); + assert (index < size_t(data.size(0))); Image temp (data); // For thread-safety temp.index(0) = index; return default_type(temp.value()); @@ -275,10 +275,10 @@ void run() extra_columns[i].initialise (opt[i][0]); } - if (contrast.cols() + ssize_t(extra_columns.size()) != design.cols()) + if (contrast.cols() != design.cols() + ssize_t(extra_columns.size())) throw Exception ("the number of columns per contrast (" + str(contrast.cols()) + ")" - + (extra_columns.size() ? " (in addition to the " + str(extra_columns.size()) + " uses of -column)" : "") - + " does not equal the number of columns in the design matrix (" + str(design.cols()) + ")"); + + " does not equal the number of columns in the design matrix (" + str(design.cols()) + ")" + + (extra_columns.size() ? " (taking into account the " + str(extra_columns.size()) + " uses of -column)" : "")); if (contrast.rows() > 1) throw Exception ("only a single contrast vector (defined as a row) is currently supported"); diff --git a/core/fixel/helpers.h b/core/fixel/helpers.h index fd519cd61c..8751822484 100644 --- a/core/fixel/helpers.h +++ b/core/fixel/helpers.h @@ -254,7 +254,7 @@ namespace MR std::string fname; while ((fname = dir_walker.read_name()).size()) { if (is_directions_filename (fname)) { - const Header tmp_header = Header::open (Path::join (fixel_directory_path, fname)); + Header tmp_header = Header::open (Path::join (fixel_directory_path, fname)); if (is_directions_file (tmp_header)) { if (fixels_match (index_header, tmp_header)) { if (directions_found == true) diff --git a/core/math/stats/glm.cpp b/core/math/stats/glm.cpp index 916701903a..1f56d66096 100644 --- a/core/math/stats/glm.cpp +++ b/core/math/stats/glm.cpp @@ -167,7 +167,7 @@ namespace MR { // Make sure that the specified contrasts reflect the full design matrix (with additional // data loaded) - assert (contrasts.cols() == X.cols() + importers.size()); + assert (contrasts.cols() == X.cols() + ssize_t(importers.size())); } diff --git a/core/math/stats/import.h b/core/math/stats/import.h index d6953e1bc7..f0c28e39ec 100644 --- a/core/math/stats/import.h +++ b/core/math/stats/import.h @@ -125,13 +125,17 @@ namespace MR std::ifstream ifs (path.c_str()); std::string line; while (getline (ifs, line)) { - std::string filename (Path::join (directory, line)); - size_t p = filename.find_last_not_of(" \t"); - if (std::string::npos != p) - filename.erase(p+1); - if (!Path::exists (filename)) - throw Exception ("Reading text file \"" + Path::basename (path) + "\": input data file not found: \"" + filename + "\""); - files.push_back (std::make_shared (filename)); + size_t p = line.find_last_not_of(" \t"); + if (p != std::string::npos) + line.erase (p+1); + if (line.size()) { + const std::string filename (Path::join (directory, line)); + 
try { + files.push_back (std::make_shared (filename)); + } catch (Exception& e) { + throw Exception (e, "Reading text file \"" + Path::basename (path) + "\": input image data file not found: \"" + filename + "\""); + } + } } } From 437a3aa6beb51196a5760edd4f446feadb95800d Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Tue, 4 Apr 2017 15:39:11 +1000 Subject: [PATCH 0034/1471] fixelcfestats: Various changes and fixes Functions for T-tests were moved into individual GLM classes, to reduce confusion with respect to pre-scaling of contrasts and transposition of matrices. --- cmd/fixelcfestats.cpp | 13 +-- core/math/stats/glm.cpp | 186 ++++++++++++++++++++++++--------------- core/math/stats/glm.h | 189 +++++++++++++++++----------------------- 3 files changed, 207 insertions(+), 181 deletions(-) diff --git a/cmd/fixelcfestats.cpp b/cmd/fixelcfestats.cpp index 5af55ecd12..62e7393c90 100644 --- a/cmd/fixelcfestats.cpp +++ b/cmd/fixelcfestats.cpp @@ -221,16 +221,14 @@ void run() } } - // Read identifiers and check files exist + // Read file names and check files exist CohortDataImport importer; importer.initialise (argument[1]); for (size_t i = 0; i != importer.size(); ++i) { if (!Fixel::fixels_match (index_header, dynamic_cast(importer[i].get())->header())) throw Exception ("Fixel data file \"" + importer[i]->name() + "\" does not match template fixel image"); } - if (importer.size()) { - CONSOLE ("number of element-wise design matrix columns: " + str(importer.size())); - } + CONSOLE ("Number of subjects: " + str(importer.size())); // Load design matrix: const matrix_type design = load_matrix (argument[2]); @@ -274,6 +272,9 @@ void run() extra_columns.push_back (CohortDataImport()); extra_columns[i].initialise (opt[i][0]); } + if (extra_columns.size()) { + CONSOLE ("number of element-wise design matrix columns: " + str(extra_columns.size())); + } if (contrast.cols() != design.cols() + ssize_t(extra_columns.size())) throw Exception ("the number of columns per contrast (" + str(contrast.cols()) + ")" @@ -293,7 +294,7 @@ void run() if (!num_tracks) throw Exception ("no tracks found in input file"); if (num_tracks < 1000000) - WARN ("more than 1 million tracks should be used to ensure robust fixel-fixel connectivity"); + WARN ("more than 1 million tracks is preferable to ensure robust fixel-fixel connectivity; file \"" + track_filename + "\" contains only " + str(num_tracks)); { typedef DWI::Tractography::Mapping::SetVoxelDir SetVoxelDir; DWI::Tractography::Mapping::TrackLoader loader (track_file, num_tracks, "pre-computing fixel-fixel connectivity"); @@ -328,7 +329,7 @@ void run() auto it = connectivity_matrix[fixel].begin(); while (it != connectivity_matrix[fixel].end()) { const connectivity_value_type connectivity = it->second.value / connectivity_value_type (fixel_TDI[fixel]); - if (connectivity < connectivity_threshold) { + if (connectivity < connectivity_threshold) { connectivity_matrix[fixel].erase (it++); } else { if (do_smoothing) { diff --git a/core/math/stats/glm.cpp b/core/math/stats/glm.cpp index 1f56d66096..840118bee9 100644 --- a/core/math/stats/glm.cpp +++ b/core/math/stats/glm.cpp @@ -28,58 +28,12 @@ namespace MR namespace GLM { - matrix_type scale_contrasts (const matrix_type& contrasts, const matrix_type& design, const size_t degrees_of_freedom) - { - assert (contrasts.cols() == design.cols()); - const matrix_type XtX = design.transpose() * design; - const matrix_type pinv_XtX = (XtX.transpose() * XtX).fullPivLu().solve (XtX.transpose()); - matrix_type scaled_contrasts 
(contrasts); - - for (size_t n = 0; n < size_t(contrasts.rows()); ++n) { - auto pinv_XtX_c = pinv_XtX * contrasts.row(n).transpose(); - scaled_contrasts.row(n) *= std::sqrt (value_type(degrees_of_freedom) / contrasts.row(n).dot (pinv_XtX_c)); - } - return scaled_contrasts; - } - void ttest_prescaled (matrix_type& tvalues, - const matrix_type& design, - const matrix_type& pinv_design, - const matrix_type& measurements, - const matrix_type& scaled_contrasts, - matrix_type& betas, - matrix_type& residuals) - { - betas.noalias() = measurements * pinv_design; - residuals.noalias() = measurements - betas * design; - tvalues.noalias() = betas * scaled_contrasts; - for (size_t n = 0; n < size_t(tvalues.rows()); ++n) - tvalues.row(n).array() /= residuals.row(n).norm(); - } - void ttest (matrix_type& tvalues, - const matrix_type& design, - const matrix_type& measurements, - const matrix_type& contrasts, - matrix_type& betas, - matrix_type& residuals) - { - const matrix_type pinv_design = Math::pinv (design); - betas.noalias() = measurements * pinv_design; - residuals.noalias() = measurements - betas * design; - const matrix_type XtX = design.transpose() * design; - const matrix_type pinv_XtX = (XtX.transpose() * XtX).fullPivLu().solve (XtX.transpose()); - const size_t degrees_of_freedom = design.rows() - rank(design); - tvalues.noalias() = betas * contrasts; - for (size_t n = 0; n != size_t(tvalues.rows()); ++n) { - const default_type variance = residuals.row(n).squaredNorm() / degrees_of_freedom; - tvalues.row(n).array() /= sqrt(variance * contrasts.row(n).dot (pinv_XtX * contrasts.row(n).transpose())); - } - } @@ -120,17 +74,20 @@ namespace MR - GLMTTestFixed::GLMTTestFixed (const matrix_type& measurements, const matrix_type& design, const matrix_type& contrast) : - GLMTestBase (measurements, design, contrast), + GLMTTestFixed::GLMTTestFixed (const matrix_type& measurements, const matrix_type& design, const matrix_type& contrasts) : + GLMTestBase (measurements, design, contrasts), pinvX (Math::pinv (X)), - scaled_contrasts (GLM::scale_contrasts (contrast, X, X.rows()-rank(X)).transpose()) { } + scaled_contrasts_t (scale_contrasts().transpose()) + { + assert (contrasts.cols() == design.cols()); + } void GLMTTestFixed::operator() (const vector& perm_labelling, vector_type& output) const { output = vector_type::Zero (y.rows()); - matrix_type tvalues, betas, residuals, SX, pinvSX; + matrix_type tvalues, betas, residuals_t, SX_t, pinvSX_t; // TODO Currently the entire design matrix is permuted; // we may instead prefer Freedman-Lane @@ -138,18 +95,18 @@ namespace MR // since the columns that correspond to nuisance variables // varies between rows - SX.resize (X.rows(), X.cols()); - pinvSX.resize (pinvX.rows(), pinvX.cols()); + SX_t.resize (X.rows(), X.cols()); + pinvSX_t.resize (pinvX.rows(), pinvX.cols()); for (ssize_t i = 0; i < X.rows(); ++i) { - SX.row(i) = X.row (perm_labelling[i]); - pinvSX.col(i) = pinvX.col (perm_labelling[i]); + SX_t.row(i) = X.row (perm_labelling[i]); + pinvSX_t.col(i) = pinvX.col (perm_labelling[i]); } - SX.transposeInPlace(); - pinvSX.transposeInPlace(); + SX_t.transposeInPlace(); + pinvSX_t.transposeInPlace(); for (ssize_t i = 0; i < y.rows(); i += GLM_BATCH_SIZE) { const matrix_type tmp = y.block (i, 0, std::min (GLM_BATCH_SIZE, (int)(y.rows()-i)), y.cols()); - GLM::ttest_prescaled (tvalues, SX, pinvSX, tmp, scaled_contrasts, betas, residuals); + ttest (tvalues, SX_t, pinvSX_t, tmp, betas, residuals_t); for (ssize_t n = 0; n < tvalues.rows(); ++n) { value_type val = 
tvalues(n,0); if (!std::isfinite (val)) @@ -161,6 +118,72 @@ namespace MR + void GLMTTestFixed::ttest (matrix_type& tvalues, + const matrix_type& design_t, + const matrix_type& pinv_design_t, + const matrix_type& measurements, + matrix_type& betas, + matrix_type& residuals_t) const + { + betas.noalias() = measurements * pinv_design_t; + residuals_t.noalias() = measurements - betas * design_t; + tvalues.noalias() = betas * scaled_contrasts_t; + for (size_t n = 0; n < size_t(tvalues.rows()); ++n) + tvalues.row(n).array() /= residuals_t.row(n).norm(); + } + + + + + // scale contrasts for use in ttest() member function + /* This function pre-scales a contrast matrix in order to make conversion from GLM betas + * to t-values more computationally efficient. + * + * For design matrix X, contrast matrix c, beta vector b and variance o^2, the t-value is calculated as: + * c^T.b + * t = -------------------------- + * sqrt(o^2.c^T.(X^T.X)^-1.c) + * + * Definition of variance (for vector of residuals e): + * e^T.e + * o^2 = ------ + * DOF(X) + * + * (Note that the above equations are used directly in GLMTTestVariable) + * + * This function will generate scaled contrasts c' from c, such that: + * DOF(X) + * c' = c.sqrt(------------------) + * c^T.(X^T.X)^-1.c + * + * c'^T.b + * t = ----------- + * sqrt(e^T.e) + * + * Note each row of the contrast matrix will still be treated as an independent contrast. The number + * of elements in each contrast vector must equal the number of columns in the design matrix. + */ + matrix_type GLMTTestFixed::scale_contrasts() const + { + const matrix_type XtX = X.transpose() * X; + const matrix_type pinv_XtX = (XtX.transpose() * XtX).fullPivLu().solve (XtX.transpose()); + const size_t degrees_of_freedom = X.rows() - Math::rank (X); + matrix_type scaled_contrasts (c); + for (size_t n = 0; n < size_t(c.rows()); ++n) { + const auto ct_pinv_XtX_c = c.row(n).dot (pinv_XtX * c.row(n).transpose()); + scaled_contrasts.row(n) *= std::sqrt (value_type(degrees_of_freedom) / ct_pinv_XtX_c); + } + return scaled_contrasts; + } + + + + + + + + + GLMTTestVariable::GLMTTestVariable (const vector& importers, const matrix_type& measurements, const matrix_type& design, const matrix_type& contrasts) : GLMTestBase (measurements, design, contrasts), importers (importers) @@ -175,7 +198,7 @@ namespace MR void GLMTTestVariable::operator() (const vector& perm_labelling, vector_type& output) const { output = vector_type::Zero (y.rows()); - matrix_type tvalues, betas, residuals; + vector_type tvalues, betas, residuals; // Set the size of the permuted design matrix to include the additional columns // that will be imported from external files @@ -203,17 +226,13 @@ namespace MR for (ssize_t row = 0; row != X.rows(); ++row) SX.block(row, X.cols(), 1, importers.size()) = extra_data.row(perm_labelling[row]); - // Need to pre-scale contrasts if we want to use the ttest() function; - // otherwise, need to define a different function that doesn't rely on pre-scaling - // Went for the latter option; this call doesn't need pre-scaling of contrasts, - // nor does it need a pre-computed pseudo-inverse of the design matrix - GLM::ttest (tvalues, SX.transpose(), y.row(element), c, betas, residuals); + ttest (tvalues, SX, y.row(element), betas, residuals); // FIXME // Currently output only the first contrast, as is done in GLMTTestFixed // tvalues should have one row only (since we're only testing a single row), and // number of columns equal to the number of contrasts - value_type val = tvalues (element, 0); + 
value_type val = tvalues[0]; if (!std::isfinite (val)) val = value_type(0); output[element] = val; @@ -223,18 +242,49 @@ namespace MR - matrix_type GLMTTestVariable::default_design (const matrix_type& design, const size_t index) const + void GLMTTestVariable::ttest (vector_type& tvalues, + const matrix_type& design, + const vector_type& measurements, + vector_type& betas, + vector_type& residuals) const + { + matrix_type pinv_design = Math::pinv (design); + const matrix_type XtX = design.transpose() * design; + const matrix_type pinv_XtX = (XtX.transpose() * XtX).fullPivLu().solve (XtX.transpose()); + betas = pinv_design * measurements.matrix(); + residuals = measurements - (design * betas.matrix()).array(); + tvalues = c * betas.matrix(); + const default_type variance = residuals.matrix().squaredNorm() / (design.cols() - rank(design)); + // The fact that we're only able to test one element at a time here should be + // placing a restriction on the dimensionality of tvalues + // Previously, could be (number of elements) * (number of contrasts); + // now can only reflect the number of contrasts + for (size_t n = 0; n != size_t(tvalues.size()); ++n) { + const default_type ct_pinv_XtX_c = c.row(n).dot (pinv_XtX * c.row(n).transpose()); + tvalues[n] /= std::sqrt (variance * ct_pinv_XtX_c); + } + } + + + + matrix_type GLMTTestVariable::default_design (const size_t index) const { - matrix_type output (design.rows(), design.cols() + importers.size()); - output.block (0, 0, design.rows(), design.cols()) = design; + matrix_type output (X.rows(), X.cols() + importers.size()); + output.block (0, 0, X.rows(), X.cols()) = X; for (size_t i = 0; i != importers.size(); ++i) - output.col (design.cols() + i) = importers[i] (index); + output.col (X.cols() + i) = importers[i] (index); return output; } + + + + + + GLMFTestFixed::GLMFTestFixed (const matrix_type& measurements, const matrix_type& design, const matrix_type& contrasts, const matrix_type& ftests) : GLMTestBase (measurements, design, contrasts), ftests (ftests) { } diff --git a/core/math/stats/glm.h b/core/math/stats/glm.h index 481256cb71..e9a03152d0 100644 --- a/core/math/stats/glm.h +++ b/core/math/stats/glm.h @@ -31,113 +31,44 @@ namespace MR namespace GLM { + /** \addtogroup Statistics + @{ */ + /*! Compute a matrix of the beta coefficients + * @param measurements a matrix storing the measured data for each subject in a column + * @param design the design matrix (unlike other packages a column of ones is NOT automatically added for correlation analysis) + * @return the matrix containing the output effect + */ + matrix_type solve_betas (const matrix_type& measurements, const matrix_type& design); - // TODO With the upcoming changes, many of these 'loose' functions become specific to the GLMTTestFixed class - // Therefore they should be moved - - - //! scale contrasts for use in t-test - /*! This function pre-scales a contrast matrix in order to make conversion from GLM betas - * to t-values more computationally efficient. 
- * - * For design matrix X, contrast matrix c, beta vector b and variance o^2, the t-value is calculated as: - * c^T.b - * t = -------------------------- - * sqrt(o^2.c^T.(X^T.X)^-1.c) - * - * Definition of variance (for vector of residuals e): - * e^T.e - * o^2 = ------ - * DOF(X) - * - * This function will generate scaled contrasts c' from c, such that: - * DOF(X) - * c' = c.sqrt(------------------) - * c^T.(X^T.X)^-1.c - * - * c'^T.b - * t = ----------- - * sqrt(e^T.e) - * - * Note each row of the contrast matrix will still be treated as an independent contrast. The number - * of elements in each contrast vector must equal the number of columns in the design matrix */ - matrix_type scale_contrasts (const matrix_type& contrasts, const matrix_type& design, const size_t degrees_of_freedom); - - - - //! generic GLM t-test - /*! note that the data, effects, and residual matrices are transposed. - * This is to take advantage of Eigen's convention of storing - * matrices in column-major format by default. - * - * Note also that the contrast matrix should already have been scaled - * using the GLM::scale_contrasts() function. */ - void ttest_prescaled (matrix_type& tvalues, - const matrix_type& design, - const matrix_type& pinv_design, - const matrix_type& measurements, - const matrix_type& scaled_contrasts, - matrix_type& betas, - matrix_type& residuals); - - - //! generic GLM t-test - /*! note that the data, effects, and residual matrices are transposed. - * This is to take advantage of Eigen's convention of storing - * matrices in column-major format by default. - * - * This version does not require, or take advantage of, pre-calculation - * of the pseudo-inverse of the design matrix. - * - * Note that for this version the contrast matrix should NOT have been scaled - * using the GLM::scale_contrasts() function. */ - void ttest (matrix_type& tvalues, - const matrix_type& design, - const matrix_type& measurements, - const matrix_type& contrasts, - matrix_type& betas, - matrix_type& residuals); - - - - /** \addtogroup Statistics - @{ */ - /*! Compute a matrix of the beta coefficients - * @param measurements a matrix storing the measured data for each subject in a column - * @param design the design matrix (unlike other packages a column of ones is NOT automatically added for correlation analysis) - * @return the matrix containing the output effect - */ - matrix_type solve_betas (const matrix_type& measurements, const matrix_type& design); - - /*! Compute the effect of interest - * @param measurements a matrix storing the measured data for each subject in a column - * @param design the design matrix (unlike other packages a column of ones is NOT automatically added for correlation analysis) - * @param contrast a matrix defining the group difference - * @return the matrix containing the output effect - */ - matrix_type abs_effect_size (const matrix_type& measurements, const matrix_type& design, const matrix_type& contrast); + /*! Compute the effect of interest + * @param measurements a matrix storing the measured data for each subject in a column + * @param design the design matrix (unlike other packages a column of ones is NOT automatically added for correlation analysis) + * @param contrast a matrix defining the group difference + * @return the matrix containing the output effect + */ + matrix_type abs_effect_size (const matrix_type& measurements, const matrix_type& design, const matrix_type& contrast); - /*! 
Compute the pooled standard deviation - * @param measurements a matrix storing the measured data for each subject in a column - * @param design the design matrix (unlike other packages a column of ones is NOT automatically added for correlation analysis) - * @return the matrix containing the output standard deviation size - */ - matrix_type stdev (const matrix_type& measurements, const matrix_type& design); + /*! Compute the pooled standard deviation + * @param measurements a matrix storing the measured data for each subject in a column + * @param design the design matrix (unlike other packages a column of ones is NOT automatically added for correlation analysis) + * @return the matrix containing the output standard deviation size + */ + matrix_type stdev (const matrix_type& measurements, const matrix_type& design); - /*! Compute cohen's d, the standardised effect size between two means - * @param measurements a matrix storing the measured data for each subject in a column - * @param design the design matrix (unlike other packages a column of ones is NOT automatically added for correlation analysis) - * @param contrast a matrix defining the group difference - * @return the matrix containing the output standardised effect size - */ - matrix_type std_effect_size (const matrix_type& measurements, const matrix_type& design, const matrix_type& contrast); - //! @} + /*! Compute cohen's d, the standardised effect size between two means + * @param measurements a matrix storing the measured data for each subject in a column + * @param design the design matrix (unlike other packages a column of ones is NOT automatically added for correlation analysis) + * @param contrast a matrix defining the group difference + * @return the matrix containing the output standardised effect size + */ + matrix_type std_effect_size (const matrix_type& measurements, const matrix_type& design, const matrix_type& contrast); + //! @} } // End GLM namespace @@ -152,10 +83,12 @@ namespace MR y (measurements), X (design), c (contrasts), - dim (c.rows()) + outputs (c.rows()) { assert (y.cols() == X.rows()); - assert (c.cols() == X.cols()); + // Can no longer apply this assertion here; GLMTTestVariable later + // expands the number of columns in X + //assert (c.cols() == X.cols()); } /*! Compute the statistics @@ -164,13 +97,14 @@ namespace MR */ virtual void operator() (const vector& perm_labelling, vector_type& output) const = 0; - size_t num_subjects () const { return y.cols(); } size_t num_elements () const { return y.rows(); } - size_t num_outputs () const { return dim; } + size_t num_factors () const { return X.cols(); } + size_t num_outputs () const { return outputs; } + size_t num_subjects () const { return X.rows(); } protected: const matrix_type& y, X, c; - size_t dim; + size_t outputs; }; @@ -204,7 +138,30 @@ namespace MR void operator() (const vector& perm_labelling, vector_type& output) const override; protected: - const matrix_type pinvX, scaled_contrasts; + const matrix_type pinvX, scaled_contrasts_t; + + //! GLM t-test incorporating various optimisations + /*! note that the data, effects, and residual matrices are transposed. + * This is to take advantage of Eigen's convention of storing + * matrices in column-major format by default. + * + * This function makes use of member variable scaled_contrasts_t, + * set up by the GLMTTestFixed constructor, which is also transposed. 
*/ + void ttest (matrix_type& tvalues, + const matrix_type& design_t, + const matrix_type& pinv_design_t, + const matrix_type& measurements, + matrix_type& betas, + matrix_type& residuals_t) const; + + //! Pre-scaling of contrast matrix + /*! This modulates the contents of the contrast matrix for compatibility + * with member function ttest(). + * + * Scaling is performed in a member function such that member scaled_contrasts_t + * can be defined as const. */ + matrix_type scale_contrasts() const; + }; //! @} @@ -235,12 +192,30 @@ namespace MR */ void operator() (const vector& perm_labelling, vector_type& stats) const override; - // TODO A function to acquire the design matrix for the default permutation - // (note that this needs to be re-run for each element being tested) - matrix_type default_design (const matrix_type& design, const size_t index) const; + /*! Acquire the design matrix for the default permutation + * (note that this needs to be re-run for each element being tested) + * @param index the index of the element for which the design matrix is requested + * @return the design matrix for that element, including imported data for extra columns + */ + matrix_type default_design (const size_t index) const; protected: const vector& importers; + + //! generic GLM t-test + /*! This version of the t-test function does not incorporate the + * optimisations that are used in the GLMTTestFixed class, since + * many are not applicable when the design matrix changes between + * different elements. + * + * Since the design matrix varies between the different elements + * being tested, this function only accepts testing of a single + * vector of measurements at a time. */ + void ttest (vector_type& tvalues, + const matrix_type& design, + const vector_type& measurements, + vector_type& betas, + vector_type& residuals) const; }; From f41f186bbd146a3de51ccf153c93bf6d1ba5448a Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Tue, 4 Apr 2017 16:22:12 +1000 Subject: [PATCH 0035/1471] fixelcfestats: Add default permutation statistics outputs for variable design matrix experiments --- cmd/fixelcfestats.cpp | 58 +++++++++++++++++++++++++++++++++++-------- 1 file changed, 48 insertions(+), 10 deletions(-) diff --git a/cmd/fixelcfestats.cpp b/cmd/fixelcfestats.cpp index 62e7393c90..84f07c9ece 100644 --- a/cmd/fixelcfestats.cpp +++ b/cmd/fixelcfestats.cpp @@ -397,12 +397,57 @@ void run() progress++; } + // Construct the class for performing the initial statistical tests + std::shared_ptr glm_test; + if (extra_columns.size()) { + glm_test.reset (new GLMTTestVariable (extra_columns, data, design, contrast)); + } else { + glm_test.reset (new GLMTTestFixed (data, design, contrast)); + } + if (extra_columns.size()) { - WARN ("Beta coefficients, effect size and standard deviation outputs not yet implemented for fixel-wise extra columns"); - // TODO + + // For each variable of interest (e.g. beta coefficients, effect size etc.) 
need to: + // Construct the output data vector, with size = num_fixels + // For each fixel: + // Use glm_test to obtain the design matrix for the default permutation for that fixel + // Use the relevant Math::Stats::GLM function to get the value of interest for just that fixel + // (will still however need to come out as a matrix_type) + // Write that value to data vector + // Finally, use write_fixel_output() function to write to an image file + matrix_type betas (contrast.cols(), num_fixels); + vector_type abs_effect_size (num_fixels), std_effect_size (num_fixels), stdev (num_fixels); + { + ProgressBar progress ("estimating beta coefficients, effect size and standard deviation", num_fixels); + for (size_t f = 0; f != num_fixels; ++f) { + const auto design_f = dynamic_cast(glm_test.get())->default_design (f); + auto temp = Math::Stats::GLM::solve_betas (data.row(f), design_f); + betas.col(f) = temp; + temp = Math::Stats::GLM::abs_effect_size (data, design_f, contrast); + abs_effect_size[f] = temp(0,0); + temp = Math::Stats::GLM::std_effect_size (data, design_f, contrast); + std_effect_size[f] = temp(0,0); + temp = Math::Stats::GLM::stdev (data, design_f); + stdev[f] = temp(0,0); + ++progress; + } + } + { + ProgressBar progress ("outputting beta coefficients, effect size and standard deviation", contrast.cols() + 3); + for (ssize_t i = 0; i != contrast.cols(); ++i) { + write_fixel_output (Path::join (output_fixel_directory, "beta" + str(i) + ".mif"), betas.row(i), output_header); + ++progress; + } + write_fixel_output (Path::join (output_fixel_directory, "abs_effect.mif"), abs_effect_size, output_header); ++progress; + write_fixel_output (Path::join (output_fixel_directory, "std_effect.mif"), std_effect_size, output_header); ++progress; + write_fixel_output (Path::join (output_fixel_directory, "std_dev.mif"), stdev, output_header); + } + } else { - ProgressBar progress ("outputting beta coefficients, effect size and standard deviation"); + + ProgressBar progress ("outputting beta coefficients, effect size and standard deviation", contrast.cols() + 7); auto temp = Math::Stats::GLM::solve_betas (data, design); + ++progress; for (ssize_t i = 0; i < contrast.cols(); ++i) { write_fixel_output (Path::join (output_fixel_directory, "beta" + str(i) + ".mif"), temp.row(i), output_header); ++progress; @@ -413,14 +458,7 @@ void run() write_fixel_output (Path::join (output_fixel_directory, "std_effect.mif"), temp.row(0), output_header); ++progress; temp = Math::Stats::GLM::stdev (data, design); ++progress; write_fixel_output (Path::join (output_fixel_directory, "std_dev.mif"), temp.row(0), output_header); - } - // Construct the class for performing the initial statistical tests - std::shared_ptr glm_test; - if (extra_columns.size()) { - glm_test.reset (new GLMTTestVariable (extra_columns, data, design, contrast)); - } else { - glm_test.reset (new GLMTTestFixed (data, design, contrast)); } // Construct the class for performing fixel-based statistical enhancement From 03d6a6fcbd1324cbe6ac4205963b37f11a060cb2 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Thu, 6 Apr 2017 10:13:25 +1000 Subject: [PATCH 0036/1471] fixelcfestats: Fix and speedup GLM statistics for default permutation --- cmd/fixelcfestats.cpp | 77 +++++++++++++++++++++++++++++++++++++++-- core/math/stats/glm.cpp | 31 +++++++++++------ core/math/stats/glm.h | 18 ++++++++-- 3 files changed, 110 insertions(+), 16 deletions(-) diff --git a/cmd/fixelcfestats.cpp b/cmd/fixelcfestats.cpp index 84f07c9ece..826f32484c 100644 --- 
a/cmd/fixelcfestats.cpp +++ b/cmd/fixelcfestats.cpp @@ -415,22 +415,93 @@ void run() // (will still however need to come out as a matrix_type) // Write that value to data vector // Finally, use write_fixel_output() function to write to an image file + // + // TODO Over and above multi-threading, this is very slow + // Need to find out whether it's something to do with: + // - The data loading + // - The increased number of matrix solves + // - The fact that the matrices are not transposed (like they are for GLMTTestFixed) + // Things to try: + // - Use GLMTTestFixed class rather than solve_betas() + // (is JacobiSVD slower than getting the pseudoinverse?) matrix_type betas (contrast.cols(), num_fixels); vector_type abs_effect_size (num_fixels), std_effect_size (num_fixels), stdev (num_fixels); { + class Source + { + public: + Source (const size_t num_fixels) : + num_fixels (num_fixels), + counter (0), + progress ("estimating beta coefficients, effect size and standard deviation", num_fixels) { } + bool operator() (size_t& fixel_index) + { + fixel_index = counter++; + if (counter >= num_fixels) + return false; + ++progress; + return true; + } + private: + const size_t num_fixels; + size_t counter; + ProgressBar progress; + }; + + class Functor + { + public: + Functor (const matrix_type& data, std::shared_ptr glm_test, const matrix_type& contrasts, + matrix_type& betas, vector_type& abs_effect_size, vector_type& std_effect_size, vector_type& stdev) : + data (data), + glm_test (glm_test), + contrasts (contrasts), + global_betas (betas), + global_abs_effect_size (abs_effect_size), + global_std_effect_size (std_effect_size), + global_stdev (stdev) { } + bool operator() (const size_t& fixel_index) + { + const matrix_type data_f = data.row (fixel_index); + const matrix_type design_f = dynamic_cast(glm_test.get())->default_design (fixel_index); + Math::Stats::GLM::all_stats (data_f, design_f, contrasts, + local_betas, local_abs_effect_size, local_std_effect_size, local_stdev); + global_betas.col (fixel_index) = local_betas; + global_abs_effect_size[fixel_index] = local_abs_effect_size(0,0); + global_std_effect_size[fixel_index] = local_std_effect_size(0,0); + global_stdev[fixel_index] = local_stdev(0,0); + return true; + } + + private: + const matrix_type& data; + const std::shared_ptr glm_test; + const matrix_type& contrasts; + matrix_type& global_betas; + vector_type& global_abs_effect_size, global_std_effect_size, global_stdev; + matrix_type local_betas, local_abs_effect_size, local_std_effect_size, local_stdev; + std::shared_ptr progress; + }; + + Source source (num_fixels); + Functor functor (data, glm_test, contrast, + betas, abs_effect_size, std_effect_size, stdev); + Thread::run_queue (source, size_t(), functor); +/* ProgressBar progress ("estimating beta coefficients, effect size and standard deviation", num_fixels); for (size_t f = 0; f != num_fixels; ++f) { const auto design_f = dynamic_cast(glm_test.get())->default_design (f); auto temp = Math::Stats::GLM::solve_betas (data.row(f), design_f); betas.col(f) = temp; - temp = Math::Stats::GLM::abs_effect_size (data, design_f, contrast); + temp = Math::Stats::GLM::abs_effect_size (data.row(f), design_f, contrast); abs_effect_size[f] = temp(0,0); - temp = Math::Stats::GLM::std_effect_size (data, design_f, contrast); + temp = Math::Stats::GLM::std_effect_size (data.row(f), design_f, contrast); std_effect_size[f] = temp(0,0); - temp = Math::Stats::GLM::stdev (data, design_f); + temp = Math::Stats::GLM::stdev (data.row(f), design_f); stdev[f] = 
temp(0,0); ++progress; } +*/ } { ProgressBar progress ("outputting beta coefficients, effect size and standard deviation", contrast.cols() + 3); diff --git a/core/math/stats/glm.cpp b/core/math/stats/glm.cpp index 840118bee9..5e2c5703fe 100644 --- a/core/math/stats/glm.cpp +++ b/core/math/stats/glm.cpp @@ -28,23 +28,12 @@ namespace MR namespace GLM { - - - - - - - - - - matrix_type solve_betas (const matrix_type& measurements, const matrix_type& design) { return design.jacobiSvd(Eigen::ComputeThinU | Eigen::ComputeThinV).solve(measurements.transpose()); } - matrix_type abs_effect_size (const matrix_type& measurements, const matrix_type& design, const matrix_type& contrasts) { return contrasts * solve_betas (measurements, design); @@ -65,6 +54,26 @@ namespace MR { return abs_effect_size (measurements, design, contrasts).array() / stdev (measurements, design).array(); } + + + void all_stats (const matrix_type& measurements, + const matrix_type& design, + const matrix_type& contrasts, + matrix_type& betas, + matrix_type& abs_effect_size, + matrix_type& std_effect_size, + matrix_type& stdev) + { + betas = solve_betas (measurements, design); + abs_effect_size = contrasts * betas; + matrix_type residuals = measurements.transpose() - design * betas; + residuals = residuals.array().pow(2.0); + matrix_type one_over_dof (1, measurements.cols()); + one_over_dof.fill (1.0 / value_type(design.rows()-Math::rank (design))); + stdev = (one_over_dof * residuals).array().sqrt(); + std_effect_size = abs_effect_size.array() / stdev.array(); + } + } diff --git a/core/math/stats/glm.h b/core/math/stats/glm.h index e9a03152d0..1fd01b5ad1 100644 --- a/core/math/stats/glm.h +++ b/core/math/stats/glm.h @@ -36,7 +36,7 @@ namespace MR /*! Compute a matrix of the beta coefficients * @param measurements a matrix storing the measured data for each subject in a column * @param design the design matrix (unlike other packages a column of ones is NOT automatically added for correlation analysis) - * @return the matrix containing the output effect + * @return the matrix containing the output GLM betas */ matrix_type solve_betas (const matrix_type& measurements, const matrix_type& design); @@ -55,7 +55,7 @@ namespace MR /*! Compute the pooled standard deviation * @param measurements a matrix storing the measured data for each subject in a column * @param design the design matrix (unlike other packages a column of ones is NOT automatically added for correlation analysis) - * @return the matrix containing the output standard deviation size + * @return the matrix containing the output standard deviation */ matrix_type stdev (const matrix_type& measurements, const matrix_type& design); @@ -68,6 +68,20 @@ namespace MR * @return the matrix containing the output standardised effect size */ matrix_type std_effect_size (const matrix_type& measurements, const matrix_type& design, const matrix_type& contrast); + + + + /*! 
Compute all GLM-related statistics + * @param measurements a matrix storing the measured data for each subject in a column + * @param design the design matrix (unlike other packages a column of ones is NOT automatically added for correlation analysis) + * @param contrast a matrix defining the group difference + * @param betas the matrix containing the output GLM betas + * @param abs_effect_size the matrix containing the output effect + * @param std_effect_size the matrix containing the output standardised effect size + * @param stdev the matrix containing the output standard deviation + */ + void all_stats (const matrix_type& measurements, const matrix_type& design, const matrix_type& contrasts, + matrix_type& betas, matrix_type& abs_effect_size, matrix_type& std_effect_size, matrix_type& stdev); //! @} } // End GLM namespace From d2dde19ce8b77221b3d64e4cb636b8c14599193e Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Thu, 6 Apr 2017 12:07:10 +1000 Subject: [PATCH 0037/1471] fixelcfestats: Some code cleanup --- cmd/fixelcfestats.cpp | 54 ++++++++++++++----------------------------- 1 file changed, 17 insertions(+), 37 deletions(-) diff --git a/cmd/fixelcfestats.cpp b/cmd/fixelcfestats.cpp index 826f32484c..9a56388b50 100644 --- a/cmd/fixelcfestats.cpp +++ b/cmd/fixelcfestats.cpp @@ -415,15 +415,6 @@ void run() // (will still however need to come out as a matrix_type) // Write that value to data vector // Finally, use write_fixel_output() function to write to an image file - // - // TODO Over and above multi-threading, this is very slow - // Need to find out whether it's something to do with: - // - The data loading - // - The increased number of matrix solves - // - The fact that the matrices are not transposed (like they are for GLMTTestFixed) - // Things to try: - // - Use GLMTTestFixed class rather than solve_betas() - // (is JacobiSVD slower than getting the pseudoinverse?) 
matrix_type betas (contrast.cols(), num_fixels); vector_type abs_effect_size (num_fixels), std_effect_size (num_fixels), stdev (num_fixels); { @@ -433,19 +424,22 @@ void run() Source (const size_t num_fixels) : num_fixels (num_fixels), counter (0), - progress ("estimating beta coefficients, effect size and standard deviation", num_fixels) { } + progress (new ProgressBar ("estimating beta coefficients, effect size and standard deviation", num_fixels)) { } bool operator() (size_t& fixel_index) { fixel_index = counter++; - if (counter >= num_fixels) + if (counter >= num_fixels) { + progress.reset(); return false; - ++progress; + } + assert (progress); + ++(*progress); return true; } private: const size_t num_fixels; size_t counter; - ProgressBar progress; + std::unique_ptr progress; }; class Functor @@ -487,21 +481,6 @@ void run() Functor functor (data, glm_test, contrast, betas, abs_effect_size, std_effect_size, stdev); Thread::run_queue (source, size_t(), functor); -/* - ProgressBar progress ("estimating beta coefficients, effect size and standard deviation", num_fixels); - for (size_t f = 0; f != num_fixels; ++f) { - const auto design_f = dynamic_cast(glm_test.get())->default_design (f); - auto temp = Math::Stats::GLM::solve_betas (data.row(f), design_f); - betas.col(f) = temp; - temp = Math::Stats::GLM::abs_effect_size (data.row(f), design_f, contrast); - abs_effect_size[f] = temp(0,0); - temp = Math::Stats::GLM::std_effect_size (data.row(f), design_f, contrast); - std_effect_size[f] = temp(0,0); - temp = Math::Stats::GLM::stdev (data.row(f), design_f); - stdev[f] = temp(0,0); - ++progress; - } -*/ } { ProgressBar progress ("outputting beta coefficients, effect size and standard deviation", contrast.cols() + 3); @@ -516,19 +495,20 @@ void run() } else { - ProgressBar progress ("outputting beta coefficients, effect size and standard deviation", contrast.cols() + 7); - auto temp = Math::Stats::GLM::solve_betas (data, design); + ProgressBar progress ("outputting beta coefficients, effect size and standard deviation", contrast.cols() + 4); + matrix_type betas, abs_effect_size, std_effect_size, stdev; + Math::Stats::GLM::all_stats (data, design, contrast, + betas, abs_effect_size, std_effect_size, stdev); ++progress; for (ssize_t i = 0; i < contrast.cols(); ++i) { - write_fixel_output (Path::join (output_fixel_directory, "beta" + str(i) + ".mif"), temp.row(i), output_header); + write_fixel_output (Path::join (output_fixel_directory, "beta" + str(i) + ".mif"), betas.row(i), output_header); ++progress; } - temp = Math::Stats::GLM::abs_effect_size (data, design, contrast); ++progress; - write_fixel_output (Path::join (output_fixel_directory, "abs_effect.mif"), temp.row(0), output_header); ++progress; - temp = Math::Stats::GLM::std_effect_size (data, design, contrast); ++progress; - write_fixel_output (Path::join (output_fixel_directory, "std_effect.mif"), temp.row(0), output_header); ++progress; - temp = Math::Stats::GLM::stdev (data, design); ++progress; - write_fixel_output (Path::join (output_fixel_directory, "std_dev.mif"), temp.row(0), output_header); + write_fixel_output (Path::join (output_fixel_directory, "abs_effect.mif"), abs_effect_size.row(0), output_header); + ++progress; + write_fixel_output (Path::join (output_fixel_directory, "std_effect.mif"), std_effect_size.row(0), output_header); + ++progress; + write_fixel_output (Path::join (output_fixel_directory, "std_dev.mif"), stdev.row(0), output_header); } From c14c11e048ab49346d9bf5f4d78bd4fb1cd6b1c6 Mon Sep 17 00:00:00 2001 From: 
Robert Smith Date: Fri, 7 Apr 2017 13:53:57 +1000 Subject: [PATCH 0038/1471] fixelcfestats: Playing with memory alignment issues --- cmd/fixelcfestats.cpp | 6 +++--- core/math/stats/glm.h | 2 +- core/math/stats/import.h | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/cmd/fixelcfestats.cpp b/cmd/fixelcfestats.cpp index 9a56388b50..608de232fc 100644 --- a/cmd/fixelcfestats.cpp +++ b/cmd/fixelcfestats.cpp @@ -137,7 +137,7 @@ void write_fixel_output (const std::string& filename, // specific subject based on the string path to the image file for // that subject class SubjectFixelImport : public SubjectDataImportBase -{ NOMEMALIGN +{ MEMALIGN(SubjectFixelImport) public: SubjectFixelImport (const std::string& path) : SubjectDataImportBase (path), @@ -419,7 +419,7 @@ void run() vector_type abs_effect_size (num_fixels), std_effect_size (num_fixels), stdev (num_fixels); { class Source - { + { NOMEMALIGN public: Source (const size_t num_fixels) : num_fixels (num_fixels), @@ -443,7 +443,7 @@ void run() }; class Functor - { + { MEMALIGN(Functor) public: Functor (const matrix_type& data, std::shared_ptr glm_test, const matrix_type& contrasts, matrix_type& betas, vector_type& abs_effect_size, vector_type& std_effect_size, vector_type& stdev) : diff --git a/core/math/stats/glm.h b/core/math/stats/glm.h index 1fd01b5ad1..badec8c7f2 100644 --- a/core/math/stats/glm.h +++ b/core/math/stats/glm.h @@ -193,7 +193,7 @@ namespace MR * particular type of data being tested. Therefore an Importer class must be * defined that is responsible for acquiring and vectorising these data. */ - class GLMTTestVariable : public GLMTestBase { NOMEMALIGN + class GLMTTestVariable : public GLMTestBase { MEMALIGN(GLMTTestVariable) public: GLMTTestVariable (const vector& importers, const matrix_type& measurements, const matrix_type& design, const matrix_type& contrasts); diff --git a/core/math/stats/import.h b/core/math/stats/import.h index f0c28e39ec..bc3e5a9e0a 100644 --- a/core/math/stats/import.h +++ b/core/math/stats/import.h @@ -45,7 +45,7 @@ namespace MR * cases, within the design matrix). */ class SubjectDataImportBase - { + { NOMEMALIGN public: SubjectDataImportBase (const std::string& path) : path (path) { } @@ -86,7 +86,7 @@ namespace MR // for each subject a mechanism of data access is spawned & remains open throughout // processing. class CohortDataImport - { + { NOMEMALIGN public: CohortDataImport() { } From 672c0fa3b38fe2ba4a8e5ad2d7649f22188069eb Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Fri, 7 Apr 2017 14:29:59 +1000 Subject: [PATCH 0039/1471] fixelcfestats: Avoid use of make_shared due to memory alignment See #957. --- core/math/stats/import.h | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/core/math/stats/import.h b/core/math/stats/import.h index bc3e5a9e0a..c29b2ea5f3 100644 --- a/core/math/stats/import.h +++ b/core/math/stats/import.h @@ -72,13 +72,6 @@ namespace MR - // TODO Implementation of the above class would be more difficult for 0.3.15 version of - // fixelcfestats because it would need to keep track of a mapping between template - // fixel index, and subject voxel / fixel index. Would it be preferable to do the - // 0.3.16 merge first? 
- - - // During the initial import, the above class can simply be fed one subject at a time // according to per-file path // However for use in GLMTTestVariable, a class is needed that stores a list of text files, @@ -110,7 +103,7 @@ namespace MR } protected: - std::vector> files; + vector> files; }; @@ -118,7 +111,7 @@ namespace MR template void CohortDataImport::initialise (const std::string& path) { - // TODO Read the provided text file one at a time + // Read the provided text file one at a time // For each file, create an instance of SubjectDataImport // (which must derive from SubjectDataImportBase) const std::string directory = Path::dirname (path); @@ -131,7 +124,8 @@ namespace MR if (line.size()) { const std::string filename (Path::join (directory, line)); try { - files.push_back (std::make_shared (filename)); + std::shared_ptr subject (new SubjectDataImport (filename)); + files.emplace_back (subject); } catch (Exception& e) { throw Exception (e, "Reading text file \"" + Path::basename (path) + "\": input image data file not found: \"" + filename + "\""); } From 054aaa7bc3cd41dfe8d1c4af189d25cafa084934 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Tue, 11 Apr 2017 13:49:01 +1000 Subject: [PATCH 0040/1471] fixelcfestats: Two bug fixes - Incorrect definition of non-const references to global matrices during calculation of default statistics resulting in zero stdev and std_effect_size. - Incorrect calculation of degrees of freedom in GLMTTestVariable::ttest() resulting in infinite variance and zero t-values. --- cmd/fixelcfestats.cpp | 5 +++-- core/math/stats/glm.cpp | 25 ++++++++++++++++++++++++- 2 files changed, 27 insertions(+), 3 deletions(-) diff --git a/cmd/fixelcfestats.cpp b/cmd/fixelcfestats.cpp index 608de232fc..05ec73661f 100644 --- a/cmd/fixelcfestats.cpp +++ b/cmd/fixelcfestats.cpp @@ -472,9 +472,10 @@ void run() const std::shared_ptr glm_test; const matrix_type& contrasts; matrix_type& global_betas; - vector_type& global_abs_effect_size, global_std_effect_size, global_stdev; + vector_type& global_abs_effect_size; + vector_type& global_std_effect_size; + vector_type& global_stdev; matrix_type local_betas, local_abs_effect_size, local_std_effect_size, local_stdev; - std::shared_ptr progress; }; Source source (num_fixels); diff --git a/core/math/stats/glm.cpp b/core/math/stats/glm.cpp index 5e2c5703fe..9a624faa0a 100644 --- a/core/math/stats/glm.cpp +++ b/core/math/stats/glm.cpp @@ -14,6 +14,8 @@ #include "math/stats/glm.h" +#include "debug.h" + #define GLM_BATCH_SIZE 1024 namespace MR @@ -65,13 +67,22 @@ namespace MR matrix_type& stdev) { betas = solve_betas (measurements, design); + //std::cerr << "Betas: " << betas.rows() << " x " << betas.cols() << ", max " << betas.array().maxCoeff() << "\n"; abs_effect_size = contrasts * betas; + //std::cerr << "abs_effect_size: " << abs_effect_size.rows() << " x " << abs_effect_size.cols() << ", max " << abs_effect_size.array().maxCoeff() << "\n"; matrix_type residuals = measurements.transpose() - design * betas; residuals = residuals.array().pow(2.0); + //std::cerr << "residuals: " << residuals.rows() << " x " << residuals.cols() << ", max " << residuals.array().maxCoeff() << "\n"; matrix_type one_over_dof (1, measurements.cols()); one_over_dof.fill (1.0 / value_type(design.rows()-Math::rank (design))); + //std::cerr << "one_over_dof: " << one_over_dof.rows() << " x " << one_over_dof.cols() << ", max " << one_over_dof.array().maxCoeff() << "\n"; + //VAR (design.rows()); + //VAR (Math::rank (design)); stdev = (one_over_dof * 
residuals).array().sqrt(); + //std::cerr << "stdev: " << stdev.rows() << " x " << stdev.cols() << ", max " << stdev.array().maxCoeff() << "\n"; std_effect_size = abs_effect_size.array() / stdev.array(); + //std::cerr << "std_effect_size: " << std_effect_size.rows() << " x " << std_effect_size.cols() << ", max " << std_effect_size.array().maxCoeff() << "\n"; + //TRACE; } } @@ -257,21 +268,33 @@ namespace MR vector_type& betas, vector_type& residuals) const { + //std::cerr << "Design: " << design.rows() << " x " << design.cols() << ", max " << design.array().maxCoeff() << "\n"; + //std::cerr << "Measurements: " << measurements.rows() << " x " << measurements.cols() << ", max " << measurements.array().maxCoeff() << "\n"; matrix_type pinv_design = Math::pinv (design); + //std::cerr << "PINV Design: " << pinv_design.rows() << " x " << pinv_design.cols() << ", max " << pinv_design.array().maxCoeff() << "\n"; const matrix_type XtX = design.transpose() * design; + //std::cerr << "XtX: " << XtX.rows() << " x " << XtX.cols() << ", max " << XtX.array().maxCoeff() << "\n"; const matrix_type pinv_XtX = (XtX.transpose() * XtX).fullPivLu().solve (XtX.transpose()); + //std::cerr << "PINV XtX: " << pinv_XtX.rows() << " x " << pinv_XtX.cols() << ", max " << pinv_XtX.array().maxCoeff() << "\n"; betas = pinv_design * measurements.matrix(); + //std::cerr << "Betas: " << betas.rows() << " x " << betas.cols() << ", max " << betas.array().maxCoeff() << "\n"; residuals = measurements - (design * betas.matrix()).array(); + //std::cerr << "Residuals: " << residuals.rows() << " x " << residuals.cols() << ", max " << residuals.array().maxCoeff() << "\n"; tvalues = c * betas.matrix(); - const default_type variance = residuals.matrix().squaredNorm() / (design.cols() - rank(design)); + //std::cerr << "T-values: " << tvalues.rows() << " x " << tvalues.cols() << ", max " << tvalues.array().maxCoeff() << "\n"; + //VAR (Math::rank (design)); + const default_type variance = residuals.matrix().squaredNorm() / default_type(design.rows() - Math::rank(design)); + //VAR (variance); // The fact that we're only able to test one element at a time here should be // placing a restriction on the dimensionality of tvalues // Previously, could be (number of elements) * (number of contrasts); // now can only reflect the number of contrasts for (size_t n = 0; n != size_t(tvalues.size()); ++n) { const default_type ct_pinv_XtX_c = c.row(n).dot (pinv_XtX * c.row(n).transpose()); + //VAR (ct_pinv_XtX_c); tvalues[n] /= std::sqrt (variance * ct_pinv_XtX_c); } + //std::cerr << "T-values: " << tvalues.rows() << " x " << tvalues.cols() << ", max " << tvalues.array().maxCoeff() << "\n"; } From 6dd4e54d7b54dc8a21edf0e7e1f831380fa1ab38 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Tue, 11 Apr 2017 15:11:29 +1000 Subject: [PATCH 0041/1471] fixelcffestats: Forgot to multi-thread default permutation statistics calculation --- cmd/fixelcfestats.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/fixelcfestats.cpp b/cmd/fixelcfestats.cpp index 05ec73661f..9cae46b315 100644 --- a/cmd/fixelcfestats.cpp +++ b/cmd/fixelcfestats.cpp @@ -481,7 +481,7 @@ void run() Source source (num_fixels); Functor functor (data, glm_test, contrast, betas, abs_effect_size, std_effect_size, stdev); - Thread::run_queue (source, size_t(), functor); + Thread::run_queue (source, size_t(), Thread::multi (functor)); } { ProgressBar progress ("outputting beta coefficients, effect size and standard deviation", contrast.cols() + 3); From 
e410c6bc9a504ada4d32010d21dc44f6bf19c3d5 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Tue, 11 Apr 2017 15:40:58 +1000 Subject: [PATCH 0042/1471] mrstats: Fix median value string width --- core/stats.h | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/core/stats.h b/core/stats.h index 806c2f4d64..7ae8797b85 100644 --- a/core/stats.h +++ b/core/stats.h @@ -106,7 +106,11 @@ namespace MR std::cout << std::setw(width) << std::right << ( count ? str(mean) : "N/A" ); if (!is_complex) { - std::cout << " " << std::setw(width) << std::right << ( count ? str(Math::median (values)) : "N/A" ); + std::cout << " " << std::setw(width) << std::right; + if (count) + std::cout << Math::median (values); + else + std::cout << "N/A"; } std::cout << " " << std::setw(width) << std::right << ( count > 1 ? str(std) : "N/A" ) << " " << std::setw(width) << std::right << ( count ? str(min) : "N/A" ) From 4294f2a142fadc33e11e76b3e8ce6df2a68d7b20 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Wed, 12 Apr 2017 16:46:43 +1000 Subject: [PATCH 0043/1471] fixelcfestats: Fix memory allocation for nonstationarity correction --- cmd/fixelcfestats.cpp | 7 ++++--- core/math/stats/glm.h | 5 +++++ src/stats/permtest.cpp | 2 ++ 3 files changed, 11 insertions(+), 3 deletions(-) diff --git a/cmd/fixelcfestats.cpp b/cmd/fixelcfestats.cpp index 9cae46b315..ca0658255d 100644 --- a/cmd/fixelcfestats.cpp +++ b/cmd/fixelcfestats.cpp @@ -50,7 +50,7 @@ using Stats::CFE::connectivity_value_type; void usage () { - AUTHOR = "David Raffelt (david.raffelt@florey.edu.au)"; + AUTHOR = "David Raffelt (david.raffelt@florey.edu.au) and Robert E. Smith (robert.smith@florey.edu.au)"; SYNOPSIS = "Fixel-based analysis using connectivity-based fixel enhancement and non-parametric permutation testing"; @@ -323,6 +323,7 @@ void run() } { + // TODO This could trivially be multi-threaded; fixels are handled independently ProgressBar progress ("normalising and thresholding fixel-fixel connectivity matrix", num_fixels); for (uint32_t fixel = 0; fixel < num_fixels; ++fixel) { @@ -481,7 +482,7 @@ void run() Source source (num_fixels); Functor functor (data, glm_test, contrast, betas, abs_effect_size, std_effect_size, stdev); - Thread::run_queue (source, size_t(), Thread::multi (functor)); + Thread::run_queue (source, Thread::batch (size_t()), Thread::multi (functor)); } { ProgressBar progress ("outputting beta coefficients, effect size and standard deviation", contrast.cols() + 3); @@ -519,7 +520,7 @@ void run() // If performing non-stationarity adjustment we need to pre-compute the empirical CFE statistic vector_type empirical_cfe_statistic; if (do_nonstationary_adjustment) { - + empirical_cfe_statistic = vector_type::Zero (num_fixels); if (permutations_nonstationary.size()) { Stats::PermTest::PermutationStack permutations (permutations_nonstationary, "precomputing empirical statistic for non-stationarity adjustment"); Stats::PermTest::precompute_empirical_stat (glm_test, cfe_integrator, permutations, empirical_cfe_statistic); diff --git a/core/math/stats/glm.h b/core/math/stats/glm.h index badec8c7f2..900c6dc683 100644 --- a/core/math/stats/glm.h +++ b/core/math/stats/glm.h @@ -193,6 +193,11 @@ namespace MR * particular type of data being tested. Therefore an Importer class must be * defined that is responsible for acquiring and vectorising these data. */ + + + // TODO This is still slower than I'd like + // Would some transposing trickery help with execution speed? + // Is it to do with the data fetching? 
class GLMTTestVariable : public GLMTestBase { MEMALIGN(GLMTTestVariable) public: GLMTTestVariable (const vector& importers, const matrix_type& measurements, const matrix_type& design, const matrix_type& contrasts); diff --git a/src/stats/permtest.cpp b/src/stats/permtest.cpp index a62ce6ff2c..04d8f63e66 100644 --- a/src/stats/permtest.cpp +++ b/src/stats/permtest.cpp @@ -85,6 +85,8 @@ namespace MR bool PreProcessor::operator() (const Permutation& permutation) { + if (permutation.data.empty()) + return false; (*stats_calculator) (permutation.data, stats); (*enhancer) (stats, enhanced_stats); for (ssize_t i = 0; i < enhanced_stats.size(); ++i) { From 740f25eecd9d8047270c50dedcdba0078b6a7530 Mon Sep 17 00:00:00 2001 From: Daan Christiaens Date: Tue, 2 May 2017 12:03:11 +0100 Subject: [PATCH 0044/1471] Add comments and standardise whitespace. --- cmd/dwidenoise.cpp | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/cmd/dwidenoise.cpp b/cmd/dwidenoise.cpp index 9333b36fa5..758a2a39c3 100644 --- a/cmd/dwidenoise.cpp +++ b/cmd/dwidenoise.cpp @@ -14,9 +14,11 @@ #include "command.h" #include "image.h" + #include #include + #define DEFAULT_SIZE 5 using namespace MR; @@ -24,6 +26,7 @@ using namespace App; const char* const dtypes[] = { "float32", "float64", NULL }; + void usage () { SYNOPSIS = "Denoise DWI data and estimate the noise level based on the optimal threshold for PCA"; @@ -93,13 +96,14 @@ using value_type = float; template -class DenoisingFunctor { MEMALIGN(DenoisingFunctor) - public: +class DenoisingFunctor { + MEMALIGN(DenoisingFunctor) + +public: typedef Eigen::Matrix MatrixX; typedef Eigen::Matrix VectorX; - public: DenoisingFunctor (ImageType& dwi, vector extent, Image& mask, ImageType& noise) : extent {{extent[0]/2, extent[1]/2, extent[2]/2}}, m (dwi.size(3)), @@ -114,6 +118,7 @@ class DenoisingFunctor { MEMALIGN(DenoisingFunctor) void operator () (ImageType& dwi, ImageType& out) { + // Process voxels in mask only if (mask.valid()) { assign_pos_of (dwi).to (mask); if (!mask.value()) @@ -174,7 +179,6 @@ class DenoisingFunctor { MEMALIGN(DenoisingFunctor) } } - void load_data (ImageType& dwi) { pos[0] = dwi.index(0); pos[1] = dwi.index(1); pos[2] = dwi.index(2); @@ -194,7 +198,7 @@ class DenoisingFunctor { MEMALIGN(DenoisingFunctor) dwi.index(0) = pos[0]; dwi.index(1) = pos[1]; dwi.index(2) = pos[2]; - } + } private: const std::array extent; From 25aa63eba515cf5b88200084da3b9b1f27d75796 Mon Sep 17 00:00:00 2001 From: Daan Christiaens Date: Tue, 2 May 2017 12:05:31 +0100 Subject: [PATCH 0045/1471] dwidenoise: correct MP-PCA threshold for mn, which in some cases led to asymmetric numerical instabilities. Here, we modify the formulas to be consistent with the m>n case at all times, which we found to be most numerically stable. 
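For reference, the quantity being thresholded in this loop can be summarised as follows (our notation; lambda_1 <= ... <= lambda_r are the normalised eigenvalues of the patch Gram matrix, lambda_ref the reference eigenvalue the code calls lam_r, and gamma_p the aspect-ratio term called gam):

  \[
    \hat{\sigma}^2_{\mathrm{spread}}(p) \;=\; \frac{\lambda_p - \lambda_{\mathrm{ref}}}{4\sqrt{\gamma_p}} ,
    \qquad
    \hat{\sigma}^2_{\mathrm{mean}}(p) \;\approx\; \frac{1}{p} \sum_{i \le p} \lambda_i .
  \]

These are the code's sigsq2 and sigsq1 respectively (the exact divisor of the running sum differs between the patches in this series). The first p components are treated as pure noise for as long as the spread-based estimate stays below the running mean, and the last p for which this holds appears to be what ends up in cutoff_p, with the corresponding estimate stored in sigma2. What this and the following dwidenoise patches change is only how the eigenvalues, gamma and the running mean are normalised, eventually settling on division by max(m,n).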
--- cmd/dwidenoise.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cmd/dwidenoise.cpp b/cmd/dwidenoise.cpp index 758a2a39c3..d5823b950c 100644 --- a/cmd/dwidenoise.cpp +++ b/cmd/dwidenoise.cpp @@ -139,15 +139,15 @@ class DenoisingFunctor { VectorX s = eig.eigenvalues(); // Marchenko-Pastur optimal threshold - const double lam_r = std::max(double(s[1]), 0.0) / n; + const double lam_r = std::max(double(s[1]), 0.0) / r; double clam = 0.0; sigma2 = NaN; ssize_t cutoff_p = 0; for (ssize_t p = 1; p < r; ++p) { - double lam = std::max(double(s[p]), 0.0) / n; + double lam = std::max(double(s[p]), 0.0) / r; clam += lam; - double gam = double(m-r+p+1) / double(n); + double gam = double(std::max(m,n)-r+p+1) / double(r); double sigsq1 = clam / (p * std::max (gam, 1.0)); double sigsq2 = (lam - lam_r) / (4.0 * std::sqrt(gam)); // sigsq2 > sigsq1 if signal else noise From 6d49d2f507142b6aca7794f5d7bbaa4667c1235c Mon Sep 17 00:00:00 2001 From: Daan Christiaens Date: Tue, 2 May 2017 14:04:50 +0100 Subject: [PATCH 0046/1471] dwidenoise: apply demeaning along longest matrix dimension. --- cmd/dwidenoise.cpp | 26 +++++++++++++++++++------- 1 file changed, 19 insertions(+), 7 deletions(-) diff --git a/cmd/dwidenoise.cpp b/cmd/dwidenoise.cpp index d5823b950c..0f8f8867b0 100644 --- a/cmd/dwidenoise.cpp +++ b/cmd/dwidenoise.cpp @@ -110,7 +110,7 @@ class DenoisingFunctor { n (extent[0]*extent[1]*extent[2]), r ((m() = X * X.adjoint(); + } else + { + Xm = X.colwise().mean(); + X.rowwise() -= Xm.transpose(); // data centring + XtX.template triangularView() = X.adjoint() * X; + } Eigen::SelfAdjointEigenSolver eig (XtX); // eigenvalues provide squared singular values, sorted in increasing order: VectorX s = eig.eigenvalues(); @@ -169,8 +179,14 @@ class DenoisingFunctor { // Store output assign_pos_of(dwi).to(out); - for (auto l = Loop (3) (out); l; ++l) - out.value() = value_type (X(out.index(3), n/2) + Xm(out.index(3))); + if (m <= n) { + for (auto l = Loop (3) (out); l; ++l) + out.value() = value_type (X(out.index(3), n/2) + Xm(out.index(3))); + } + else { + for (auto l = Loop (3) (out); l; ++l) + out.value() = value_type (X(out.index(3), n/2) + Xm(n/2)); + } // store noise map if requested: if (noise.valid()) { @@ -190,10 +206,6 @@ class DenoisingFunctor { if (! is_out_of_bounds(dwi,0,3)) X.col(k) = dwi.row(3); - // data centring - Xm = X.rowwise().mean(); - X.colwise() -= Xm; - // reset image position dwi.index(0) = pos[0]; dwi.index(1) = pos[1]; From 01100924c42f2819c2f6ca375f7ea72db803e444 Mon Sep 17 00:00:00 2001 From: Daan Christiaens Date: Tue, 2 May 2017 15:02:47 +0100 Subject: [PATCH 0047/1471] dwidenoise: fix cumsum with data centering. --- cmd/dwidenoise.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/dwidenoise.cpp b/cmd/dwidenoise.cpp index 0f8f8867b0..9522b65703 100644 --- a/cmd/dwidenoise.cpp +++ b/cmd/dwidenoise.cpp @@ -149,11 +149,11 @@ class DenoisingFunctor { VectorX s = eig.eigenvalues(); // Marchenko-Pastur optimal threshold - const double lam_r = std::max(double(s[1]), 0.0) / r; + const double lam_r = std::max(double(s[0]), 0.0) / r; double clam = 0.0; sigma2 = NaN; ssize_t cutoff_p = 0; - for (ssize_t p = 1; p < r; ++p) + for (ssize_t p = 0; p < r; ++p) { double lam = std::max(double(s[p]), 0.0) / r; clam += lam; From 403d35838b30b098bdad7c5724d56ef124ee1479 Mon Sep 17 00:00:00 2001 From: Daan Christiaens Date: Wed, 3 May 2017 16:31:24 +0100 Subject: [PATCH 0048/1471] dwidenoise: use mn. 
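The resulting selection rule is easiest to read in isolation. The sketch below mirrors the formulas as they stand after this patch (eigenvalues in increasing order, everything divided by max(m,n), taken from the smaller r x r Gram matrix formed two patches earlier); it is a minimal standalone illustration rather than the actual DenoisingFunctor code: the mp_threshold / MPEstimate names are invented here, and the book-keeping performed when a component still looks like noise (storing the current estimate and component count) is assumed, since those lines are not shown in the hunks below.

  #include <Eigen/Dense>
  #include <algorithm>
  #include <cmath>

  // Illustration only: pick the Marchenko-Pastur cut-off from the eigenvalues s
  // (ascending order) of the Gram matrix of an m x n patch matrix, r = min(m,n).
  struct MPEstimate { double sigma2; Eigen::Index cutoff_p; };

  MPEstimate mp_threshold (const Eigen::VectorXd& s, const Eigen::Index m, const Eigen::Index n)
  {
    const Eigen::Index r = std::min (m, n);
    const double scale = double (std::max (m, n));
    const double lam_r = std::max (double (s[0]), 0.0) / scale;     // smallest eigenvalue
    double clam = 0.0;                                              // running sum of normalised eigenvalues
    MPEstimate out { 0.0, 0 };                                      // (the real code starts sigma2 at NaN)
    for (Eigen::Index p = 0; p < r; ++p) {                          // p+1 = candidate number of noise components
      const double lam = std::max (double (s[p]), 0.0) / scale;
      clam += lam;
      const double gam = double (p+1) / scale;                      // aspect ratio of the candidate noise block
      const double sigsq1 = clam / double (p+1);                    // mean of the candidate noise eigenvalues
      const double sigsq2 = (lam - lam_r) / (4.0 * std::sqrt (gam)); // estimate from the eigenvalue spread
      if (sigsq2 < sigsq1) {                                        // sigsq2 > sigsq1 would indicate signal
        out.sigma2 = sigsq1;                                        // assumed: keep the latest noise estimate
        out.cutoff_p = p + 1;                                       //          and the matching component count
      }
    }
    return out;
  }

Because both the m <= n and m > n branches now divide by max(m,n), the two cases run through identical arithmetic, which is presumably what removes the asymmetric instabilities mentioned a few patches earlier.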
--- cmd/dwidenoise.cpp | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/cmd/dwidenoise.cpp b/cmd/dwidenoise.cpp index 9522b65703..d3fa573439 100644 --- a/cmd/dwidenoise.cpp +++ b/cmd/dwidenoise.cpp @@ -149,16 +149,16 @@ class DenoisingFunctor { VectorX s = eig.eigenvalues(); // Marchenko-Pastur optimal threshold - const double lam_r = std::max(double(s[0]), 0.0) / r; + const double lam_r = std::max(double(s[0]), 0.0) / std::max(m,n); double clam = 0.0; sigma2 = NaN; ssize_t cutoff_p = 0; - for (ssize_t p = 0; p < r; ++p) - { - double lam = std::max(double(s[p]), 0.0) / r; + for (ssize_t p = 0; p < r; ++p) // p+1 is the number of noise components + { // (as opposed to the paper where p is defined as the number of signal components) + double lam = std::max(double(s[p]), 0.0) / std::max(m,n); clam += lam; - double gam = double(std::max(m,n)-r+p+1) / double(r); - double sigsq1 = clam / (p * std::max (gam, 1.0)); + double gam = double(p+1) / std::max(m,n); + double sigsq1 = clam / double(p+1); double sigsq2 = (lam - lam_r) / (4.0 * std::sqrt(gam)); // sigsq2 > sigsq1 if signal else noise if (sigsq2 < sigsq1) { From 2af1e929cf8002b6112bdae84ce976d12fa39676 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Mon, 19 Jun 2017 17:22:47 +1000 Subject: [PATCH 0049/1471] GLM: Permit non-finite numbers NOTE: Not yet tested Detects the presence of non-finite values in either the input data, or element-wise design matrix columns, and omits the relevant rows from the linear regression. Currently only implemented for fixelcfestats. --- cmd/fixelcfestats.cpp | 60 +++++++++++++++++++++++++------------ core/math/stats/glm.cpp | 61 +++++++++++++++++++++++++++++++++----- core/math/stats/glm.h | 3 +- core/math/stats/import.cpp | 14 +++++++++ core/math/stats/import.h | 4 +++ 5 files changed, 115 insertions(+), 27 deletions(-) diff --git a/cmd/fixelcfestats.cpp b/cmd/fixelcfestats.cpp index ca0658255d..db2e1675a7 100644 --- a/cmd/fixelcfestats.cpp +++ b/cmd/fixelcfestats.cpp @@ -152,19 +152,21 @@ class SubjectFixelImport : public SubjectDataImportBase void operator() (matrix_type::ColXpr column) const override { - assert (column.rows() == data.size(0)); + assert (column.rows() == size()); Image temp (data); // For thread-safety column = temp.row(0); } default_type operator[] (const size_t index) const override { - assert (index < size_t(data.size(0))); + assert (index < size()); Image temp (data); // For thread-safety temp.index(0) = index; return default_type(temp.value()); } + size_t size() const override { return data.size(0); } + const Header& header() const { return H; } private: @@ -267,13 +269,18 @@ void run() // Before validating the contrast matrix, we first need to see if there are any // additional design matrix columns coming from fixel-wise subject data vector extra_columns; + bool nans_in_columns = false; opt = get_options ("column"); for (size_t i = 0; i != opt.size(); ++i) { extra_columns.push_back (CohortDataImport()); extra_columns[i].initialise (opt[i][0]); + if (!extra_columns[i].allFinite()) + nans_in_columns = true; } if (extra_columns.size()) { CONSOLE ("number of element-wise design matrix columns: " + str(extra_columns.size())); + if (nans_in_columns) + INFO ("Non-finite values detected in element-wise design matrix columns; individual rows will be removed from fixel-wise design matrices accordingly"); } if (contrast.cols() != design.cols() + ssize_t(extra_columns.size())) @@ -352,13 +359,11 @@ void run() // Normalise smoothing weights value_type sum = 0.0; - 
for (auto smooth_it = smoothing_weights[fixel].begin(); smooth_it != smoothing_weights[fixel].end(); ++smooth_it) { + for (auto smooth_it = smoothing_weights[fixel].begin(); smooth_it != smoothing_weights[fixel].end(); ++smooth_it) sum += smooth_it->second; - } - value_type norm_factor = 1.0 / sum; - for (auto smooth_it = smoothing_weights[fixel].begin(); smooth_it != smoothing_weights[fixel].end(); ++smooth_it) { + const value_type norm_factor = 1.0 / sum; + for (auto smooth_it = smoothing_weights[fixel].begin(); smooth_it != smoothing_weights[fixel].end(); ++smooth_it) smooth_it->second *= norm_factor; - } progress++; } } @@ -375,9 +380,8 @@ void run() // Load input data - matrix_type data (num_fixels, importer.size()); - data.setZero(); - + matrix_type data = matrix_type::Zero (num_fixels, importer.size()); + bool nans_in_data = false; { ProgressBar progress ("loading input images", importer.size()); for (size_t subject = 0; subject < importer.size(); subject++) { @@ -385,23 +389,41 @@ void run() // Smooth the data vector_type smoothed_data (vector_type::Zero (num_fixels)); for (size_t fixel = 0; fixel < num_fixels; ++fixel) { - value_type value = 0.0; - std::map::const_iterator it = smoothing_weights[fixel].begin(); - for (; it != smoothing_weights[fixel].end(); ++it) - value += data (it->first, subject) * it->second; - smoothed_data (fixel) = value; + if (std::isfinite (data (fixel, subject))) { + value_type value = 0.0, sum_weights = 0.0; + std::map::const_iterator it = smoothing_weights[fixel].begin(); + for (; it != smoothing_weights[fixel].end(); ++it) { + if (std::isfinite (data (it->first, subject))) { + value += data (it->first, subject) * it->second; + sum_weights += it->second; + } + } + if (sum_weights) + smoothed_data (fixel) = value / sum_weights; + else + smoothed_data (fixel) = NaN; + } else { + smoothed_data (fixel) = NaN; + } } - if (!smoothed_data.allFinite()) - throw Exception ("Input fixel data \"" + importer[subject]->name() + "\" contains at least one non-finite value"); data.col (subject) = smoothed_data; + if (!smoothed_data.allFinite()) + nans_in_data = true; } progress++; } + if (nans_in_data) { + INFO ("Non-finite values present in data; rows will be removed from fixel-wise design matrices accordingly"); + if (!extra_columns.size()) { + INFO ("(Note that this will result in slower execution than if such values were not present)"); + } + } + // Construct the class for performing the initial statistical tests std::shared_ptr glm_test; - if (extra_columns.size()) { - glm_test.reset (new GLMTTestVariable (extra_columns, data, design, contrast)); + if (extra_columns.size() || nans_in_data) { + glm_test.reset (new GLMTTestVariable (extra_columns, data, design, contrast, nans_in_data, nans_in_columns)); } else { glm_test.reset (new GLMTTestFixed (data, design, contrast)); } diff --git a/core/math/stats/glm.cpp b/core/math/stats/glm.cpp index 9a624faa0a..48a3f028e0 100644 --- a/core/math/stats/glm.cpp +++ b/core/math/stats/glm.cpp @@ -14,6 +14,7 @@ #include "math/stats/glm.h" +#include "bitset.h" #include "debug.h" #define GLM_BATCH_SIZE 1024 @@ -204,9 +205,11 @@ namespace MR - GLMTTestVariable::GLMTTestVariable (const vector& importers, const matrix_type& measurements, const matrix_type& design, const matrix_type& contrasts) : + GLMTTestVariable::GLMTTestVariable (const vector& importers, const matrix_type& measurements, const matrix_type& design, const matrix_type& contrasts, const bool nans_in_data, const bool nans_in_columns) : GLMTestBase (measurements, design, 
contrasts), - importers (importers) + importers (importers), + nans_in_data (nans_in_data), + nans_in_columns (nans_in_columns) { // Make sure that the specified contrasts reflect the full design matrix (with additional // data loaded) @@ -241,12 +244,56 @@ namespace MR for (ssize_t col = 0; col != ssize_t(importers.size()); ++col) extra_data.col(col) = importers[col] (element); - // Make sure the data from the additional columns is appropriately permuted - // (i.e. in the same way as what the fixed portion of the design matrix experienced) - for (ssize_t row = 0; row != X.rows(); ++row) - SX.block(row, X.cols(), 1, importers.size()) = extra_data.row(perm_labelling[row]); + // If there are non-finite values present either in the input + // data or the element-wise design matrix columns (or both), + // need to track which rows are being kept / discarded + BitSet row_mask (X.rows(), true); + if (nans_in_data) { + for (ssize_t row = 0; row != y.rows(); ++row) { + if (!std::isfinite (y (row, element))) + row_mask[row] = false; + } + } + if (nans_in_columns) { + // Bear in mind that we need to test for finite values in the + // row in which this data is going to be written to based on + // the permutation labelling + for (ssize_t row = 0; row != extra_data.rows(); ++row) { + if (!extra_data.row (perm_labelling[row]).allFinite()) + row_mask[row] = false; + } + } + + // Do we need to reduce the size of our matrices / vectors + // based on the presence of non-finite values? + if (row_mask.full()) { + + // Make sure the data from the additional columns is appropriately permuted + // (i.e. in the same way as what the fixed portion of the design matrix experienced) + for (ssize_t row = 0; row != X.rows(); ++row) + SX.block(row, X.cols(), 1, importers.size()) = extra_data.row (perm_labelling[row]); + + ttest (tvalues, SX, y.row(element), betas, residuals); - ttest (tvalues, SX, y.row(element), betas, residuals); + } else { + + const ssize_t new_num_rows = row_mask.count(); + vector_type y_masked (new_num_rows); + matrix_type SX_masked (new_num_rows, X.cols() + importers.size()); + ssize_t new_row = 0; + for (ssize_t old_row = 0; old_row != X.rows(); ++old_row) { + if (row_mask[old_row]) { + y_masked[new_row] = y(old_row, element); + SX_masked.block (new_row, 0, 1, X.cols()) = SX.block (old_row, 0, 1, X.cols()); + SX_masked.block (new_row, X.cols(), 1, importers.size()) = extra_data.row (perm_labelling[old_row]); + ++new_row; + } + } + assert (new_row == new_num_rows); + + ttest (tvalues, SX_masked, y_masked, betas, residuals); + + } // FIXME // Currently output only the first contrast, as is done in GLMTTestFixed diff --git a/core/math/stats/glm.h b/core/math/stats/glm.h index 900c6dc683..e2bc4740ba 100644 --- a/core/math/stats/glm.h +++ b/core/math/stats/glm.h @@ -200,7 +200,7 @@ namespace MR // Is it to do with the data fetching? class GLMTTestVariable : public GLMTestBase { MEMALIGN(GLMTTestVariable) public: - GLMTTestVariable (const vector& importers, const matrix_type& measurements, const matrix_type& design, const matrix_type& contrasts); + GLMTTestVariable (const vector& importers, const matrix_type& measurements, const matrix_type& design, const matrix_type& contrasts, const bool nans_in_data, const bool nans_in_columns); /*! Compute the t-statistics * @param perm_labelling a vector to shuffle the rows in the design matrix (for permutation testing) @@ -220,6 +220,7 @@ namespace MR protected: const vector& importers; + const bool nans_in_data, nans_in_columns; //! generic GLM t-test /*! 
This version of the t-test function does not incorporate the diff --git a/core/math/stats/import.cpp b/core/math/stats/import.cpp index 815f6e89df..1c10dc847a 100644 --- a/core/math/stats/import.cpp +++ b/core/math/stats/import.cpp @@ -37,6 +37,20 @@ namespace MR + bool CohortDataImport::allFinite() const + { + for (size_t i = 0; i != files.size(); ++i) { + for (size_t j = 0; j != files[i]->size(); ++j) { + if ((*files[i])[j]) + return false; + } + } + return true; + } + + + + } } } diff --git a/core/math/stats/import.h b/core/math/stats/import.h index c29b2ea5f3..c9ccbcf9ed 100644 --- a/core/math/stats/import.h +++ b/core/math/stats/import.h @@ -64,6 +64,8 @@ namespace MR const std::string& name() const { return path; } + virtual size_t size() const = 0; + protected: const std::string path; @@ -102,6 +104,8 @@ namespace MR return files[i]; } + bool allFinite() const; + protected: vector> files; }; From eaac89e517632e70e1a0ef4f016e43c311015e3a Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Wed, 21 Jun 2017 17:38:18 +1000 Subject: [PATCH 0050/1471] connectomestats: New option -column Provides functionality for edge-wise explanatory variables, implemented in the same manner as was done for fixelcfestats. --- cmd/connectomestats.cpp | 311 +++++++++++++++++++++++++----------- cmd/fixelcfestats.cpp | 1 + core/math/stats/import.h | 4 + src/connectome/connectome.h | 4 +- 4 files changed, 225 insertions(+), 95 deletions(-) diff --git a/cmd/connectomestats.cpp b/cmd/connectomestats.cpp index 694fde1d17..19b2342a28 100644 --- a/cmd/connectomestats.cpp +++ b/cmd/connectomestats.cpp @@ -19,6 +19,7 @@ #include "file/path.h" #include "math/stats/glm.h" +#include "math/stats/import.h" #include "math/stats/permutation.h" #include "math/stats/typedefs.h" @@ -30,6 +31,10 @@ using namespace MR; using namespace App; +using namespace MR::Math::Stats; + +using Math::Stats::matrix_type; +using Math::Stats::vector_type; const char* algorithms[] = { "nbs", "nbse", "none", nullptr }; @@ -73,7 +78,12 @@ void usage () + OptionGroup ("Additional options for connectomestats") + Option ("threshold", "the t-statistic value to use in threshold-based clustering algorithms") - + Argument ("value").type_float (0.0); + + Argument ("value").type_float (0.0) + + // TODO Generalise this across commands + + Option ("column", "add a column to the design matrix corresponding to subject edge-wise values " + "(the contrast vector length must include columns for these additions)").allow_multiple() + + Argument ("path").type_file_in(); REFERENCES + "* If using the NBS algorithm: \n" @@ -92,10 +102,6 @@ void usage () -using Math::Stats::matrix_type; -using Math::Stats::vector_type; - - void load_tfce_parameters (Stats::TFCE::Wrapper& enhancer) { @@ -107,32 +113,63 @@ void load_tfce_parameters (Stats::TFCE::Wrapper& enhancer) +// Define data importer class that willl obtain connectome data for a +// specific subject based on the string path to the image file for +// that subject +class SubjectConnectomeImport : public SubjectDataImportBase +{ MEMALIGN(SubjectConnectomeImport) + public: + SubjectConnectomeImport (const std::string& path) : + SubjectDataImportBase (path) + { + auto M = load_matrix (path); + Connectome::check (M); + if (Connectome::is_directed (M)) + throw Exception ("Connectome from file \"" + Path::basename (path) + "\" is a directed matrix"); + Connectome::to_upper (M); + Connectome::Mat2Vec mat2vec (M.rows()); + mat2vec.M2V (M, data); + } + + void operator() (matrix_type::ColXpr column) const override + { + assert 
(column.rows() == data.size()); + column = data; + } + + default_type operator[] (const size_t index) const override + { + assert (index < data.size()); + return (data[index]); + } + + size_t size() const override { return data.size(); } + + private: + vector_type data; + +}; + + + void run() { - // Read filenames - vector filenames; - { - std::string folder = Path::dirname (argument[0]); - std::ifstream ifs (argument[0].c_str()); - std::string temp; - while (getline (ifs, temp)) { - std::string filename (Path::join (folder, temp)); - size_t p = filename.find_last_not_of(" \t"); - if (std::string::npos != p) - filename.erase(p+1); - if (filename.size()) { - if (!MR::Path::exists (filename)) - throw Exception ("Input connectome file not found: \"" + filename + "\""); - filenames.push_back (filename); - } - } + // Read file names and check files exist + CohortDataImport importer; + importer.initialise (argument[0]); + CONSOLE ("Number of subjects: " + str(importer.size())); + const size_t num_edges = importer[0]->size(); + + for (size_t i = 1; i < importer.size(); ++i) { + if (importer[i]->size() != importer[0]->size()) + throw Exception ("Size of connectome for subject " + str(i) + " (file \"" + importer[i]->name() + "\" does not match that of first subject"); } - const MR::Connectome::matrix_type example_connectome = load_matrix (filenames.front()); - if (example_connectome.rows() != example_connectome.cols()) - throw Exception ("Connectome of first subject is not square (" + str(example_connectome.rows()) + " x " + str(example_connectome.cols()) + ")"); + // TODO Could determine this from the vector length with the right equation + const MR::Connectome::matrix_type example_connectome = load_matrix (importer[0]->name()); const MR::Connectome::node_t num_nodes = example_connectome.rows(); + Connectome::Mat2Vec mat2vec (num_nodes); // Initialise enhancement algorithm std::shared_ptr enhancer; @@ -168,111 +205,199 @@ void run() // Load design matrix const matrix_type design = load_matrix (argument[2]); - if (size_t(design.rows()) != filenames.size()) - throw Exception ("number of subjects does not match number of rows in design matrix"); + if (size_t(design.rows()) != importer.size()) + throw Exception ("number of subjects (" + str(importer.size()) + ") does not match number of rows in design matrix (" + str(design.rows()) + ")"); + + // Load contrast matrix + matrix_type contrast = load_matrix (argument[3]); + + // Before validating the contrast matrix, we first need to see if there are any + // additional design matrix columns coming from fixel-wise subject data + vector extra_columns; + bool nans_in_columns = false; + auto opt = get_options ("column"); + for (size_t i = 0; i != opt.size(); ++i) { + extra_columns.push_back (CohortDataImport()); + extra_columns[i].initialise (opt[i][0]); + if (!extra_columns[i].allFinite()) + nans_in_columns = true; + } + if (extra_columns.size()) { + CONSOLE ("number of element-wise design matrix columns: " + str(extra_columns.size())); + if (nans_in_columns) + INFO ("Non-finite values detected in element-wise design matrix columns; individual rows will be removed from edge-wise design matrices accordingly"); + } + + // Now we can check the contrast matrix + if (contrast.cols() != design.cols() + ssize_t(extra_columns.size())) + throw Exception ("the number of columns per contrast (" + str(contrast.cols()) + ")" + + " does not equal the number of columns in the design matrix (" + str(design.cols()) + ")" + + (extra_columns.size() ? 
" (taking into account the " + str(extra_columns.size()) + " uses of -column)" : "")); + if (contrast.rows() > 1) + throw Exception ("only a single contrast vector (defined as a row) is currently supported"); + // Load permutations file if supplied - auto opt = get_options("permutations"); + opt = get_options ("permutations"); vector > permutations; if (opt.size()) { - permutations = Math::Stats::Permutation::load_permutations_file (opt[0][0]); + permutations = Permutation::load_permutations_file (opt[0][0]); num_perms = permutations.size(); if (permutations[0].size() != (size_t)design.rows()) throw Exception ("number of rows in the permutations file (" + str(opt[0][0]) + ") does not match number of rows in design matrix"); } // Load non-stationary correction permutations file if supplied - opt = get_options("permutations_nonstationary"); + opt = get_options ("permutations_nonstationary"); vector > permutations_nonstationary; if (opt.size()) { - permutations_nonstationary = Math::Stats::Permutation::load_permutations_file (opt[0][0]); + permutations_nonstationary = Permutation::load_permutations_file (opt[0][0]); nperms_nonstationary = permutations.size(); if (permutations_nonstationary[0].size() != (size_t)design.rows()) throw Exception ("number of rows in the nonstationary permutations file (" + str(opt[0][0]) + ") does not match number of rows in design matrix"); } - // Load contrast matrix - matrix_type contrast = load_matrix (argument[3]); - if (contrast.cols() > design.cols()) - throw Exception ("too many contrasts for design matrix"); - contrast.conservativeResize (contrast.rows(), design.cols()); - const std::string output_prefix = argument[4]; // Load input data // For compatibility with existing statistics code, symmetric matrix data is adjusted - // into vector form - one row per edge in the symmetric connectome. The Mat2Vec class - // deals with the re-ordering of matrix data into this form. - MR::Connectome::Mat2Vec mat2vec (num_nodes); - const size_t num_edges = mat2vec.vec_size(); - matrix_type data (num_edges, filenames.size()); + // into vector form - one row per edge in the symmetric connectome. This has already + // been performed when the CohortDataImport class is initialised. 
+ matrix_type data (num_edges, importer.size()); { - ProgressBar progress ("Loading input connectome data", filenames.size()); - for (size_t subject = 0; subject < filenames.size(); subject++) { - - const std::string& path (filenames[subject]); - MR::Connectome::matrix_type subject_data; - try { - subject_data = load_matrix (path); - } catch (Exception& e) { - throw Exception (e, "Error loading connectome data for subject #" + str(subject) + " (file \"" + path + "\""); - } - - try { - MR::Connectome::to_upper (subject_data); - if (size_t(subject_data.rows()) != num_nodes) - throw Exception ("Connectome matrix is not the correct size (" + str(subject_data.rows()) + ", should be " + str(num_nodes) + ")"); - } catch (Exception& e) { - throw Exception (e, "Connectome for subject #" + str(subject) + " (file \"" + path + "\") invalid"); - } - - for (size_t i = 0; i != num_edges; ++i) - data(i, subject) = subject_data (mat2vec(i).first, mat2vec(i).second); - + ProgressBar progress ("Agglomerating input connectome data", importer.size()); + for (size_t subject = 0; subject < importer.size(); subject++) { + (*importer[subject]) (data.col (subject)); ++progress; } } + const bool nans_in_data = data.allFinite(); + + // Construct the class for performing the initial statistical tests + std::shared_ptr glm_test; + if (extra_columns.size() || nans_in_data) { + glm_test.reset (new GLMTTestVariable (extra_columns, data, design, contrast, nans_in_data, nans_in_columns)); + } else { + glm_test.reset (new GLMTTestFixed (data, design, contrast)); + } - { - ProgressBar progress ("outputting beta coefficients, effect size and standard deviation...", contrast.cols() + 3); - - const matrix_type betas = Math::Stats::GLM::solve_betas (data, design); - for (size_t i = 0; i < size_t(contrast.cols()); ++i) { - save_matrix (mat2vec.V2M (betas.col(i)), output_prefix + "_beta_" + str(i) + ".csv"); - ++progress; + if (extra_columns.size()) { + + // For each variable of interest (e.g. beta coefficients, effect size etc.) 
need to: + // Construct the output data vector, with size = num_edges + // For each edge: + // Use glm_test to obtain the design matrix for the default permutation for that edge + // Use the relevant Math::Stats::GLM function to get the value of interest for just that edge + // (will still however need to come out as a matrix_type) + // Write that value to data vector + // Finally, write results to connectome files + matrix_type betas (contrast.cols(), num_edges); + vector_type abs_effect_size (num_edges), std_effect_size (num_edges), stdev (num_edges); + { + class Source + { NOMEMALIGN + public: + Source (const size_t num_edges) : + num_edges (num_edges), + counter (0), + progress (new ProgressBar ("estimating beta coefficients, effect size and standard deviation", num_edges)) { } + bool operator() (size_t& edge_index) + { + edge_index = counter++; + if (counter >= num_edges) { + progress.reset(); + return false; + } + assert (progress); + ++(*progress); + return true; + } + private: + const size_t num_edges; + size_t counter; + std::unique_ptr progress; + }; + + class Functor + { MEMALIGN(Functor) + public: + Functor (const matrix_type& data, std::shared_ptr glm_test, const matrix_type& contrasts, + matrix_type& betas, vector_type& abs_effect_size, vector_type& std_effect_size, vector_type& stdev) : + data (data), + glm_test (glm_test), + contrasts (contrasts), + global_betas (betas), + global_abs_effect_size (abs_effect_size), + global_std_effect_size (std_effect_size), + global_stdev (stdev) { } + bool operator() (const size_t& edge_index) + { + const matrix_type data_f = data.row (edge_index); + const matrix_type design_f = dynamic_cast(glm_test.get())->default_design (edge_index); + Math::Stats::GLM::all_stats (data_f, design_f, contrasts, + local_betas, local_abs_effect_size, local_std_effect_size, local_stdev); + global_betas.col (edge_index) = local_betas; + global_abs_effect_size[edge_index] = local_abs_effect_size(0,0); + global_std_effect_size[edge_index] = local_std_effect_size(0,0); + global_stdev[edge_index] = local_stdev(0,0); + return true; + } + + private: + const matrix_type& data; + const std::shared_ptr glm_test; + const matrix_type& contrasts; + matrix_type& global_betas; + vector_type& global_abs_effect_size; + vector_type& global_std_effect_size; + vector_type& global_stdev; + matrix_type local_betas, local_abs_effect_size, local_std_effect_size, local_stdev; + }; + + Source source (num_edges); + Functor functor (data, glm_test, contrast, + betas, abs_effect_size, std_effect_size, stdev); + Thread::run_queue (source, Thread::batch (size_t()), Thread::multi (functor)); } - - const matrix_type abs_effects = Math::Stats::GLM::abs_effect_size (data, design, contrast); - save_matrix (mat2vec.V2M (abs_effects.col(0)), output_prefix + "_abs_effect.csv"); - ++progress; - - const matrix_type std_effects = Math::Stats::GLM::std_effect_size (data, design, contrast); - matrix_type first_std_effect = mat2vec.V2M (std_effects.col (0)); - for (MR::Connectome::node_t i = 0; i != num_nodes; ++i) { - for (MR::Connectome::node_t j = 0; j != num_nodes; ++j) { - if (!std::isfinite (first_std_effect (i, j))) - first_std_effect (i, j) = 0.0; + { + ProgressBar progress ("outputting beta coefficients, effect size and standard deviation", contrast.cols() + 3); + for (ssize_t i = 0; i != contrast.cols(); ++i) { + save_matrix (mat2vec.V2M (betas.row(i)), "beta" + str(i) + ".csv"); + ++progress; } + save_matrix (mat2vec.V2M (abs_effect_size), "abs_effect.csv"); ++progress; + save_matrix 
(mat2vec.V2M (std_effect_size), "std_effect.csv"); ++progress; + save_matrix (mat2vec.V2M (stdev), "std_dev.csv"); } - save_matrix (first_std_effect, output_prefix + "_std_effect.csv"); + + } else { + + ProgressBar progress ("outputting beta coefficients, effect size and standard deviation", contrast.cols() + 4); + matrix_type betas, abs_effect_size, std_effect_size, stdev; + Math::Stats::GLM::all_stats (data, design, contrast, + betas, abs_effect_size, std_effect_size, stdev); ++progress; + for (ssize_t i = 0; i < contrast.cols(); ++i) { + save_matrix (mat2vec.V2M (betas.row(i)), "beta" + str(i) + ".csv"); + ++progress; + } + save_matrix (mat2vec.V2M (abs_effect_size.row(0)), "abs_effect.csv"); ++progress; + save_matrix (mat2vec.V2M (std_effect_size.row(0)), "std_effect.csv"); ++progress; + save_matrix (mat2vec.V2M (stdev.row(0)), "std_dev.csv"); - const matrix_type stdevs = Math::Stats::GLM::stdev (data, design); - save_vector (stdevs.col(0), output_prefix + "_std_dev.csv"); } - std::shared_ptr glm_ttest (new Math::Stats::GLMTTestFixed (data, design, contrast)); // If performing non-stationarity adjustment we need to pre-compute the empirical statistic vector_type empirical_statistic; if (do_nonstationary_adjustment) { if (permutations_nonstationary.size()) { - Stats::PermTest::PermutationStack perm_stack (permutations_nonstationary, "precomputing empirical statistic for non-stationarity adjustment..."); - Stats::PermTest::precompute_empirical_stat (glm_ttest, enhancer, perm_stack, empirical_statistic); + Stats::PermTest::PermutationStack perm_stack (permutations_nonstationary, "precomputing empirical statistic for non-stationarity adjustment"); + Stats::PermTest::precompute_empirical_stat (glm_test, enhancer, perm_stack, empirical_statistic); } else { - Stats::PermTest::PermutationStack perm_stack (nperms_nonstationary, design.rows(), "precomputing empirical statistic for non-stationarity adjustment...", true); - Stats::PermTest::precompute_empirical_stat (glm_ttest, enhancer, perm_stack, empirical_statistic); + Stats::PermTest::PermutationStack perm_stack (nperms_nonstationary, design.rows(), "precomputing empirical statistic for non-stationarity adjustment", true); + Stats::PermTest::precompute_empirical_stat (glm_test, enhancer, perm_stack, empirical_statistic); } save_matrix (mat2vec.V2M (empirical_statistic), output_prefix + "_empirical.csv"); } @@ -281,7 +406,7 @@ void run() vector_type tvalue_output (num_edges); vector_type enhanced_output (num_edges); - Stats::PermTest::precompute_default_permutation (glm_ttest, enhancer, empirical_statistic, enhanced_output, std::shared_ptr(), tvalue_output); + Stats::PermTest::precompute_default_permutation (glm_test, enhancer, empirical_statistic, enhanced_output, std::shared_ptr(), tvalue_output); save_matrix (mat2vec.V2M (tvalue_output), output_prefix + "_tvalue.csv"); save_matrix (mat2vec.V2M (enhanced_output), output_prefix + "_enhanced.csv"); @@ -295,12 +420,12 @@ void run() vector_type uncorrected_pvalues (num_edges); if (permutations.size()) { - Stats::PermTest::run_permutations (permutations, glm_ttest, enhancer, empirical_statistic, + Stats::PermTest::run_permutations (permutations, glm_test, enhancer, empirical_statistic, enhanced_output, std::shared_ptr(), null_distribution, std::shared_ptr(), uncorrected_pvalues, std::shared_ptr()); } else { - Stats::PermTest::run_permutations (num_perms, glm_ttest, enhancer, empirical_statistic, + Stats::PermTest::run_permutations (num_perms, glm_test, enhancer, empirical_statistic, 
enhanced_output, std::shared_ptr(), null_distribution, std::shared_ptr(), uncorrected_pvalues, std::shared_ptr()); diff --git a/cmd/fixelcfestats.cpp b/cmd/fixelcfestats.cpp index db2e1675a7..2db840a853 100644 --- a/cmd/fixelcfestats.cpp +++ b/cmd/fixelcfestats.cpp @@ -22,6 +22,7 @@ #include "fixel/keys.h" #include "fixel/loop.h" #include "math/stats/glm.h" +#include "math/stats/import.h" #include "math/stats/permutation.h" #include "math/stats/typedefs.h" #include "stats/cfe.h" diff --git a/core/math/stats/import.h b/core/math/stats/import.h index c9ccbcf9ed..a6c5be9d59 100644 --- a/core/math/stats/import.h +++ b/core/math/stats/import.h @@ -20,6 +20,8 @@ #include #include +#include "progressbar.h" + #include "file/path.h" #include "math/stats/typedefs.h" @@ -118,6 +120,7 @@ namespace MR // Read the provided text file one at a time // For each file, create an instance of SubjectDataImport // (which must derive from SubjectDataImportBase) + ProgressBar progress ("Importing data from files listed in \"" + Path::basename (path) + "\""); const std::string directory = Path::dirname (path); std::ifstream ifs (path.c_str()); std::string line; @@ -134,6 +137,7 @@ namespace MR throw Exception (e, "Reading text file \"" + Path::basename (path) + "\": input image data file not found: \"" + filename + "\""); } } + ++progress; } } diff --git a/src/connectome/connectome.h b/src/connectome/connectome.h index 9556c1283c..f5dd7eff64 100644 --- a/src/connectome/connectome.h +++ b/src/connectome/connectome.h @@ -44,11 +44,11 @@ namespace MR { template - void check (const MatrixType& in, const node_t num_nodes) + void check (const MatrixType& in, const node_t num_nodes = 0) { if (in.rows() != in.cols()) throw Exception ("Connectome matrix is not square (" + str(in.rows()) + " x " + str(in.cols()) + ")"); - if (in.rows() != num_nodes) + if (num_nodes && (in.rows() != num_nodes)) throw Exception ("Connectome matrix contains " + str(in.rows()) + " nodes; expected " + str(num_nodes)); } From e3e24b2659e88225a934b4c272a3e9f547034da0 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Thu, 22 Jun 2017 17:48:18 +1000 Subject: [PATCH 0051/1471] mrclusterstats: Initial work towards -column option Performing substantial re-write of connected components filter in order to make roles and functionalities of different code more clear. 
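For orientation, a minimal stand-alone sketch of the voxel-to-vector index mapping this rewrite works towards — not code from this patch, written against plain standard-library containers rather than the MRtrix3 Image / Header classes; the names MaskMapping, build_mapping and invalid_index are made up for illustration:

    #include <array>
    #include <cstddef>
    #include <cstdint>
    #include <limits>
    #include <vector>

    // Sentinel index for voxels lying outside the analysis mask.
    constexpr uint32_t invalid_index = std::numeric_limits<uint32_t>::max();

    struct MaskMapping {
      std::vector<uint32_t> forward;           // flat voxel index -> vector index, or invalid_index
      std::vector<std::array<int,3>> reverse;  // vector index -> (x,y,z) voxel position
    };

    // Build both lookups in a single pass over the mask; iterating with x fastest
    // keeps voxels that are contiguous in memory contiguous in the vectorised data.
    MaskMapping build_mapping (const std::vector<bool>& mask, const std::array<int,3>& dims)
    {
      MaskMapping m;
      m.forward.assign (mask.size(), invalid_index);
      uint32_t counter = 0;
      for (int z = 0; z != dims[2]; ++z)
        for (int y = 0; y != dims[1]; ++y)
          for (int x = 0; x != dims[0]; ++x) {
            const std::size_t voxel = x + std::size_t(dims[0]) * (y + std::size_t(dims[1]) * z);
            if (mask[voxel]) {
              m.forward[voxel] = counter++;    // next row in the voxels-by-subjects data matrix
              m.reverse.push_back ({x, y, z});
            }
          }
      return m;
    }

With such a mapping in place, each subject's image can be gathered into one column of a voxels-by-subjects data matrix for the GLM, and any per-voxel statistic can be written back into image space through the reverse lookup; per-element handling of non-finite values, as added for fixelcfestats and connectomestats above, then operates on rows of that matrix.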
--- cmd/mrclusterstats.cpp | 34 +++++-- core/filter/connected_components.h | 157 ++++++++++++++++++++++++++--- 2 files changed, 169 insertions(+), 22 deletions(-) diff --git a/cmd/mrclusterstats.cpp b/cmd/mrclusterstats.cpp index 7f12c25e24..532a692705 100644 --- a/cmd/mrclusterstats.cpp +++ b/cmd/mrclusterstats.cpp @@ -13,15 +13,18 @@ #include "command.h" -#include "file/path.h" -#include "algo/loop.h" #include "image.h" + +#include "algo/loop.h" +#include "file/path.h" #include "math/SH.h" + #include "dwi/directions/predefined.h" -#include "timer.h" + #include "math/stats/glm.h" #include "math/stats/permutation.h" #include "math/stats/typedefs.h" + #include "stats/cluster.h" #include "stats/enhance.h" #include "stats/permtest.h" @@ -87,6 +90,25 @@ void usage () +// TODO Want to make the connected-components filter & surrounding operations +// a little cleaner; in particular, for supporting voxel-wise regressors, +// need the mechanism for going from image data to vectorised data to be +// accessible by a derivative of SubjectDataImportBase +// +// This will include: +// * A class for transforming image positions into indices for vectorised data +// (requires only an input mask image) +// (If input data has higher dimensionality than mask image, need to +// effectively replicate mask across extra axes) +// * A class for determining the connectivity between elements +// (requires access to the first class, axis list, degree of voxel connectivity, +// also ideally capacity to define custom connectivity for a particular axis +// in order to support dixel data) + + + + + template void write_output (const VectorType& data, const vector >& mask_indices, @@ -161,7 +183,8 @@ void run() { // Load Mask and compute adjacency auto mask_image = mask_header.get_image(); Filter::Connector connector (do_26_connectivity); - vector > mask_indices = connector.precompute_adjacency (mask_image); + connector.precompute_adjacency (mask_image); + const vector >& mask_indices = connector.get_mask_indices(); const size_t num_vox = mask_indices.size(); matrix_type data (num_vox, subjects.size()); @@ -174,8 +197,7 @@ void run() { auto input_image = Image::open (subjects[subject]); //.with_direct_io (3); <- Should be inputting 3D images? check_dimensions (input_image, mask_image, 0, 3); int index = 0; - vector >::iterator it; - for (it = mask_indices.begin(); it != mask_indices.end(); ++it) { + for (auto it = mask_indices.begin(); it != mask_indices.end(); ++it) { input_image.index(0) = (*it)[0]; input_image.index(1) = (*it)[1]; input_image.index(2) = (*it)[2]; diff --git a/core/filter/connected_components.h b/core/filter/connected_components.h index 7461078240..482eb0f8c3 100644 --- a/core/filter/connected_components.h +++ b/core/filter/connected_components.h @@ -17,8 +17,10 @@ #include "memory.h" #include "image.h" -#include "algo/loop.h" +#include "types.h" +#include "adapter/replicate.h" +#include "algo/loop.h" #include "filter/base.h" #include @@ -29,7 +31,128 @@ namespace MR namespace Filter { - class cluster { NOMEMALIGN + + + // A class to achieve a mapping from a voxel position in an image + // with any number of axes, to an index in a 1D vector of data. 
+ class Voxel2Vector + { MEMALIGN(Voxel2Vector) + public: + + typedef uint32_t index_t; + + static const index_t invalid = std::numeric_limits::max(); + + Voxel2Vector (Image& mask, const Header& data); + + size_t size() const { return reverse.size(); } + + const vector& operator[] (const size_t index) const { + assert (index < reverse.size()); + return reverse[index]; + } + + template + index_t operator() (const PosType& pos) const { + Image temp (forward); // For thread-safety + assign_pos_of (pos).to (temp); + if (is_out_of_bounds (temp)) + return invalid; + return temp.value(); + } + + private: + Image forward; + vector< vector > reverse; + }; + + + + // A class that pre-computes and stores, for each voxel, a + // list of voxels (represented as indices) that are adjacent + // + // If we were to re-implement dixel-wise connectivity, it would + // be done using a plugin to this class to define the volumes + // on the fourth axis that correspond to neighbouring directions + class Adjacency + { + public: + typedef Voxel2Vector::index_t index_t; + + Adjacency() : + use_26_neighbours (false), + enabled_axes (3, true) { } + + void toggle_axis (const size_t axis, const bool value) { + if (axis > enabled_axes.size()) + enabled_axes.resize (axis+1, false); + enabled_axes[axis] = value; + } + + void initialise (const Header&, const Voxel2Vector&); + + const vector& operator[] (const size_t index) { + assert (index < data.size()); + return data[index]; + } + + private: + bool use_26_neighbours; + vector enabled_axes; + vector> data; + }; + + + + + + // TODO Re-define Connector class + // Maybe for now try to duplicate using old interface, get it compiled + // making use of new grunt code, then deal wtih interface changes later? + // Remember: Main purpose of rework is to expose Voxel2Vector to the + // GLM stats code... + // Maybe require that Voxel2Vector and Adjacency be created explicitly + // in order to construct Connector, but allow the ConnectedComponents + // filter to construct and store these internally itself (to hide) + // + // Note: When the filter is used, the adjacency is pre-computed + // based on the input image; therefore can't actually initialise + // it once and use it for multiple input images + // + // Two use cases: + // - mrclusterstats: Generate Voxel2Vector based on analysis mask, + // need to be able to pass shared_ptr to SubjectDataLoad classes + // - mrfilter: New Voxel2Vector is generated for each input image, since + // it needs to reflect the contents of the input mask image + + class Connector2 + { NOMEMALIGN + public: + Connector2 (const Voxel2Vector& v2v) : + v2v (v2v) { } + + + + + private: + const Voxel2Vector& v2v; + }; + + + + + + + + + + + + + + + class cluster + { NOMEMALIGN public: uint32_t label; uint32_t size; @@ -38,9 +161,8 @@ namespace MR } }; - - inline bool compare_clusters (const cluster& i, const cluster& j) - { + // Used for sorting clusters in order of size + inline bool largest (const cluster& i, const cluster& j) { return (i.size > j.size); } @@ -49,15 +171,16 @@ namespace MR public: Connector (bool do_26_connectivity) : - do_26_connectivity (do_26_connectivity), - dim_to_ignore (4, false) { - dim_to_ignore[3] = true; + do_26_connectivity (do_26_connectivity), + dim_to_ignore (4, false) + { + dim_to_ignore[3] = true; } // Perform connected components on the mask. 
- const vector >& run (vector& clusters, - vector& labels) const { + void run (vector& clusters, + vector& labels) const { labels.resize (adjacent_indices.size(), 0); uint32_t current_label = 1; for (uint32_t i = 0; i < labels.size(); i++) { @@ -73,7 +196,6 @@ namespace MR } if (clusters.size() > std::numeric_limits::max()) throw Exception ("The number of clusters is larger than can be labelled with an unsigned 32bit integer."); - return mask_indices; } @@ -109,7 +231,7 @@ namespace MR template - const vector >& precompute_adjacency (MaskImageType& mask) { + void precompute_adjacency (MaskImageType& mask) { auto index_image = Image::scratch (mask); @@ -159,8 +281,6 @@ namespace MR } adjacent_indices.push_back (neighbour_indices); } - - return mask_indices; } @@ -240,6 +360,9 @@ namespace MR } + const vector>& get_mask_indices() const { return mask_indices; } + + bool do_26_connectivity; vector dim_to_ignore; vector > mask_indices; @@ -307,11 +430,11 @@ namespace MR vector clusters; vector labels; - vector > mask_indices = connector.run (clusters, labels); + connector.run (clusters, labels); if (progress) ++(*progress); - std::sort (clusters.begin(), clusters.end(), compare_clusters); + std::sort (clusters.begin(), clusters.end(), largest); if (progress) ++(*progress); @@ -322,6 +445,8 @@ namespace MR for (auto l = Loop (out) (out); l; ++l) out.value() = 0; + const vector >& mask_indices = connector.get_mask_indices(); + for (uint32_t i = 0; i < mask_indices.size(); i++) { assign_pos_of (mask_indices[i]).to (out); From 47c39efd7e43e0b82eed307eb6a908bd4c2ea5c9 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Fri, 23 Jun 2017 12:02:58 +1000 Subject: [PATCH 0052/1471] More re-working of connected components code The roles of different parts of the code have been made more clear by encapsulating them within appropriately-named classes. The functionality of mapping each voxel position to an index in a 1D vector of data now resides in the Voxel2Vector class, placed in core/misc since it is used both by the connected components filter, and the statistics code. core/bitset.h has also been moved into this new core/misc/ directory, since it's just a utility class that isn't as fundamental to the compilation of MRtrix3 as the other contents of core/. 
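For orientation, the cluster labelling performed by the reorganised Connector below amounts to an iterative depth-first search over a precomputed adjacency list. A minimal sketch of that idea — not code from this patch, using plain standard-library containers, with the hypothetical names label_components / adjacency:

    #include <cstdint>
    #include <stack>
    #include <vector>

    // adjacency[i] lists the indices of the elements adjacent to element i.
    // Returns one cluster label per element (labels start at 1; every element
    // ends up labelled once the outer loop completes).
    std::vector<uint32_t> label_components (const std::vector<std::vector<uint32_t>>& adjacency)
    {
      const uint32_t num_elements = uint32_t (adjacency.size());
      std::vector<uint32_t> labels (num_elements, 0);
      uint32_t current_label = 0;
      for (uint32_t seed = 0; seed != num_elements; ++seed) {
        if (labels[seed])
          continue;                            // already absorbed by an earlier cluster
        ++current_label;
        std::stack<uint32_t> stack;
        stack.push (seed);
        while (!stack.empty()) {
          const uint32_t node = stack.top();
          stack.pop();
          if (labels[node])
            continue;                          // may have been pushed more than once
          labels[node] = current_label;
          for (const uint32_t neighbour : adjacency[node])
            if (!labels[neighbour])
              stack.push (neighbour);
        }
      }
      return labels;
    }

An explicit stack is used rather than recursion so that very large clusters cannot overflow the call stack; the label check after popping handles elements that were pushed more than once.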
--- cmd/mrclusterstats.cpp | 49 +- cmd/shbasis.cpp | 9 +- core/filter/connected_components.cpp | 147 +++++ core/filter/connected_components.h | 577 +++++++------------ core/math/stats/glm.cpp | 2 +- core/{ => misc}/bitset.cpp | 2 +- core/{ => misc}/bitset.h | 4 +- core/misc/voxel2vector.h | 105 ++++ src/connectome/enhance.cpp | 2 + src/connectome/enhance.h | 2 - src/dwi/directions/mask.h | 3 +- src/dwi/directions/set.cpp | 2 +- src/dwi/shells.h | 3 +- src/dwi/tractography/SIFT2/coeff_optimiser.h | 3 +- src/dwi/tractography/SIFT2/tckfactor.cpp | 2 +- src/dwi/tractography/connectome/extract.cpp | 2 +- src/dwi/tractography/connectome/matrix.cpp | 2 +- src/gui/mrview/tool/connectome/connectome.h | 6 +- src/stats/cluster.cpp | 2 +- 19 files changed, 500 insertions(+), 424 deletions(-) create mode 100644 core/filter/connected_components.cpp rename core/{ => misc}/bitset.cpp (99%) rename core/{ => misc}/bitset.h (99%) create mode 100644 core/misc/voxel2vector.h diff --git a/cmd/mrclusterstats.cpp b/cmd/mrclusterstats.cpp index 532a692705..a866a5ba54 100644 --- a/cmd/mrclusterstats.cpp +++ b/cmd/mrclusterstats.cpp @@ -111,11 +111,10 @@ void usage () template void write_output (const VectorType& data, - const vector >& mask_indices, + const Voxel2Vector& v2v, ImageType& image) { - for (size_t i = 0; i < mask_indices.size(); i++) { - for (size_t dim = 0; dim < image.ndim(); dim++) - image.index(dim) = mask_indices[i][dim]; + for (size_t i = 0; i != v2v.size(); i++) { + assign_pos_of (v2v[i]).to (image); image.value() = data[i]; } } @@ -181,11 +180,12 @@ void run() { auto mask_header = Header::open (argument[3]); // Load Mask and compute adjacency - auto mask_image = mask_header.get_image(); - Filter::Connector connector (do_26_connectivity); - connector.precompute_adjacency (mask_image); - const vector >& mask_indices = connector.get_mask_indices(); - const size_t num_vox = mask_indices.size(); + auto mask_image = mask_header.get_image(); + Voxel2Vector v2v (mask_image, mask_header); + Filter::Connector connector; + connector.adjacency.set_26_adjacency (do_26_connectivity); + connector.adjacency.initialise (mask_header, v2v); + const size_t num_vox = v2v.size(); matrix_type data (num_vox, subjects.size()); @@ -196,12 +196,9 @@ void run() { LogLevelLatch log_level (0); auto input_image = Image::open (subjects[subject]); //.with_direct_io (3); <- Should be inputting 3D images? check_dimensions (input_image, mask_image, 0, 3); - int index = 0; - for (auto it = mask_indices.begin(); it != mask_indices.end(); ++it) { - input_image.index(0) = (*it)[0]; - input_image.index(1) = (*it)[1]; - input_image.index(2) = (*it)[2]; - data (index++, subject) = input_image.value(); + for (size_t voxel_index = 0; voxel_index != num_vox; ++voxel_index) { + assign_pos_of (v2v[voxel_index]).to (input_image); + data (voxel_index, subject) = input_image.value(); } progress++; } @@ -263,42 +260,42 @@ void run() { ProgressBar progress ("generating pre-permutation output", (compute_negative_contrast ? 3 : 2) + contrast.cols() + 3); { auto tvalue_image = Image::create (prefix + "tvalue.mif", output_header); - write_output (tvalue_output, mask_indices, tvalue_image); + write_output (tvalue_output, v2v, tvalue_image); } ++progress; { auto cluster_image = Image::create (prefix + (use_tfce ? 
"tfce.mif" : "cluster_sizes.mif"), output_header); - write_output (default_cluster_output, mask_indices, cluster_image); + write_output (default_cluster_output, v2v, cluster_image); } ++progress; if (compute_negative_contrast) { assert (default_cluster_output_neg); auto cluster_image_neg = Image::create (prefix + (use_tfce ? "tfce_neg.mif" : "cluster_sizes_neg.mif"), output_header); - write_output (*default_cluster_output_neg, mask_indices, cluster_image_neg); + write_output (*default_cluster_output_neg, v2v, cluster_image_neg); ++progress; } auto temp = Math::Stats::GLM::solve_betas (data, design); for (ssize_t i = 0; i < contrast.cols(); ++i) { auto beta_image = Image::create (prefix + "beta" + str(i) + ".mif", output_header); - write_output (temp.row(i), mask_indices, beta_image); + write_output (temp.row(i), v2v, beta_image); ++progress; } { const auto temp = Math::Stats::GLM::abs_effect_size (data, design, contrast); auto abs_effect_image = Image::create (prefix + "abs_effect.mif", output_header); - write_output (temp.row(0), mask_indices, abs_effect_image); + write_output (temp.row(0), v2v, abs_effect_image); } ++progress; { const auto temp = Math::Stats::GLM::std_effect_size (data, design, contrast); auto std_effect_image = Image::create (prefix + "std_effect.mif", output_header); - write_output (temp.row(0), mask_indices, std_effect_image); + write_output (temp.row(0), v2v, std_effect_image); } ++progress; { const auto temp = Math::Stats::GLM::stdev (data, design); auto std_dev_image = Image::create (prefix + "std_dev.mif", output_header); - write_output (temp.row(0), mask_indices, std_dev_image); + write_output (temp.row(0), v2v, std_dev_image); } } @@ -335,26 +332,26 @@ void run() { ProgressBar progress ("generating output", compute_negative_contrast ? 
4 : 2); { auto uncorrected_pvalue_image = Image::create (prefix + "uncorrected_pvalue.mif", output_header); - write_output (uncorrected_pvalue, mask_indices, uncorrected_pvalue_image); + write_output (uncorrected_pvalue, v2v, uncorrected_pvalue_image); } ++progress; { vector_type fwe_pvalue_output (num_vox); Math::Stats::Permutation::statistic2pvalue (perm_distribution, default_cluster_output, fwe_pvalue_output); auto fwe_pvalue_image = Image::create (prefix + "fwe_pvalue.mif", output_header); - write_output (fwe_pvalue_output, mask_indices, fwe_pvalue_image); + write_output (fwe_pvalue_output, v2v, fwe_pvalue_image); } ++progress; if (compute_negative_contrast) { assert (uncorrected_pvalue_neg); assert (perm_distribution_neg); auto uncorrected_pvalue_image_neg = Image::create (prefix + "uncorrected_pvalue_neg.mif", output_header); - write_output (*uncorrected_pvalue_neg, mask_indices, uncorrected_pvalue_image_neg); + write_output (*uncorrected_pvalue_neg, v2v, uncorrected_pvalue_image_neg); ++progress; vector_type fwe_pvalue_output_neg (num_vox); Math::Stats::Permutation::statistic2pvalue (*perm_distribution_neg, *default_cluster_output_neg, fwe_pvalue_output_neg); auto fwe_pvalue_image_neg = Image::create (prefix + "fwe_pvalue_neg.mif", output_header); - write_output (fwe_pvalue_output_neg, mask_indices, fwe_pvalue_image_neg); + write_output (fwe_pvalue_output_neg, v2v, fwe_pvalue_image_neg); } } diff --git a/cmd/shbasis.cpp b/cmd/shbasis.cpp index cfa7f5c268..ae54ed3c22 100644 --- a/cmd/shbasis.cpp +++ b/cmd/shbasis.cpp @@ -15,17 +15,16 @@ #include #include "app.h" -#include "bitset.h" #include "command.h" #include "datatype.h" -#include "progressbar.h" -#include "memory.h" - #include "header.h" #include "image.h" -#include "algo/loop.h" +#include "memory.h" +#include "progressbar.h" +#include "algo/loop.h" #include "math/SH.h" +#include "misc/bitset.h" using namespace MR; diff --git a/core/filter/connected_components.cpp b/core/filter/connected_components.cpp new file mode 100644 index 0000000000..7482f92ada --- /dev/null +++ b/core/filter/connected_components.cpp @@ -0,0 +1,147 @@ +/* Copyright (c) 2008-2017 the MRtrix3 contributors + * + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, you can obtain one at http://mozilla.org/MPL/2.0/. + * + * MRtrix is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * For more details, see http://www.mrtrix.org/. 
+ */ + + +#include "filter/connected_components.h" + +namespace MR +{ + namespace Filter + { + + + + void Connector::Adjacency::initialise (const Header& header, const Voxel2Vector& v2v) + { + data.clear(); + // Simplify handling of 4D images: don't need to keep checking + // size of axes against number of image dimensions + if (header.ndim() < 3) + throw Exception ("Connected components filter not designed to handle less than 3 axes"); + if (header.ndim() > enabled_axes.size()) + enabled_axes.resize (header.ndim(), false); + // Begin by disabling adjacency offsets for those axes for which adjacency is not permitted + vector< vector > offsets; + vector o (header.ndim(), -1); + size_t start_axis = 0; + for (size_t axis = 0; axis != header.ndim(); ++axis) { + if (!enabled_axes[axis]) { + o[axis] = 0; + if (start_axis == axis) + ++start_axis; + } + } + if (start_axis == header.ndim()) + throw Exception ("Cannot initialise connected component filter: All axes have been disabled"); + // Now generate a list of plausible offsets between adjacent elements + while (*std::max_element (o.begin(), o.end()) < 2) { + // Determine whether or not this offset should be added to the list + if (!use_26_neighbours && header.ndim() >= 3 && ((std::abs(o[0]) + std::abs(o[1]) + std::abs(o[2])) > 1)) + continue; + offsets.push_back (o); + // Find the next offset to be tested + ++o[start_axis]; + for (size_t axis = start_axis; axis != header.ndim(); ++axis) { + if (enabled_axes[axis]) { + if (o[axis] == 2 && axis < header.ndim()-1) { + o[axis] = -1; + ++o[axis+1]; + } + } + } + } + // Now we can generate, for each element in the image, a list of adjacent elements + // This may appear different to previous code, given the use of the Voxel2Vector class + vector pos (header.ndim()); + vector neighbour (header.ndim()); + data.reserve (v2v.size()); + for (size_t i = 0; i != v2v.size(); ++i) { + pos = v2v[i]; + vector indices; + for (auto o : offsets) { + for (size_t axis = 0; axis != header.ndim(); ++axis) + neighbour[axis] = pos[axis] + o[axis]; + // Is this a valid neighbour position, i.e. within the mask? 
+ // If so, the Voxel2vector class should provide us with a valid + // index of this neighbouring element + const index_t j = v2v (neighbour); + if (j != v2v.invalid) + indices.push_back (j); + } + data.push_back (indices); + } + DEBUG("Adjacency data for " + str(data.size()) + " voxels initialised"); + } + + + + void Connector::run (vector& clusters, + vector& labels) const + { + assert (adjacency.size()); + labels.resize (adjacency.size(), 0); + uint32_t current_label = 0; + for (uint32_t i = 0; i < labels.size(); i++) { + // This node has not been already clustered + if (!labels[i]) { + Cluster cluster (++current_label); + depth_first_search (i, cluster, labels); + clusters.push_back (cluster); + } + } + if (clusters.size() > std::numeric_limits::max()) + throw Exception ("The number of clusters is larger than can be labelled with an unsigned 32bit integer."); + } + + + + bool Connector::next_neighbour (uint32_t& node, vector& labels) const + { + for (auto n : adjacency[node]) { + if (!labels[n]) { + node = n; + return true; + } + } + return false; + } + + + + void Connector::depth_first_search (const uint32_t root, + Cluster& cluster, + vector& labels) const + { + uint32_t node = root; + std::stack stack; + while (true) { + labels[node] = cluster.label; + stack.push (node); + cluster.size++; + if (next_neighbour (node, labels)) { + continue; + } else { + do { + if (stack.top() == root) + return; + stack.pop(); + node = stack.top(); + } while (!next_neighbour (node, labels)); + } + } + } + + + + } +} diff --git a/core/filter/connected_components.h b/core/filter/connected_components.h index 482eb0f8c3..3526eb0c83 100644 --- a/core/filter/connected_components.h +++ b/core/filter/connected_components.h @@ -15,17 +15,18 @@ #ifndef __filter_connected_h__ #define __filter_connected_h__ -#include "memory.h" + #include "image.h" +#include "memory.h" #include "types.h" -#include "adapter/replicate.h" -#include "algo/loop.h" #include "filter/base.h" +#include "misc/voxel2vector.h" #include #include + namespace MR { namespace Filter @@ -33,341 +34,169 @@ namespace MR - // A class to achieve a mapping from a voxel position in an image - // with any number of axes, to an index in a 1D vector of data. 
- class Voxel2Vector - { MEMALIGN(Voxel2Vector) - public: - - typedef uint32_t index_t; - - static const index_t invalid = std::numeric_limits::max(); - - Voxel2Vector (Image& mask, const Header& data); - - size_t size() const { return reverse.size(); } - - const vector& operator[] (const size_t index) const { - assert (index < reverse.size()); - return reverse[index]; - } - - template - index_t operator() (const PosType& pos) const { - Image temp (forward); // For thread-safety - assign_pos_of (pos).to (temp); - if (is_out_of_bounds (temp)) - return invalid; - return temp.value(); - } - - private: - Image forward; - vector< vector > reverse; - }; - - - - // A class that pre-computes and stores, for each voxel, a - // list of voxels (represented as indices) that are adjacent - // - // If we were to re-implement dixel-wise connectivity, it would - // be done using a plugin to this class to define the volumes - // on the fourth axis that correspond to neighbouring directions - class Adjacency - { - public: - typedef Voxel2Vector::index_t index_t; - - Adjacency() : - use_26_neighbours (false), - enabled_axes (3, true) { } - - void toggle_axis (const size_t axis, const bool value) { - if (axis > enabled_axes.size()) - enabled_axes.resize (axis+1, false); - enabled_axes[axis] = value; - } - - void initialise (const Header&, const Voxel2Vector&); - - const vector& operator[] (const size_t index) { - assert (index < data.size()); - return data[index]; - } - - private: - bool use_26_neighbours; - vector enabled_axes; - vector> data; - }; - - - - - - // TODO Re-define Connector class - // Maybe for now try to duplicate using old interface, get it compiled - // making use of new grunt code, then deal wtih interface changes later? - // Remember: Main purpose of rework is to expose Voxel2Vector to the - // GLM stats code... - // Maybe require that Voxel2Vector and Adjacency be created explicitly - // in order to construct Connector, but allow the ConnectedComponents - // filter to construct and store these internally itself (to hide) - // - // Note: When the filter is used, the adjacency is pre-computed - // based on the input image; therefore can't actually initialise - // it once and use it for multiple input images - // - // Two use cases: - // - mrclusterstats: Generate Voxel2Vector based on analysis mask, - // need to be able to pass shared_ptr to SubjectDataLoad classes - // - mrfilter: New Voxel2Vector is generated for each input image, since - // it needs to reflect the contents of the input mask image - - class Connector2 + class Connector { NOMEMALIGN - public: - Connector2 (const Voxel2Vector& v2v) : - v2v (v2v) { } - - - - - private: - const Voxel2Vector& v2v; - }; - - - - + public: + // A class that pre-computes and stores, for each voxel, a + // list of voxels (represented as indices) that are adjacent + // + // If we were to re-implement dixel-wise connectivity, it would + // be done using an alternative initialise() function for this + // class, to define the volumes on the fourth axis that + // correspond to neighbouring directions using a Directions::Set. 
+ class Adjacency + { + public: + typedef Voxel2Vector::index_t index_t; + + Adjacency() : + use_26_neighbours (false), + enabled_axes (3, true) { } + + void toggle_axis (const size_t axis, const bool value) { + if (axis > enabled_axes.size()) + enabled_axes.resize (axis+1, false); + enabled_axes[axis] = value; + data.clear(); + } + void initialise (const Header&, const Voxel2Vector&); + const vector& operator[] (const size_t index) const { + assert (size()); + assert (index < size()); + return data[index]; + } + void set_26_adjacency (const bool i) { + use_26_neighbours = i; + data.clear(); + } + size_t size() const { return data.size(); } + private: + bool use_26_neighbours; + vector enabled_axes; + vector> data; + } adjacency; - class cluster - { NOMEMALIGN - public: - uint32_t label; - uint32_t size; - bool operator< (const cluster& j) const { - return size < j.size; + class Cluster + { NOMEMALIGN + public: + Cluster (const uint32_t l) : + label (l), + size (0) { } + uint32_t label; + uint32_t size; + bool operator< (const Cluster& j) const { + return size < j.size; + } + }; + // Used for sorting clusters in order of size + static bool largest (const Cluster& i, const Cluster& j) { + return (i.size > j.size); } - }; - // Used for sorting clusters in order of size - inline bool largest (const cluster& i, const cluster& j) { - return (i.size > j.size); - } - class Connector { NOMEMALIGN - - public: - Connector (bool do_26_connectivity) : - do_26_connectivity (do_26_connectivity), - dim_to_ignore (4, false) - { - dim_to_ignore[3] = true; - } + Connector () { } // Perform connected components on the mask. - void run (vector& clusters, - vector& labels) const { - labels.resize (adjacent_indices.size(), 0); - uint32_t current_label = 1; - for (uint32_t i = 0; i < labels.size(); i++) { - // this node has not been already clustered - if (labels[i] == 0) { - cluster cluster; - cluster.label = current_label; - cluster.size = 0; - depth_first_search (i, cluster, labels); - clusters.push_back (cluster); - current_label++; - } - } - if (clusters.size() > std::numeric_limits::max()) - throw Exception ("The number of clusters is larger than can be labelled with an unsigned 32bit integer."); - } - - - // Perform connected components on data with the defined threshold. Assumes adjacency is the same as the mask. 
+ void run (vector&, vector&) const; template - void run (vector& clusters, - vector& labels, - const VectorType& data, - const float threshold) const { - labels.resize (adjacent_indices.size(), 0); - uint32_t current_label = 1; - for (uint32_t i = 0; i < labels.size(); i++) { - // this node has not been already clustered and is above threshold - if (labels[i] == 0 && data[i] > threshold) { - cluster cluster; - cluster.label = current_label; - cluster.size = 0; - depth_first_search (i, cluster, labels, data, threshold); - clusters.push_back (cluster); - current_label++; - } - } - if (clusters.size() > std::numeric_limits::max()) - throw Exception ("The number of clusters is larger than can be labelled with an unsigned 32bit integer."); - } + void run (vector&, vector&, + const VectorType&, const float) const; - void set_dim_to_ignore (vector& ignore_dim) { - for (size_t d = 0; d < ignore_dim.size(); ++d) { - dim_to_ignore[d] = ignore_dim[d]; - } - } - + private: - template - void precompute_adjacency (MaskImageType& mask) { + bool next_neighbour (uint32_t&, vector&) const; + template + bool next_neighbour (uint32_t&, vector&, + const VectorType&, const float) const; - auto index_image = Image::scratch (mask); + void depth_first_search (const uint32_t, Cluster&, vector&) const; + template + void depth_first_search (const uint32_t, Cluster&, vector&, + const VectorType&, const float) const; - // 1st pass, store mask image indices and their index in the array - for (auto l = Loop (mask) (mask, index_image); l; ++l) { - if (mask.value() >= 0.5) { - // For each voxel, store the index within mask_indices for 2nd pass - index_image.value() = mask_indices.size(); - vector index (mask.ndim()); - for (size_t dim = 0; dim < mask.ndim(); dim++) - index[dim] = mask.index(dim); - mask_indices.push_back (index); - } else { - index_image.value() = 0; - } - } - // Here we pre-compute the offsets for our neighbours in 4D space - vector< vector > neighbour_offsets; - vector offset (4); - for (offset[0] = -1; offset[0] <= 1; offset[0]++) { - for (offset[1] = -1; offset[1] <= 1; offset[1]++) { - for (offset[2] = -1; offset[2] <= 1; offset[2]++) { - for (offset[3] = -1; offset[3] <= 1; offset[3]++) { - if (!do_26_connectivity && ((abs(offset[0]) + abs(offset[1]) + abs(offset[2]) + abs(offset[3])) > 1)) - continue; - if ((abs(offset[0]) && dim_to_ignore[0]) || (abs(offset[1]) && dim_to_ignore[1]) || - (abs(offset[2]) && dim_to_ignore[2]) || (abs(offset[3]) && dim_to_ignore[3])) - continue; - neighbour_offsets.push_back (offset); - } - } - } - } - // 2nd pass, define adjacency - MaskImageType mask_neigh (mask); - for (vector >::const_iterator it = mask_indices.begin(); it != mask_indices.end(); ++it) { - vector neighbour_indices; - for (vector< vector >::const_iterator offset = neighbour_offsets.begin(); offset != neighbour_offsets.end(); ++offset) { - for (size_t dim = 0; dim < mask.ndim(); dim++) - mask_neigh.index(dim) = (*it)[dim] + (*offset)[dim]; - if (!is_out_of_bounds (mask_neigh)) { - if (mask_neigh.value() >= 0.5) { - assign_pos_of (mask_neigh).to (index_image); - neighbour_indices.push_back (index_image.value()); - } - } - } - adjacent_indices.push_back (neighbour_indices); - } - } + }; - bool next_neighbour (uint32_t& node, vector& labels) const { - for (size_t n = 0; n < adjacent_indices[node].size(); n++) { - if (labels[adjacent_indices[node][n]] == 0) { - node = adjacent_indices[node][n]; - return true; - } - } - return false; - } - template - bool next_neighbour (uint32_t& node, - vector& labels, 
- const VectorType& data, - const float threshold) const { - for (size_t n = 0; n < adjacent_indices[node].size(); n++) { - if (labels[adjacent_indices[node][n]] == 0 && data[adjacent_indices[node][n]] > threshold) { - node = adjacent_indices[node][n]; - return true; - } - } - return false; + template + void Connector::run (vector& clusters, + vector& labels, + const VectorType& data, + const float threshold) const + { + assert (adjacency.size()); + labels.resize (adjacency.size(), 0); + uint32_t current_label = 0; + for (uint32_t i = 0; i < labels.size(); i++) { + // This node has not been already clustered and is above threshold + if (!labels[i] && data[i] > threshold) { + Cluster cluster (++current_label); + depth_first_search (i, cluster, labels, data, threshold); + clusters.push_back (cluster); } + } + if (clusters.size() > std::numeric_limits::max()) + throw Exception ("The number of clusters is larger than can be labelled with an unsigned 32bit integer."); + } - // use a non-recursive depth first search to agglomerate adjacent voxels - void depth_first_search (uint32_t root, - cluster& cluster, - vector& labels) const { - uint32_t node = root; - std::stack stack; - while (true) { - labels[node] = cluster.label; - stack.push (node); - cluster.size++; - if (next_neighbour (node, labels)) { - continue; - } else { - do { - if (stack.top() == root) - return; - stack.pop(); - node = stack.top(); - } while (!next_neighbour (node, labels)); - } - } - } - // use a non-recursive depth first search to agglomerate adjacent voxels - template - void depth_first_search (uint32_t root, - cluster& cluster, - vector& labels, - const VectorType& data, - const float threshold) const { - uint32_t node = root; - std::stack stack; - while (true) { - labels[node] = cluster.label; - stack.push (node); - cluster.size++; - if (next_neighbour (node, labels, data, threshold)) { - continue; - } else { - do { - if (stack.top() == root) - return; - stack.pop(); - node = stack.top(); - } while (!next_neighbour (node, labels, data, threshold)); - } - } + template + bool Connector::next_neighbour (uint32_t& node, + vector& labels, + const VectorType& data, + const float threshold) const + { + for (auto n : adjacency[node]) { + if (!labels[n] && data[n] > threshold) { + node = n; + return true; } + } + return false; + } - const vector>& get_mask_indices() const { return mask_indices; } + template + void Connector::depth_first_search (const uint32_t root, + Cluster& cluster, + vector& labels, + const VectorType& data, + const float threshold) const + { + uint32_t node = root; + std::stack stack; + while (true) { + labels[node] = cluster.label; + stack.push (node); + cluster.size++; + if (next_neighbour (node, labels, data, threshold)) { + continue; + } else { + do { + if (stack.top() == root) + return; + stack.pop(); + node = stack.top(); + } while (!next_neighbour (node, labels, data, threshold)); + } + } + } - bool do_26_connectivity; - vector dim_to_ignore; - vector > mask_indices; - vector > adjacent_indices; - }; @@ -390,93 +219,93 @@ namespace MR class ConnectedComponents : public Base { MEMALIGN(ConnectedComponents) public: - template - ConnectedComponents (const HeaderType& in) : - Base (in), - largest_only (false), - do_26_connectivity (false) - { - if (this->ndim() > 4) - throw Exception ("Cannot run connected components analysis with more than 4 dimensions"); - datatype_ = DataType::UInt32; - dim_to_ignore.resize (this->ndim(), false); - if (this->ndim() == 4) // Ignore 4D unless explicitly instructed to - 
dim_to_ignore[3] = true; - } - - template - ConnectedComponents (const HeaderType& in, const std::string& message) : - ConnectedComponents (in) - { - set_message (message); - } - - - template - void operator() (InputVoxelType& in, OutputVoxelType& out) - { - Connector connector (do_26_connectivity); - - if (dim_to_ignore.size()) - connector.set_dim_to_ignore (dim_to_ignore); - - connector.precompute_adjacency (in); + template + ConnectedComponents (const HeaderType& in) : + Base (in), + largest_only (false), + do_26_connectivity (false) + { + if (this->ndim() > 4) + throw Exception ("Cannot run connected components analysis with more than 4 dimensions"); + datatype_ = DataType::UInt32; + dim_to_ignore.resize (this->ndim(), false); + if (this->ndim() == 4) // Ignore 4D unless explicitly instructed to + dim_to_ignore[3] = true; + } - std::unique_ptr progress; - if (message.size()) { - progress.reset (new ProgressBar (message)); - ++(*progress); + template + ConnectedComponents (const HeaderType& in, const std::string& message) : + ConnectedComponents (in) + { + set_message (message); } - vector clusters; - vector labels; - connector.run (clusters, labels); - if (progress) - ++(*progress); - std::sort (clusters.begin(), clusters.end(), largest); - if (progress) - ++(*progress); + template + void operator() (InputVoxelType& in, OutputVoxelType& out) + { + Voxel2Vector v2v (in, *this); + + Connector connector; + for (size_t axis = 0; axis != dim_to_ignore.size(); ++axis) + connector.adjacency.toggle_axis (axis, !dim_to_ignore[axis]); + connector.adjacency.set_26_adjacency (do_26_connectivity); + connector.adjacency.initialise (in, v2v); + + std::unique_ptr progress; + if (message.size()) { + progress.reset (new ProgressBar (message)); + ++(*progress); + } - vector label_lookup (clusters.size(), 0); - for (uint32_t c = 0; c < clusters.size(); c++) - label_lookup[clusters[c].label - 1] = c + 1; + vector clusters; + vector labels; + connector.run (clusters, labels); + if (progress) ++(*progress); + + // Sort clusters in order from largest to smallest + std::sort (clusters.begin(), clusters.end(), Connector::largest); + if (progress) ++(*progress); + + // Generate a lookup table to map input cluster index to + // output cluster index following cluster-size sorting + vector index_lookup (clusters.size() + 1, 0); + for (uint32_t c = 0; c < clusters.size(); c++) + index_lookup[clusters[c].label] = c + 1; + + for (auto l = Loop (out) (out); l; ++l) + out.value() = 0; + + for (uint32_t i = 0; i < v2v.size(); i++) { + assign_pos_of (v2v[i]).to (out); + if (largest_only) { + if (index_lookup[labels[i]] == 1) + out.value() = 1; + } else { + out.value() = index_lookup[labels[i]]; + } + } + } - for (auto l = Loop (out) (out); l; ++l) - out.value() = 0; - const vector >& mask_indices = connector.get_mask_indices(); - for (uint32_t i = 0; i < mask_indices.size(); i++) + void set_ignore_dim (size_t dim, bool ignore) { - assign_pos_of (mask_indices[i]).to (out); - if (largest_only) { - if (label_lookup[labels[i] - 1] == 1) - out.value() = 1; - } else { - out.value() = label_lookup[labels[i] - 1]; - } + assert (dim < this->ndim()); + dim_to_ignore[dim] = ignore; } - } - void set_ignore_dim (size_t dim, bool ignore) - { - assert (dim < this->ndim()); - dim_to_ignore[dim] = ignore; - } - - - void set_largest_only (bool value) - { - largest_only = value; - } + void set_largest_only (bool value) + { + largest_only = value; + } - void set_26_connectivity (bool value) - { - do_26_connectivity = value; - } + void 
set_26_connectivity (bool value) + { + do_26_connectivity = value; + } protected: diff --git a/core/math/stats/glm.cpp b/core/math/stats/glm.cpp index 48a3f028e0..0e95f21dc9 100644 --- a/core/math/stats/glm.cpp +++ b/core/math/stats/glm.cpp @@ -14,8 +14,8 @@ #include "math/stats/glm.h" -#include "bitset.h" #include "debug.h" +#include "misc/bitset.h" #define GLM_BATCH_SIZE 1024 diff --git a/core/bitset.cpp b/core/misc/bitset.cpp similarity index 99% rename from core/bitset.cpp rename to core/misc/bitset.cpp index 7c1ddf122d..767fe86fb5 100644 --- a/core/bitset.cpp +++ b/core/misc/bitset.cpp @@ -12,7 +12,7 @@ */ -#include "bitset.h" +#include "misc/bitset.h" diff --git a/core/bitset.h b/core/misc/bitset.h similarity index 99% rename from core/bitset.h rename to core/misc/bitset.h index 8be4b0c145..47a408ba69 100644 --- a/core/bitset.h +++ b/core/misc/bitset.h @@ -12,8 +12,8 @@ */ -#ifndef __bitset_h__ -#define __bitset_h__ +#ifndef __misc_bitset_h__ +#define __misc_bitset_h__ #include diff --git a/core/misc/voxel2vector.h b/core/misc/voxel2vector.h new file mode 100644 index 0000000000..4710a0ac7c --- /dev/null +++ b/core/misc/voxel2vector.h @@ -0,0 +1,105 @@ +/* Copyright (c) 2008-2017 the MRtrix3 contributors + * + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, you can obtain one at http://mozilla.org/MPL/2.0/. + * + * MRtrix is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * For more details, see http://www.mrtrix.org/. + */ + + +#ifndef __misc_voxel2vector_h__ +#define __misc_voxel2vector_h__ + +#include + +#include "exception.h" +#include "header.h" +#include "image.h" +#include "algo/loop.h" +#include "types.h" + +#include "adapter/replicate.h" + + +namespace MR +{ + + + + // A class to achieve a mapping from a voxel position in an image + // with any number of axes, to an index within a 1D vector of data. + class Voxel2Vector + { MEMALIGN(Voxel2Vector) + public: + + typedef uint32_t index_t; + + static const index_t invalid = std::numeric_limits::max(); + + template + Voxel2Vector (MaskType& mask, const Header& data); + + template + Voxel2Vector (MaskType& mask) : + Voxel2Vector (mask, Header(mask)) { } + + size_t size() const { return reverse.size(); } + + const vector& operator[] (const size_t index) const { + assert (index < reverse.size()); + return reverse[index]; + } + + template + index_t operator() (const PosType& pos) const { + Image temp (forward); // For thread-safety + assign_pos_of (pos).to (temp); + if (is_out_of_bounds (temp)) + return invalid; + return temp.value(); + } + + private: + Image forward; + vector< vector > reverse; + }; + + + + template + Voxel2Vector::Voxel2Vector (MaskType& mask, const Header& data) : + forward (Image::scratch (data, "Voxel to vector index conversion scratch image")) + { + if (!dimensions_match (mask, data, 0, std::min (mask.ndim(), data.ndim()))) + throw Exception ("Dimension mismatch between image data and processing mask"); + // E.g. 
Mask may be 3D but data are 4D; for any voxel where the mask is + // true, want to include data from all volumes + Adapter::Replicate> r_mask (mask, data); + // Loop in axis order so that those voxels contiguous in memory are still + // contiguous in the vectorised data + index_t counter = 0; + for (auto l = Loop(data) (r_mask, forward); l; ++l) { + if (r_mask.value()) { + forward.value() = counter++; + vector pos; + for (size_t index = 0; index != data.ndim(); ++index) + pos.push_back (forward.index(index)); + reverse.push_back (pos); + } else { + forward.value() = invalid; + } + } + DEBUG ("Voxel2Vector class has " + str(reverse.size()) + " non-zero entries"); + } + + + +} + + +#endif diff --git a/src/connectome/enhance.cpp b/src/connectome/enhance.cpp index 741990d148..31c89b1830 100644 --- a/src/connectome/enhance.cpp +++ b/src/connectome/enhance.cpp @@ -19,6 +19,8 @@ #include "progressbar.h" +#include "misc/bitset.h" + namespace MR { namespace Connectome { diff --git a/src/connectome/enhance.h b/src/connectome/enhance.h index 67e1756086..5be4433a14 100644 --- a/src/connectome/enhance.h +++ b/src/connectome/enhance.h @@ -19,8 +19,6 @@ #include #include -#include "bitset.h" - #include "connectome/mat2vec.h" #include "stats/enhance.h" #include "stats/tfce.h" diff --git a/src/dwi/directions/mask.h b/src/dwi/directions/mask.h index 774759cf88..9e412b6ae0 100644 --- a/src/dwi/directions/mask.h +++ b/src/dwi/directions/mask.h @@ -18,7 +18,8 @@ #include -#include "bitset.h" +#include "misc/bitset.h" + #include "dwi/directions/set.h" diff --git a/src/dwi/directions/set.cpp b/src/dwi/directions/set.cpp index 38acc63027..e04a7ff4b4 100644 --- a/src/dwi/directions/set.cpp +++ b/src/dwi/directions/set.cpp @@ -18,7 +18,7 @@ #include #include -#include "bitset.h" +#include "misc/bitset.h" #include "math/rng.h" diff --git a/src/dwi/shells.h b/src/dwi/shells.h index e8fff844d5..cd0499e213 100644 --- a/src/dwi/shells.h +++ b/src/dwi/shells.h @@ -21,9 +21,8 @@ #include #include "app.h" -#include "bitset.h" - #include "file/config.h" +#include "misc/bitset.h" // Don't expect these values to change depending on the particular command that is initialising the Shells class; diff --git a/src/dwi/tractography/SIFT2/coeff_optimiser.h b/src/dwi/tractography/SIFT2/coeff_optimiser.h index 7ef0c0da4e..b843dd22a3 100644 --- a/src/dwi/tractography/SIFT2/coeff_optimiser.h +++ b/src/dwi/tractography/SIFT2/coeff_optimiser.h @@ -18,10 +18,9 @@ #include -#include "bitset.h" - #include "math/golden_section_search.h" #include "math/quadratic_line_search.h" +#include "misc/bitset.h" #include "dwi/tractography/SIFT/track_index_range.h" #include "dwi/tractography/SIFT/types.h" diff --git a/src/dwi/tractography/SIFT2/tckfactor.cpp b/src/dwi/tractography/SIFT2/tckfactor.cpp index 8daad778c2..7748093f6b 100644 --- a/src/dwi/tractography/SIFT2/tckfactor.cpp +++ b/src/dwi/tractography/SIFT2/tckfactor.cpp @@ -12,11 +12,11 @@ */ -#include "bitset.h" #include "header.h" #include "image.h" #include "math/math.h" +#include "misc/bitset.h" #include "fixel/legacy/fixel_metric.h" #include "fixel/legacy/image.h" diff --git a/src/dwi/tractography/connectome/extract.cpp b/src/dwi/tractography/connectome/extract.cpp index 5f40a84270..75843b45e6 100644 --- a/src/dwi/tractography/connectome/extract.cpp +++ b/src/dwi/tractography/connectome/extract.cpp @@ -14,7 +14,7 @@ #include "dwi/tractography/connectome/extract.h" -#include "bitset.h" +#include "misc/bitset.h" namespace MR { diff --git a/src/dwi/tractography/connectome/matrix.cpp 
b/src/dwi/tractography/connectome/matrix.cpp index 817807a213..125afcc2ac 100644 --- a/src/dwi/tractography/connectome/matrix.cpp +++ b/src/dwi/tractography/connectome/matrix.cpp @@ -14,7 +14,7 @@ #include "dwi/tractography/connectome/matrix.h" -#include "bitset.h" +#include "misc/bitset.h" namespace MR { diff --git a/src/gui/mrview/tool/connectome/connectome.h b/src/gui/mrview/tool/connectome/connectome.h index 3daf8d8334..18dc9fb112 100644 --- a/src/gui/mrview/tool/connectome/connectome.h +++ b/src/gui/mrview/tool/connectome/connectome.h @@ -18,9 +18,11 @@ #include #include -#include "bitset.h" #include "image.h" +#include "misc/bitset.h" +#include "surface/mesh.h" + #include "gui/opengl/gl.h" #include "gui/opengl/lighting.h" #include "gui/opengl/shader.h" @@ -36,8 +38,6 @@ #include "gui/color_button.h" #include "gui/projection.h" -#include "surface/mesh.h" - #include "connectome/mat2vec.h" #include "connectome/lut.h" diff --git a/src/stats/cluster.cpp b/src/stats/cluster.cpp index ecdca0629c..5104a99076 100644 --- a/src/stats/cluster.cpp +++ b/src/stats/cluster.cpp @@ -27,7 +27,7 @@ namespace MR value_type ClusterSize::operator() (const vector_type& stats, const value_type T, vector_type& get_cluster_sizes) const { - vector clusters; + vector clusters; vector labels (stats.size(), 0); connector.run (clusters, labels, stats, T); get_cluster_sizes.resize (stats.size()); From c2e3a6f7db444d764f916b45633b7b92383b0916 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Fri, 23 Jun 2017 16:45:23 +1000 Subject: [PATCH 0053/1471] mrclusterstats: -column option Functionality for voxel-wise explanatory variables, just as has already been implemented for fixelcfestats and connectomestats. --- cmd/maskfilter.cpp | 13 +- cmd/mrclusterstats.cpp | 380 ++++++++++++++++++++--------- core/filter/connected_components.h | 48 ++-- 3 files changed, 303 insertions(+), 138 deletions(-) diff --git a/cmd/maskfilter.cpp b/cmd/maskfilter.cpp index af47ffc5b1..7b0406a92e 100644 --- a/cmd/maskfilter.cpp +++ b/cmd/maskfilter.cpp @@ -104,7 +104,7 @@ void run () { auto input_image = Image::open (argument[0]); int filter_index = argument[1]; - + if (filter_index == 0) { // Mask clean Filter::MaskClean filter (input_image, std::string("applying mask cleaning filter to image ") + Path::basename (argument[0])); filter.set_scale(get_option_value ("scale", DEFAULT_CLEAN_SCALE)); @@ -120,16 +120,9 @@ void run () { if (filter_index == 1) { // Connected components Filter::ConnectedComponents filter (input_image, std::string("applying connected-component filter to image ") + Path::basename (argument[0])); auto opt = get_options ("axes"); - vector axes; if (opt.size()) { - axes = opt[0][0]; - for (size_t d = 0; d < input_image.ndim(); d++) - filter.set_ignore_dim (d, true); - for (size_t i = 0; i < axes.size(); i++) { - if (axes[i] >= static_cast (input_image.ndim()) || axes[i] < 0) - throw Exception ("axis supplied to option -ignore is out of bounds"); - filter.set_ignore_dim (axes[i], false); - } + const vector axes = opt[0][0]; + filter.set_axes (axes); } bool largest_only = false; opt = get_options ("largest"); diff --git a/cmd/mrclusterstats.cpp b/cmd/mrclusterstats.cpp index a866a5ba54..bf037ace43 100644 --- a/cmd/mrclusterstats.cpp +++ b/cmd/mrclusterstats.cpp @@ -84,35 +84,26 @@ void usage () "This disables TFCE, which is the default otherwise.") + Argument ("value").type_float (1.0e-6) + + Option ("column", "add a column to the design matrix corresponding to subject voxel-wise values " + "(the contrast vector length must 
include columns for these additions)").allow_multiple() + + Argument ("path").type_file_in() + + Option ("connectivity", "use 26-voxel-neighbourhood connectivity (Default: 6)"); } -// TODO Want to make the connected-components filter & surrounding operations -// a little cleaner; in particular, for supporting voxel-wise regressors, -// need the mechanism for going from image data to vectorised data to be -// accessible by a derivative of SubjectDataImportBase -// -// This will include: -// * A class for transforming image positions into indices for vectorised data -// (requires only an input mask image) -// (If input data has higher dimensionality than mask image, need to -// effectively replicate mask across extra axes) -// * A class for determining the connectivity between elements -// (requires access to the first class, axis list, degree of voxel connectivity, -// also ideally capacity to define custom connectivity for a particular axis -// in order to support dixel data) - - +typedef Stats::TFCE::value_type value_type; -template +template void write_output (const VectorType& data, const Voxel2Vector& v2v, - ImageType& image) { + const std::string& path, + const Header& header) { + auto image = Image::create (path, header); for (size_t i = 0; i != v2v.size(); i++) { assign_pos_of (v2v[i]).to (image); image.value() = data[i]; @@ -121,7 +112,65 @@ void write_output (const VectorType& data, -typedef Stats::TFCE::value_type value_type; +// Define data importer class that willl obtain voxel data for a +// specific subject based on the string path to the image file for +// that subject +// +// The challenge with this mechanism for voxel data is that the +// class must know how to map data from voxels in 3D space into +// a 1D vector of data. This mapping must be done based on the +// analysis mask prior to the importing of any subject data. +// Moreover, in the case of voxel-wise design matrix columns, the +// class must have access to this mapping functionality without +// any modification of the class constructor (since these data +// are initialised in the CohortDataImport class). 
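// The forward/reverse mapping relied upon here can be illustrated with a minimal,
// self-contained sketch (plain C++, independent of the MRtrix Voxel2Vector and Image
// classes; all names below are hypothetical and chosen for illustration only): voxels
// inside the mask receive consecutive indices in a "forward" lookup, while a "reverse"
// table stores the position of each kept voxel so that vectorised results can later be
// written back into image space.

#include <array>
#include <cstdint>
#include <limits>
#include <vector>

struct ToyVoxel2Vector {
  static constexpr uint32_t invalid = std::numeric_limits<uint32_t>::max();
  std::array<size_t,3> dim;
  std::vector<uint32_t> forward;               // per voxel: index into the vectorised data, or invalid
  std::vector<std::array<size_t,3>> reverse;   // per kept voxel: its 3D position

  ToyVoxel2Vector (const std::vector<bool>& mask, const std::array<size_t,3>& d) :
      dim (d), forward (mask.size(), invalid) {
    uint32_t counter = 0;
    for (size_t z = 0; z != dim[2]; ++z)
      for (size_t y = 0; y != dim[1]; ++y)
        for (size_t x = 0; x != dim[0]; ++x) {
          const size_t offset = x + dim[0] * (y + dim[1] * z);
          if (mask[offset]) {
            forward[offset] = counter++;       // voxel -> vector index
            reverse.push_back ({ x, y, z });   // vector index -> voxel
          }
        }
  }

  uint32_t operator() (size_t x, size_t y, size_t z) const {
    return forward[x + dim[0] * (y + dim[1] * z)];
  }
  const std::array<size_t,3>& operator[] (uint32_t index) const {
    return reverse[index];
  }
};

// An importer in the style described above then only needs such a mapping, shared across
// subjects, to copy each subject's masked voxel values into one column of the data matrix.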
+// +class SubjectVoxelImport : public SubjectDataImportBase +{ MEMALIGN(SubjectVoxelImport) + public: + SubjectVoxelImport (const std::string& path) : + SubjectDataImportBase (path), + H (Header::open (path)), + data (H.get_image()) { } + + void operator() (matrix_type::ColXpr column) const override + { + assert (v2v); + assert (column.rows() == size()); + Image temp (data); // For thread-safety + for (size_t i = 0; i != size(); ++i) { + assign_pos_of ((*v2v)[i]).to (temp); + column[i] = temp.value(); + } + } + + default_type operator[] (const size_t index) const override + { + assert (v2v); + assert (index < size()); + Image temp (data); // For thread-safety + assign_pos_of ((*v2v)[index]).to (temp); + return temp.value(); + } + + size_t size() const override { assert (v2v); return v2v->size(); } + + const Header& header() const { return H; } + + void set_mapping (std::shared_ptr& ptr) { + v2v = ptr; + } + + private: + Header H; + const Image data; + + static std::shared_ptr v2v; + +}; +std::shared_ptr SubjectVoxelImport::v2v = nullptr; + + @@ -138,23 +187,61 @@ void run() { const bool do_26_connectivity = get_options("connectivity").size(); const bool do_nonstationary_adjustment = get_options ("nonstationary").size(); - // Read filenames - vector subjects; - { - std::string folder = Path::dirname (argument[0]); - std::ifstream ifs (argument[0].c_str()); - std::string temp; - while (getline (ifs, temp)) - subjects.push_back (Path::join (folder, temp)); + // Load analysis mask and compute adjacency + auto mask_header = Header::open (argument[3]); + auto mask_image = mask_header.get_image(); + Voxel2Vector v2v (mask_image, mask_header); + Filter::Connector connector; + connector.adjacency.set_26_adjacency (do_26_connectivity); + connector.adjacency.initialise (mask_header, v2v); + const size_t num_voxels = v2v.size(); + + // Read file names and check files exist + CohortDataImport importer; + importer.initialise (argument[0]); + for (size_t i = 0; i != importer.size(); ++i) { + if (!dimensions_match (dynamic_cast(importer[i].get())->header(), mask_header)) + throw Exception ("Image file \"" + importer[i]->name() + "\" does not match analysis mask"); } + CONSOLE ("Number of subjects: " + str(importer.size())); // Load design matrix const matrix_type design = load_matrix (argument[1]); - if (design.rows() != (ssize_t)subjects.size()) + if (design.rows() != (ssize_t)importer.size()) throw Exception ("number of input files does not match number of rows in design matrix"); + // Load contrast matrix + const matrix_type contrast = load_matrix (argument[2]); + if (contrast.cols() != design.cols()) + throw Exception ("the number of contrasts does not equal the number of columns in the design matrix"); + + // Before validating the contrast matrix, we first need to see if there are any + // additional design matrix columns coming from voxel-wise subject data + // TODO Functionalise this + vector extra_columns; + bool nans_in_columns = false; + auto opt = get_options ("column"); + for (size_t i = 0; i != opt.size(); ++i) { + extra_columns.push_back (CohortDataImport()); + extra_columns[i].initialise (opt[i][0]); + if (!extra_columns[i].allFinite()) + nans_in_columns = true; + } + if (extra_columns.size()) { + CONSOLE ("number of element-wise design matrix columns: " + str(extra_columns.size())); + if (nans_in_columns) + INFO ("Non-finite values detected in element-wise design matrix columns; individual rows will be removed from voxel-wise design matrices accordingly"); + } + + if (contrast.cols() != 
design.cols() + ssize_t(extra_columns.size())) + throw Exception ("the number of columns per contrast (" + str(contrast.cols()) + ")" + + " does not equal the number of columns in the design matrix (" + str(design.cols()) + ")" + + (extra_columns.size() ? " (taking into account the " + str(extra_columns.size()) + " uses of -column)" : "")); + if (contrast.rows() > 1) + throw Exception ("only a single contrast vector (defined as a row) is currently supported"); + // Load permutations file if supplied - auto opt = get_options("permutations"); + opt = get_options("permutations"); vector > permutations; if (opt.size()) { permutations = Math::Stats::Permutation::load_permutations_file (opt[0][0]); @@ -173,38 +260,24 @@ void run() { throw Exception ("number of rows in the nonstationary permutations file (" + str(opt[0][0]) + ") does not match number of rows in design matrix"); } - // Load contrast matrix - const matrix_type contrast = load_matrix (argument[2]); - if (contrast.cols() != design.cols()) - throw Exception ("the number of contrasts does not equal the number of columns in the design matrix"); - - auto mask_header = Header::open (argument[3]); - // Load Mask and compute adjacency - auto mask_image = mask_header.get_image(); - Voxel2Vector v2v (mask_image, mask_header); - Filter::Connector connector; - connector.adjacency.set_26_adjacency (do_26_connectivity); - connector.adjacency.initialise (mask_header, v2v); - const size_t num_vox = v2v.size(); - - matrix_type data (num_vox, subjects.size()); - + matrix_type data (num_voxels, importer.size()); + bool nans_in_data = false; { // Load images - ProgressBar progress("loading images", subjects.size()); - for (size_t subject = 0; subject < subjects.size(); subject++) { - LogLevelLatch log_level (0); - auto input_image = Image::open (subjects[subject]); //.with_direct_io (3); <- Should be inputting 3D images? 
- check_dimensions (input_image, mask_image, 0, 3); - for (size_t voxel_index = 0; voxel_index != num_vox; ++voxel_index) { - assign_pos_of (v2v[voxel_index]).to (input_image); - data (voxel_index, subject) = input_image.value(); - } + ProgressBar progress ("loading input images", importer.size()); + for (size_t subject = 0; subject < importer.size(); subject++) { + (*importer[subject]) (data.col (subject)); + if (!data.col (subject).allFinite()) + nans_in_data = true; progress++; } } - if (!data.allFinite()) - WARN ("input data contains non-finite value(s)"); + if (nans_in_data) { + INFO ("Non-finite values present in data; rows will be removed from voxel-wise design matrices accordingly"); + if (!extra_columns.size()) { + INFO ("(Note that this will result in slower execution than if such values were not present)"); + } + } Header output_header (mask_header); output_header.datatype() = DataType::Float32; @@ -222,14 +295,126 @@ void run() { const std::string prefix (argument[4]); bool compute_negative_contrast = get_options("negative").size(); - vector_type default_cluster_output (num_vox); + vector_type default_cluster_output (num_voxels); std::shared_ptr default_cluster_output_neg; - vector_type tvalue_output (num_vox); + vector_type tvalue_output (num_voxels); vector_type empirical_enhanced_statistic; if (compute_negative_contrast) - default_cluster_output_neg.reset (new vector_type (num_vox)); + default_cluster_output_neg.reset (new vector_type (num_voxels)); - std::shared_ptr glm (new Math::Stats::GLMTTestFixed (data, design, contrast)); + // Construct the class for performing the initial statistical tests + std::shared_ptr glm_test; + if (extra_columns.size() || nans_in_data) { + glm_test.reset (new GLMTTestVariable (extra_columns, data, design, contrast, nans_in_data, nans_in_columns)); + } else { + glm_test.reset (new GLMTTestFixed (data, design, contrast)); + } + + if (extra_columns.size()) { + + // For each variable of interest (e.g. beta coefficients, effect size etc.) 
need to: + // Construct the output data vector, with size = num_voxels + // For each voxel: + // Use glm_test to obtain the design matrix for the default permutation for that voxel + // Use the relevant Math::Stats::GLM function to get the value of interest for just that voxel + // (will still however need to come out as a matrix_type) + // Write that value to data vector + // Finally, use write_output() function to write to an image file + matrix_type betas (contrast.cols(), num_voxels); + vector_type abs_effect_size (num_voxels), std_effect_size (num_voxels), stdev (num_voxels); + { + class Source + { NOMEMALIGN + public: + Source (const size_t num_voxels) : + num_voxels (num_voxels), + counter (0), + progress (new ProgressBar ("estimating beta coefficients, effect size and standard deviation", num_voxels)) { } + bool operator() (size_t& voxel_index) + { + voxel_index = counter++; + if (counter >= num_voxels) { + progress.reset(); + return false; + } + assert (progress); + ++(*progress); + return true; + } + private: + const size_t num_voxels; + size_t counter; + std::unique_ptr progress; + }; + + class Functor + { MEMALIGN(Functor) + public: + Functor (const matrix_type& data, std::shared_ptr glm_test, const matrix_type& contrasts, + matrix_type& betas, vector_type& abs_effect_size, vector_type& std_effect_size, vector_type& stdev) : + data (data), + glm_test (glm_test), + contrasts (contrasts), + global_betas (betas), + global_abs_effect_size (abs_effect_size), + global_std_effect_size (std_effect_size), + global_stdev (stdev) { } + bool operator() (const size_t& voxel_index) + { + const matrix_type data_f = data.row (voxel_index); + const matrix_type design_f = dynamic_cast(glm_test.get())->default_design (voxel_index); + Math::Stats::GLM::all_stats (data_f, design_f, contrasts, + local_betas, local_abs_effect_size, local_std_effect_size, local_stdev); + global_betas.col (voxel_index) = local_betas; + global_abs_effect_size[voxel_index] = local_abs_effect_size(0,0); + global_std_effect_size[voxel_index] = local_std_effect_size(0,0); + global_stdev[voxel_index] = local_stdev(0,0); + return true; + } + + private: + const matrix_type& data; + const std::shared_ptr glm_test; + const matrix_type& contrasts; + matrix_type& global_betas; + vector_type& global_abs_effect_size; + vector_type& global_std_effect_size; + vector_type& global_stdev; + matrix_type local_betas, local_abs_effect_size, local_std_effect_size, local_stdev; + }; + + Source source (num_voxels); + Functor functor (data, glm_test, contrast, + betas, abs_effect_size, std_effect_size, stdev); + Thread::run_queue (source, Thread::batch (size_t()), Thread::multi (functor)); + } + { + ProgressBar progress ("outputting beta coefficients, effect size and standard deviation", contrast.cols() + 3); + for (ssize_t i = 0; i != contrast.cols(); ++i) { + write_output (betas.row(i), v2v, prefix + (use_tfce ? 
"tfce.mif" : "cluster_sizes.mif"), output_header); + ++progress; + } + write_output (abs_effect_size, v2v, prefix + "abs_effect.mif", output_header); ++progress; + write_output (std_effect_size, v2v, prefix + "std_effect.mif", output_header); ++progress; + write_output (stdev, v2v, prefix + "std_dev.mif", output_header); + } + + } else { + + ProgressBar progress ("outputting beta coefficients, effect size and standard deviation", contrast.cols() + 4); + matrix_type betas, abs_effect_size, std_effect_size, stdev; + Math::Stats::GLM::all_stats (data, design, contrast, + betas, abs_effect_size, std_effect_size, stdev); + ++progress; + for (ssize_t i = 0; i != contrast.cols(); ++i) { + write_output (betas.row(i), v2v, prefix + (use_tfce ? "tfce.mif" : "cluster_sizes.mif"), output_header); + ++progress; + } + write_output (abs_effect_size.row(0), v2v, prefix + "abs_effect.mif", output_header); ++progress; + write_output (std_effect_size.row(0), v2v, prefix + "std_effect.mif", output_header); ++progress; + write_output (stdev.row(0), v2v, prefix + "std_dev.mif", output_header); + + } std::shared_ptr enhancer; if (use_tfce) { @@ -244,80 +429,61 @@ void run() { throw Exception ("nonstationary adjustment is not currently implemented for threshold-based cluster analysis"); if (permutations_nonstationary.size()) { Stats::PermTest::PermutationStack permutations (permutations_nonstationary, "precomputing empirical statistic for non-stationarity adjustment..."); - Stats::PermTest::precompute_empirical_stat (glm, enhancer, permutations, empirical_enhanced_statistic); + Stats::PermTest::precompute_empirical_stat (glm_test, enhancer, permutations, empirical_enhanced_statistic); } else { Stats::PermTest::PermutationStack permutations (nperms_nonstationary, design.rows(), "precomputing empirical statistic for non-stationarity adjustment...", false); - Stats::PermTest::precompute_empirical_stat (glm, enhancer, permutations, empirical_enhanced_statistic); + Stats::PermTest::precompute_empirical_stat (glm_test, enhancer, permutations, empirical_enhanced_statistic); } save_matrix (empirical_enhanced_statistic, prefix + "empirical.txt"); } - Stats::PermTest::precompute_default_permutation (glm, enhancer, empirical_enhanced_statistic, + Stats::PermTest::precompute_default_permutation (glm_test, enhancer, empirical_enhanced_statistic, default_cluster_output, default_cluster_output_neg, tvalue_output); { ProgressBar progress ("generating pre-permutation output", (compute_negative_contrast ? 3 : 2) + contrast.cols() + 3); - { - auto tvalue_image = Image::create (prefix + "tvalue.mif", output_header); - write_output (tvalue_output, v2v, tvalue_image); - } - ++progress; - { - auto cluster_image = Image::create (prefix + (use_tfce ? "tfce.mif" : "cluster_sizes.mif"), output_header); - write_output (default_cluster_output, v2v, cluster_image); - } - ++progress; + write_output (tvalue_output, v2v, prefix + "tvalue.mif", output_header); ++progress; + write_output (default_cluster_output, v2v, prefix + (use_tfce ? "tfce.mif" : "cluster_sizes.mif"), output_header); ++progress; if (compute_negative_contrast) { assert (default_cluster_output_neg); - auto cluster_image_neg = Image::create (prefix + (use_tfce ? "tfce_neg.mif" : "cluster_sizes_neg.mif"), output_header); - write_output (*default_cluster_output_neg, v2v, cluster_image_neg); + write_output (*default_cluster_output_neg, v2v, prefix + (use_tfce ? 
"tfce_neg.mif" : "cluster_sizes_neg.mif"), output_header); ++progress; } auto temp = Math::Stats::GLM::solve_betas (data, design); for (ssize_t i = 0; i < contrast.cols(); ++i) { - auto beta_image = Image::create (prefix + "beta" + str(i) + ".mif", output_header); - write_output (temp.row(i), v2v, beta_image); + write_output (temp.row(i), v2v, prefix + "beta" + str(i) + ".mif", output_header); ++progress; } - { - const auto temp = Math::Stats::GLM::abs_effect_size (data, design, contrast); - auto abs_effect_image = Image::create (prefix + "abs_effect.mif", output_header); - write_output (temp.row(0), v2v, abs_effect_image); - } + const auto abs_effect_size = Math::Stats::GLM::abs_effect_size (data, design, contrast); + write_output (abs_effect_size.row(0), v2v, prefix + "abs_effect.mif", output_header); ++progress; - { - const auto temp = Math::Stats::GLM::std_effect_size (data, design, contrast); - auto std_effect_image = Image::create (prefix + "std_effect.mif", output_header); - write_output (temp.row(0), v2v, std_effect_image); - } + const auto std_effect_size = Math::Stats::GLM::std_effect_size (data, design, contrast); + write_output (std_effect_size.row(0), v2v, prefix + "std_effect.mif", output_header); ++progress; - { - const auto temp = Math::Stats::GLM::stdev (data, design); - auto std_dev_image = Image::create (prefix + "std_dev.mif", output_header); - write_output (temp.row(0), v2v, std_dev_image); - } + const auto stdev = Math::Stats::GLM::stdev (data, design); + write_output (stdev.row(0), v2v, prefix + "std_dev.mif", output_header); } if (!get_options ("notest").size()) { vector_type perm_distribution (num_perms); std::shared_ptr perm_distribution_neg; - vector_type uncorrected_pvalue (num_vox); + vector_type uncorrected_pvalue (num_voxels); std::shared_ptr uncorrected_pvalue_neg; if (compute_negative_contrast) { perm_distribution_neg.reset (new vector_type (num_perms)); - uncorrected_pvalue_neg.reset (new vector_type (num_vox)); + uncorrected_pvalue_neg.reset (new vector_type (num_voxels)); } if (permutations.size()) { - Stats::PermTest::run_permutations (permutations, glm, enhancer, empirical_enhanced_statistic, + Stats::PermTest::run_permutations (permutations, glm_test, enhancer, empirical_enhanced_statistic, default_cluster_output, default_cluster_output_neg, perm_distribution, perm_distribution_neg, uncorrected_pvalue, uncorrected_pvalue_neg); } else { - Stats::PermTest::run_permutations (num_perms, glm, enhancer, empirical_enhanced_statistic, + Stats::PermTest::run_permutations (num_perms, glm_test, enhancer, empirical_enhanced_statistic, default_cluster_output, default_cluster_output_neg, perm_distribution, perm_distribution_neg, uncorrected_pvalue, uncorrected_pvalue_neg); @@ -330,28 +496,20 @@ void run() { } ProgressBar progress ("generating output", compute_negative_contrast ? 
4 : 2); - { - auto uncorrected_pvalue_image = Image::create (prefix + "uncorrected_pvalue.mif", output_header); - write_output (uncorrected_pvalue, v2v, uncorrected_pvalue_image); - } + write_output (uncorrected_pvalue, v2v, prefix + "uncorrected_pvalue.mif", output_header); ++progress; - { - vector_type fwe_pvalue_output (num_vox); - Math::Stats::Permutation::statistic2pvalue (perm_distribution, default_cluster_output, fwe_pvalue_output); - auto fwe_pvalue_image = Image::create (prefix + "fwe_pvalue.mif", output_header); - write_output (fwe_pvalue_output, v2v, fwe_pvalue_image); - } + vector_type fwe_pvalue_output (num_voxels); + Math::Stats::Permutation::statistic2pvalue (perm_distribution, default_cluster_output, fwe_pvalue_output); + write_output (fwe_pvalue_output, v2v, prefix + "fwe_pvalue.mif", output_header); ++progress; if (compute_negative_contrast) { assert (uncorrected_pvalue_neg); assert (perm_distribution_neg); - auto uncorrected_pvalue_image_neg = Image::create (prefix + "uncorrected_pvalue_neg.mif", output_header); - write_output (*uncorrected_pvalue_neg, v2v, uncorrected_pvalue_image_neg); + write_output (*uncorrected_pvalue_neg, v2v, prefix + "uncorrected_pvalue_neg.mif", output_header); ++progress; - vector_type fwe_pvalue_output_neg (num_vox); + vector_type fwe_pvalue_output_neg (num_voxels); Math::Stats::Permutation::statistic2pvalue (*perm_distribution_neg, *default_cluster_output_neg, fwe_pvalue_output_neg); - auto fwe_pvalue_image_neg = Image::create (prefix + "fwe_pvalue_neg.mif", output_header); - write_output (fwe_pvalue_output_neg, v2v, fwe_pvalue_image_neg); + write_output (fwe_pvalue_output_neg, v2v, prefix + "fwe_pvalue_neg.mif", output_header); } } diff --git a/core/filter/connected_components.h b/core/filter/connected_components.h index 3526eb0c83..23891f25d5 100644 --- a/core/filter/connected_components.h +++ b/core/filter/connected_components.h @@ -62,6 +62,11 @@ namespace MR data.clear(); } + void set_axes (const vector& i) { + enabled_axes = i; + data.clear(); + } + void initialise (const Header&, const Voxel2Vector&); const vector& operator[] (const size_t index) const { @@ -107,7 +112,7 @@ namespace MR Connector () { } - // Perform connected components on the mask. 
+ // Perform connected components on vectorized binary data void run (vector&, vector&) const; template void run (vector&, vector&, @@ -116,6 +121,8 @@ namespace MR private: + // Utility functions that perform the actual connected + // components functionality bool next_neighbour (uint32_t&, vector&) const; template bool next_neighbour (uint32_t&, vector&, @@ -156,9 +163,9 @@ namespace MR template bool Connector::next_neighbour (uint32_t& node, - vector& labels, - const VectorType& data, - const float threshold) const + vector& labels, + const VectorType& data, + const float threshold) const { for (auto n : adjacency[node]) { if (!labels[n] && data[n] > threshold) { @@ -173,10 +180,10 @@ namespace MR template void Connector::depth_first_search (const uint32_t root, - Cluster& cluster, - vector& labels, - const VectorType& data, - const float threshold) const + Cluster& cluster, + vector& labels, + const VectorType& data, + const float threshold) const { uint32_t node = root; std::stack stack; @@ -222,15 +229,16 @@ namespace MR template ConnectedComponents (const HeaderType& in) : Base (in), + enabled_axes (ndim(), true), largest_only (false), do_26_connectivity (false) { if (this->ndim() > 4) throw Exception ("Cannot run connected components analysis with more than 4 dimensions"); datatype_ = DataType::UInt32; - dim_to_ignore.resize (this->ndim(), false); - if (this->ndim() == 4) // Ignore 4D unless explicitly instructed to - dim_to_ignore[3] = true; + // By default, ignore all axes above the three spatial dimensions + for (size_t axis = 3; axis < ndim(); ++axis) + enabled_axes[axis] = false; } template @@ -247,8 +255,7 @@ namespace MR Voxel2Vector v2v (in, *this); Connector connector; - for (size_t axis = 0; axis != dim_to_ignore.size(); ++axis) - connector.adjacency.toggle_axis (axis, !dim_to_ignore[axis]); + connector.adjacency.set_axes (enabled_axes); connector.adjacency.set_26_adjacency (do_26_connectivity); connector.adjacency.initialise (in, v2v); @@ -289,10 +296,17 @@ namespace MR - void set_ignore_dim (size_t dim, bool ignore) + void set_axes (const vector& i) { - assert (dim < this->ndim()); - dim_to_ignore[dim] = ignore; + const size_t max_axis = *std::max_element (i.begin(), i.end()); + if (max_axis >= ndim()) + throw Exception ("Requested axis for connected component filter (" + str(max_axis) + " is beyond the dimensionality of the image (" + str(ndim()) + "D)"); + enabled_axes.assign (std::max (max_axis+1, size_t(ndim())), false); + for (const auto& axis : i) { + if (axis < 0) + throw Exception ("Cannot specify negative axis index for connected-component filter"); + enabled_axes[axis] = true; + } } @@ -309,7 +323,7 @@ namespace MR protected: - vector dim_to_ignore; + vector enabled_axes; bool largest_only; bool do_26_connectivity; }; From ca19ceb1bb2e16dec28abee0e333856f2da5e658 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Thu, 6 Jul 2017 18:09:52 +1000 Subject: [PATCH 0054/1471] Stats: First compilation of multi-contrast support Note: None of this code has been tested whatsoever. 
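With this change the contrast argument is read as a matrix with one row per contrast rather than a single row vector; each per-contrast output gains a numeric suffix whenever more than one row is supplied, and the removed -negative option can instead be expressed by adding the negated row to the contrast file. A standalone sketch of that naming convention follows (it mirrors the postfix lambda added to each command; the contrast values and file names are purely illustrative):

#include <iostream>
#include <string>
#include <vector>

int main() {
  // Two rows: a contrast of interest and its negation (covering what -negative used to do).
  const std::vector<std::vector<double>> contrast = { { 1.0, 0.0 }, { -1.0, 0.0 } };
  const size_t num_contrasts = contrast.size();
  // Same convention as the postfix lambda introduced in this patch:
  // no suffix for a single contrast, "_<row>" once there is more than one.
  auto postfix = [&] (const size_t i) { return num_contrasts > 1 ? "_" + std::to_string (i) : std::string(); };
  for (size_t i = 0; i != num_contrasts; ++i)
    std::cout << "fwe_pvalue" << postfix (i) << ".mif\n";   // prints fwe_pvalue_0.mif and fwe_pvalue_1.mif
  return 0;
}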
--- cmd/connectomestats.cpp | 80 +++++----- cmd/fixelcfestats.cpp | 115 +++++++------- cmd/mrclusterstats.cpp | 140 +++++++---------- cmd/vectorstats.cpp | 51 +++---- core/fixel/legacy/image.h | 2 +- core/math/stats/glm.cpp | 194 +++++++++++++---------- core/math/stats/glm.h | 129 ++++++---------- core/math/stats/permutation.cpp | 36 ++--- core/math/stats/permutation.h | 3 +- core/types.h | 8 +- src/connectome/enhance.cpp | 9 +- src/connectome/enhance.h | 9 +- src/dwi/tractography/mapping/writer.h | 36 ++++- src/stats/cfe.cpp | 9 +- src/stats/cfe.h | 7 +- src/stats/cluster.cpp | 14 +- src/stats/cluster.h | 17 +-- src/stats/enhance.h | 16 +- src/stats/permtest.cpp | 211 +++++++++++--------------- src/stats/permtest.h | 89 +++++------ src/stats/tfce.cpp | 17 +-- src/stats/tfce.h | 11 +- testing/data | 2 +- 23 files changed, 564 insertions(+), 641 deletions(-) diff --git a/cmd/connectomestats.cpp b/cmd/connectomestats.cpp index 694fde1d17..cdac302d30 100644 --- a/cmd/connectomestats.cpp +++ b/cmd/connectomestats.cpp @@ -193,10 +193,10 @@ void run() // Load contrast matrix - matrix_type contrast = load_matrix (argument[3]); - if (contrast.cols() > design.cols()) - throw Exception ("too many contrasts for design matrix"); - contrast.conservativeResize (contrast.rows(), design.cols()); + const matrix_type contrast = load_matrix (argument[3]); + const size_t num_contrasts = contrast.rows(); + if (contrast.cols() != design.cols()) + throw Exception ("number of columns in contrast matrix (" + str(contrast.cols()) + ") does not match number of columns in design matrix (" + str(design.cols()) + ")"); const std::string output_prefix = argument[4]; @@ -234,83 +234,89 @@ void run() } } + // Only add contrast row number to image outputs if there's more than one contrast + auto postfix = [&] (const size_t i) { return (num_contrasts > 1) ? 
("_" + str(i)) : ""; }; + { ProgressBar progress ("outputting beta coefficients, effect size and standard deviation...", contrast.cols() + 3); const matrix_type betas = Math::Stats::GLM::solve_betas (data, design); for (size_t i = 0; i < size_t(contrast.cols()); ++i) { - save_matrix (mat2vec.V2M (betas.col(i)), output_prefix + "_beta_" + str(i) + ".csv"); + save_matrix (mat2vec.V2M (betas.col(i)), output_prefix + "_beta" + str(i) + ".csv"); ++progress; } const matrix_type abs_effects = Math::Stats::GLM::abs_effect_size (data, design, contrast); - save_matrix (mat2vec.V2M (abs_effects.col(0)), output_prefix + "_abs_effect.csv"); - ++progress; + for (size_t i = 0; i != num_contrasts; ++i) { + save_matrix (mat2vec.V2M (abs_effects.col(i)), output_prefix + "_abs_effect" + postfix(i) + ".csv"); + ++progress; + } const matrix_type std_effects = Math::Stats::GLM::std_effect_size (data, design, contrast); - matrix_type first_std_effect = mat2vec.V2M (std_effects.col (0)); - for (MR::Connectome::node_t i = 0; i != num_nodes; ++i) { - for (MR::Connectome::node_t j = 0; j != num_nodes; ++j) { - if (!std::isfinite (first_std_effect (i, j))) - first_std_effect (i, j) = 0.0; - } + for (size_t i = 0; i != num_contrasts; ++i) { + save_matrix (mat2vec.V2M (std_effects.col(i)), output_prefix + "_std_effect" + postfix(i) + ".csv"); + ++progress; } - save_matrix (first_std_effect, output_prefix + "_std_effect.csv"); - ++progress; const matrix_type stdevs = Math::Stats::GLM::stdev (data, design); - save_vector (stdevs.col(0), output_prefix + "_std_dev.csv"); + for (size_t i = 0; i != num_contrasts; ++i) { + save_matrix (mat2vec.V2M (stdevs.col(i)), output_prefix + "_std_dev" + postfix(i) + ".csv"); + ++progress; + } } std::shared_ptr glm_ttest (new Math::Stats::GLMTTestFixed (data, design, contrast)); // If performing non-stationarity adjustment we need to pre-compute the empirical statistic - vector_type empirical_statistic; + matrix_type empirical_statistic; if (do_nonstationary_adjustment) { if (permutations_nonstationary.size()) { - Stats::PermTest::PermutationStack perm_stack (permutations_nonstationary, "precomputing empirical statistic for non-stationarity adjustment..."); + Stats::PermTest::PermutationStack perm_stack (permutations_nonstationary, "precomputing empirical statistic for non-stationarity adjustment"); Stats::PermTest::precompute_empirical_stat (glm_ttest, enhancer, perm_stack, empirical_statistic); } else { - Stats::PermTest::PermutationStack perm_stack (nperms_nonstationary, design.rows(), "precomputing empirical statistic for non-stationarity adjustment...", true); + Stats::PermTest::PermutationStack perm_stack (nperms_nonstationary, design.rows(), "precomputing empirical statistic for non-stationarity adjustment", true); Stats::PermTest::precompute_empirical_stat (glm_ttest, enhancer, perm_stack, empirical_statistic); } - save_matrix (mat2vec.V2M (empirical_statistic), output_prefix + "_empirical.csv"); + for (size_t i = 0; i != num_contrasts; ++i) + save_matrix (mat2vec.V2M (empirical_statistic.row(i)), output_prefix + "_empirical" + postfix(i) + ".csv"); } // Precompute default statistic and enhanced statistic - vector_type tvalue_output (num_edges); - vector_type enhanced_output (num_edges); + matrix_type tvalue_output (num_contrasts, num_edges); + matrix_type enhanced_output (num_contrasts, num_edges); - Stats::PermTest::precompute_default_permutation (glm_ttest, enhancer, empirical_statistic, enhanced_output, std::shared_ptr(), tvalue_output); + 
Stats::PermTest::precompute_default_permutation (glm_ttest, enhancer, empirical_statistic, enhanced_output, tvalue_output); - save_matrix (mat2vec.V2M (tvalue_output), output_prefix + "_tvalue.csv"); - save_matrix (mat2vec.V2M (enhanced_output), output_prefix + "_enhanced.csv"); + for (size_t i = 0; i != num_contrasts; ++i) { + save_matrix (mat2vec.V2M (tvalue_output.row(i)), output_prefix + "_tvalue" + postfix(i) + ".csv"); + save_matrix (mat2vec.V2M (enhanced_output.row(i)), output_prefix + "_enhanced" + postfix(i) + ".csv"); + } // Perform permutation testing if (!get_options ("notest").size()) { // FIXME Getting NANs in the null distribution // Check: was result of pre-nulled subject data - vector_type null_distribution (num_perms); - vector_type uncorrected_pvalues (num_edges); + matrix_type null_distribution (num_contrasts, num_perms); + matrix_type uncorrected_pvalues (num_contrasts, num_edges); if (permutations.size()) { Stats::PermTest::run_permutations (permutations, glm_ttest, enhancer, empirical_statistic, - enhanced_output, std::shared_ptr(), - null_distribution, std::shared_ptr(), - uncorrected_pvalues, std::shared_ptr()); + enhanced_output, null_distribution, uncorrected_pvalues); } else { Stats::PermTest::run_permutations (num_perms, glm_ttest, enhancer, empirical_statistic, - enhanced_output, std::shared_ptr(), - null_distribution, std::shared_ptr(), - uncorrected_pvalues, std::shared_ptr()); + enhanced_output, null_distribution, uncorrected_pvalues); } - save_vector (null_distribution, output_prefix + "_null_dist.txt"); - vector_type pvalue_output (num_edges); + for (size_t i = 0; i != num_contrasts; ++i) + save_vector (null_distribution.row(i), output_prefix + "_null_dist" + postfix(i) + ".txt"); + + matrix_type pvalue_output (num_contrasts, num_edges); Math::Stats::Permutation::statistic2pvalue (null_distribution, enhanced_output, pvalue_output); - save_matrix (mat2vec.V2M (pvalue_output), output_prefix + "_fwe_pvalue.csv"); - save_matrix (mat2vec.V2M (uncorrected_pvalues), output_prefix + "_uncorrected_pvalue.csv"); + for (size_t i = 0; i != num_contrasts; ++i) { + save_matrix (mat2vec.V2M (pvalue_output.row(i)), output_prefix + "_fwe_pvalue" + postfix(i) + ".csv"); + save_matrix (mat2vec.V2M (uncorrected_pvalues.row(i)), output_prefix + "_uncorrected_pvalue" + postfix(i) + ".csv"); + } } diff --git a/cmd/fixelcfestats.cpp b/cmd/fixelcfestats.cpp index e5676e2ab9..c47ba736ce 100644 --- a/cmd/fixelcfestats.cpp +++ b/cmd/fixelcfestats.cpp @@ -72,7 +72,7 @@ void usage () + Argument ("design", "the design matrix. Note that a column of 1's will need to be added for correlations.").type_file_in () - + Argument ("contrast", "the contrast vector, specified as a single row of weights").type_file_in () + + Argument ("contrast", "the contrast matrix, specified as rows of weights").type_file_in () + Argument ("tracks", "the tracks used to determine fixel-fixel connectivity").type_tracks_in () @@ -99,9 +99,6 @@ void usage () + OptionGroup ("Additional options for fixelcfestats") - + Option ("negative", "automatically test the negative (opposite) contrast. 
By computing the opposite contrast simultaneously " - "the computation time is reduced.") - + Option ("column", "add a column to the design matrix corresponding to subject fixel-wise values " "(the contrast vector length must include columns for these additions)").allow_multiple() + Argument ("path").type_file_in() @@ -178,8 +175,6 @@ class SubjectFixelImport : public SubjectDataImportBase void run() { - auto opt = get_options ("negative"); - bool compute_negative_contrast = opt.size() ? true : false; const value_type cfe_dh = get_option_value ("cfe_dh", DEFAULT_CFE_DH); const value_type cfe_h = get_option_value ("cfe_h", DEFAULT_CFE_H); const value_type cfe_e = get_option_value ("cfe_e", DEFAULT_CFE_E); @@ -235,7 +230,7 @@ void run() throw Exception ("number of input files does not match number of rows in design matrix"); // Load permutations file if supplied - opt = get_options("permutations"); + auto opt = get_options("permutations"); vector > permutations; if (opt.size()) { permutations = Math::Stats::Permutation::load_permutations_file (opt[0][0]); @@ -260,6 +255,7 @@ void run() // Load contrast matrix const matrix_type contrast = load_matrix (argument[3]); + const size_t num_contrasts = contrast.rows(); // Before validating the contrast matrix, we first need to see if there are any // additional design matrix columns coming from fixel-wise subject data @@ -274,8 +270,6 @@ void run() throw Exception ("the number of columns per contrast (" + str(contrast.cols()) + ")" + (extra_columns.size() ? " (in addition to the " + str(extra_columns.size()) + " uses of -column)" : "") + " does not equal the number of columns in the design matrix (" + str(design.cols()) + ")"); - if (contrast.rows() > 1) - throw Exception ("only a single contrast vector (defined as a row) is currently supported"); // Compute fixel-fixel connectivity vector > connectivity_matrix (num_fixels); @@ -391,22 +385,37 @@ void run() progress++; } + // Only add contrast row number to image outputs if there's more than one contrast + auto postfix = [&] (const size_t i) { return (num_contrasts > 1) ? 
("_" + str(i)) : ""; }; + if (extra_columns.size()) { WARN ("Beta coefficients, effect size and standard deviation outputs not yet implemented for fixel-wise extra columns"); } else { - ProgressBar progress ("outputting beta coefficients, effect size and standard deviation"); - auto temp = Math::Stats::GLM::solve_betas (data, design); + ProgressBar progress ("outputting beta coefficients, effect size and standard deviation", contrast.cols() + (3 * num_contrasts)); - for (ssize_t i = 0; i < contrast.cols(); ++i) { - write_fixel_output (Path::join (output_fixel_directory, "beta" + str(i) + ".mif"), temp.row(i), output_header); + auto temp = Math::Stats::GLM::solve_betas (data, design); + for (size_t i = 0; i != size_t(contrast.cols()); ++i) { + write_fixel_output (Path::join (output_fixel_directory, "beta_" + str(i) + ".mif"), temp.row(i), output_header); + ++progress; + } + temp = Math::Stats::GLM::abs_effect_size (data, design, contrast); + ++progress; + for (size_t i = 0; i != num_contrasts; ++i) { + write_fixel_output (Path::join (output_fixel_directory, "abs_effect" + postfix(i) + ".mif"), temp.row(i), output_header); + ++progress; + } + temp = Math::Stats::GLM::std_effect_size (data, design, contrast); + ++progress; + for (size_t i = 0; i != num_contrasts; ++i) { + write_fixel_output (Path::join (output_fixel_directory, "std_effect" + postfix(i) + ".mif"), temp.row(i), output_header); + ++progress; + } + temp = Math::Stats::GLM::stdev (data, design); + ++progress; + for (size_t i = 0; i != num_contrasts; ++i) { + write_fixel_output (Path::join (output_fixel_directory, "std_dev" + postfix(i) + ".mif"), temp.row(i), output_header); ++progress; } - temp = Math::Stats::GLM::abs_effect_size (data, design, contrast); ++progress; - write_fixel_output (Path::join (output_fixel_directory, "abs_effect.mif"), temp.row(0), output_header); ++progress; - temp = Math::Stats::GLM::std_effect_size (data, design, contrast); ++progress; - write_fixel_output (Path::join (output_fixel_directory, "std_effect.mif"), temp.row(0), output_header); ++progress; - temp = Math::Stats::GLM::stdev (data, design); ++progress; - write_fixel_output (Path::join (output_fixel_directory, "std_dev.mif"), temp.row(0), output_header); } // Construct the class for performing the initial statistical tests @@ -421,7 +430,7 @@ void run() std::shared_ptr cfe_integrator (new Stats::CFE::Enhancer (connectivity_matrix, cfe_dh, cfe_e, cfe_h)); // If performing non-stationarity adjustment we need to pre-compute the empirical CFE statistic - vector_type empirical_cfe_statistic; + matrix_type empirical_cfe_statistic; if (do_nonstationary_adjustment) { if (permutations_nonstationary.size()) { @@ -432,63 +441,51 @@ void run() Stats::PermTest::precompute_empirical_stat (glm_test, cfe_integrator, permutations, empirical_cfe_statistic); } output_header.keyval()["nonstationary adjustment"] = str(true); - write_fixel_output (Path::join (output_fixel_directory, "cfe_empirical.mif"), empirical_cfe_statistic, output_header); + for (size_t i = 0; i != num_contrasts; ++i) + write_fixel_output (Path::join (output_fixel_directory, "cfe_empirical" + postfix(i) + ".mif"), empirical_cfe_statistic.row(i), output_header); } else { output_header.keyval()["nonstationary adjustment"] = str(false); } // Precompute default statistic and CFE statistic - vector_type cfe_output (num_fixels); - std::shared_ptr cfe_output_neg; - vector_type tvalue_output (num_fixels); - if (compute_negative_contrast) - cfe_output_neg.reset (new vector_type (num_fixels)); + 
matrix_type cfe_output (num_contrasts, num_fixels); + matrix_type tvalue_output (num_contrasts, num_fixels); - Stats::PermTest::precompute_default_permutation (glm_test, cfe_integrator, empirical_cfe_statistic, cfe_output, cfe_output_neg, tvalue_output); + Stats::PermTest::precompute_default_permutation (glm_test, cfe_integrator, empirical_cfe_statistic, cfe_output, tvalue_output); - write_fixel_output (Path::join (output_fixel_directory, "cfe.mif"), cfe_output, output_header); - write_fixel_output (Path::join (output_fixel_directory, "tvalue.mif"), tvalue_output, output_header); - if (compute_negative_contrast) - write_fixel_output (Path::join (output_fixel_directory, "cfe_neg.mif"), *cfe_output_neg, output_header); + for (size_t i = 0; i != num_contrasts; ++i) { + write_fixel_output (Path::join (output_fixel_directory, "cfe" + postfix(i) + ".mif"), cfe_output.row(i), output_header); + write_fixel_output (Path::join (output_fixel_directory, "tvalue" + postfix(i) + ".mif"), tvalue_output.row(i), output_header); + } // Perform permutation testing if (!get_options ("notest").size()) { - vector_type perm_distribution (num_perms); - std::shared_ptr perm_distribution_neg; - vector_type uncorrected_pvalues (num_fixels); - std::shared_ptr uncorrected_pvalues_neg; - - if (compute_negative_contrast) { - perm_distribution_neg.reset (new vector_type (num_perms)); - uncorrected_pvalues_neg.reset (new vector_type (num_fixels)); - } + matrix_type perm_distribution (num_contrasts, num_perms); + matrix_type uncorrected_pvalues (num_contrasts, num_fixels); if (permutations.size()) { Stats::PermTest::run_permutations (permutations, glm_test, cfe_integrator, empirical_cfe_statistic, - cfe_output, cfe_output_neg, - perm_distribution, perm_distribution_neg, - uncorrected_pvalues, uncorrected_pvalues_neg); + cfe_output, perm_distribution, uncorrected_pvalues); } else { Stats::PermTest::run_permutations (num_perms, glm_test, cfe_integrator, empirical_cfe_statistic, - cfe_output, cfe_output_neg, - perm_distribution, perm_distribution_neg, - uncorrected_pvalues, uncorrected_pvalues_neg); + cfe_output, perm_distribution, uncorrected_pvalues); } ProgressBar progress ("outputting final results"); - save_matrix (perm_distribution, Path::join (output_fixel_directory, "perm_dist.txt")); ++progress; - - vector_type pvalue_output (num_fixels); - Math::Stats::Permutation::statistic2pvalue (perm_distribution, cfe_output, pvalue_output); ++progress; - write_fixel_output (Path::join (output_fixel_directory, "fwe_pvalue.mif"), pvalue_output, output_header); ++progress; - write_fixel_output (Path::join (output_fixel_directory, "uncorrected_pvalue.mif"), uncorrected_pvalues, output_header); ++progress; - - if (compute_negative_contrast) { - save_matrix (*perm_distribution_neg, Path::join (output_fixel_directory, "perm_dist_neg.txt")); ++progress; - vector_type pvalue_output_neg (num_fixels); - Math::Stats::Permutation::statistic2pvalue (*perm_distribution_neg, *cfe_output_neg, pvalue_output_neg); ++progress; - write_fixel_output (Path::join (output_fixel_directory, "fwe_pvalue_neg.mif"), pvalue_output_neg, output_header); ++progress; - write_fixel_output (Path::join (output_fixel_directory, "uncorrected_pvalue_neg.mif"), *uncorrected_pvalues_neg, output_header); + for (size_t i = 0; i != num_contrasts; ++i) { + save_vector (perm_distribution.row(i), Path::join (output_fixel_directory, "perm_dist" + postfix(i) + ".txt")); + ++progress; } + + matrix_type pvalue_output (num_contrasts, num_fixels); + 
Math::Stats::Permutation::statistic2pvalue (perm_distribution, cfe_output, pvalue_output); + ++progress; + for (size_t i = 0; i != num_contrasts; ++i) { + write_fixel_output (Path::join (output_fixel_directory, "fwe_pvalue" + postfix(i) + ".mif"), pvalue_output.row(i), output_header); + ++progress; + write_fixel_output (Path::join (output_fixel_directory, "uncorrected_pvalue" + postfix(i) + ".mif"), uncorrected_pvalues.row(i), output_header); + ++progress; + } + } } diff --git a/cmd/mrclusterstats.cpp b/cmd/mrclusterstats.cpp index 7f12c25e24..234c5840e3 100644 --- a/cmd/mrclusterstats.cpp +++ b/cmd/mrclusterstats.cpp @@ -74,9 +74,6 @@ void usage () + OptionGroup ("Additional options for mrclusterstats") - + Option ("negative", "automatically test the negative (opposite) contrast. By computing the opposite contrast simultaneously " - "the computation time is reduced.") - + Option ("threshold", "the cluster-forming threshold to use for a standard cluster-based analysis. " "This disables TFCE, which is the default otherwise.") + Argument ("value").type_float (1.0e-6) @@ -154,8 +151,9 @@ void run() { // Load contrast matrix const matrix_type contrast = load_matrix (argument[2]); + const size_t num_contrasts = contrast.rows(); if (contrast.cols() != design.cols()) - throw Exception ("the number of contrasts does not equal the number of columns in the design matrix"); + throw Exception ("the number of columns in the contrast matrix (" + str(contrast.cols()) + " does not equal the number of columns in the design matrix (" + str(design.cols()) + ")"); auto mask_header = Header::open (argument[3]); // Load Mask and compute adjacency @@ -201,14 +199,10 @@ void run() { } const std::string prefix (argument[4]); - bool compute_negative_contrast = get_options("negative").size(); - vector_type default_cluster_output (num_vox); - std::shared_ptr default_cluster_output_neg; - vector_type tvalue_output (num_vox); - vector_type empirical_enhanced_statistic; - if (compute_negative_contrast) - default_cluster_output_neg.reset (new vector_type (num_vox)); + matrix_type default_cluster_output (num_contrasts, num_vox); + matrix_type tvalue_output (num_contrasts, num_vox); + matrix_type empirical_enhanced_statistic; std::shared_ptr glm (new Math::Stats::GLMTTestFixed (data, design, contrast)); @@ -220,6 +214,9 @@ void run() { enhancer.reset (new Stats::Cluster::ClusterSize (connector, cluster_forming_threshold)); } + // Only add contrast row number to image outputs if there's more than one contrast + auto postfix = [&] (const size_t i) { return (num_contrasts > 1) ? ("_" + str(i)) : ""; }; + if (do_nonstationary_adjustment) { if (!use_tfce) throw Exception ("nonstationary adjustment is not currently implemented for threshold-based cluster analysis"); @@ -231,109 +228,88 @@ void run() { Stats::PermTest::precompute_empirical_stat (glm, enhancer, permutations, empirical_enhanced_statistic); } - save_matrix (empirical_enhanced_statistic, prefix + "empirical.txt"); + for (size_t i = 0; i != num_contrasts; ++i) + save_vector (empirical_enhanced_statistic.row(i), prefix + "empirical" + postfix(i) + ".txt"); } Stats::PermTest::precompute_default_permutation (glm, enhancer, empirical_enhanced_statistic, - default_cluster_output, default_cluster_output_neg, tvalue_output); + default_cluster_output, tvalue_output); { - ProgressBar progress ("generating pre-permutation output", (compute_negative_contrast ? 
3 : 2) + contrast.cols() + 3); - { - auto tvalue_image = Image::create (prefix + "tvalue.mif", output_header); - write_output (tvalue_output, mask_indices, tvalue_image); - } - ++progress; - { - auto cluster_image = Image::create (prefix + (use_tfce ? "tfce.mif" : "cluster_sizes.mif"), output_header); - write_output (default_cluster_output, mask_indices, cluster_image); - } - ++progress; - if (compute_negative_contrast) { - assert (default_cluster_output_neg); - auto cluster_image_neg = Image::create (prefix + (use_tfce ? "tfce_neg.mif" : "cluster_sizes_neg.mif"), output_header); - write_output (*default_cluster_output_neg, mask_indices, cluster_image_neg); + ProgressBar progress ("generating pre-permutation output", contrast.cols() + (5 * num_contrasts)); + for (size_t i = 0; i != num_contrasts; ++i) { + auto tvalue_image = Image::create (prefix + "tvalue" + postfix(i) + ".mif", output_header); + write_output (tvalue_output.row(i), mask_indices, tvalue_image); ++progress; } - auto temp = Math::Stats::GLM::solve_betas (data, design); - for (ssize_t i = 0; i < contrast.cols(); ++i) { - auto beta_image = Image::create (prefix + "beta" + str(i) + ".mif", output_header); - write_output (temp.row(i), mask_indices, beta_image); + for (size_t i = 0; i != num_contrasts; ++i) { + auto cluster_image = Image::create (prefix + (use_tfce ? "tfce" : "cluster_sizes") + postfix(i) + ".mif", output_header); + write_output (default_cluster_output.row(i), mask_indices, cluster_image); ++progress; } + { + const auto betas = Math::Stats::GLM::solve_betas (data, design); + for (size_t i = 0; i != size_t(contrast.cols()); ++i) { + auto beta_image = Image::create (prefix + "beta" + str(i) + ".mif", output_header); + write_output (betas.row(i), mask_indices, beta_image); + ++progress; + } + } { const auto temp = Math::Stats::GLM::abs_effect_size (data, design, contrast); - auto abs_effect_image = Image::create (prefix + "abs_effect.mif", output_header); - write_output (temp.row(0), mask_indices, abs_effect_image); + for (size_t i = 0; i != num_contrasts; ++i) { + auto abs_effect_image = Image::create (prefix + "abs_effect" + postfix(i) + ".mif", output_header); + write_output (temp.row(i), mask_indices, abs_effect_image); + ++progress; + } } - ++progress; { const auto temp = Math::Stats::GLM::std_effect_size (data, design, contrast); - auto std_effect_image = Image::create (prefix + "std_effect.mif", output_header); - write_output (temp.row(0), mask_indices, std_effect_image); + for (size_t i = 0; i != num_contrasts; ++i) { + auto std_effect_image = Image::create (prefix + "std_effect" + postfix(i) + ".mif", output_header); + write_output (temp.row(i), mask_indices, std_effect_image); + ++progress; + } } - ++progress; { const auto temp = Math::Stats::GLM::stdev (data, design); - auto std_dev_image = Image::create (prefix + "std_dev.mif", output_header); - write_output (temp.row(0), mask_indices, std_dev_image); + for (size_t i = 0; i != num_contrasts; ++i) { + auto std_dev_image = Image::create (prefix + "std_dev" + postfix(i) + ".mif", output_header); + write_output (temp.row(i), mask_indices, std_dev_image); + ++progress; + } } } if (!get_options ("notest").size()) { - vector_type perm_distribution (num_perms); - std::shared_ptr perm_distribution_neg; - vector_type uncorrected_pvalue (num_vox); - std::shared_ptr uncorrected_pvalue_neg; - - if (compute_negative_contrast) { - perm_distribution_neg.reset (new vector_type (num_perms)); - uncorrected_pvalue_neg.reset (new vector_type (num_vox)); - } + matrix_type 
perm_distribution (num_contrasts, num_perms); + matrix_type uncorrected_pvalue (num_contrasts, num_vox); if (permutations.size()) { Stats::PermTest::run_permutations (permutations, glm, enhancer, empirical_enhanced_statistic, - default_cluster_output, default_cluster_output_neg, - perm_distribution, perm_distribution_neg, - uncorrected_pvalue, uncorrected_pvalue_neg); + default_cluster_output, perm_distribution, uncorrected_pvalue); } else { Stats::PermTest::run_permutations (num_perms, glm, enhancer, empirical_enhanced_statistic, - default_cluster_output, default_cluster_output_neg, - perm_distribution, perm_distribution_neg, - uncorrected_pvalue, uncorrected_pvalue_neg); + default_cluster_output, perm_distribution, uncorrected_pvalue); } - save_matrix (perm_distribution, prefix + "perm_dist.txt"); - if (compute_negative_contrast) { - assert (perm_distribution_neg); - save_matrix (*perm_distribution_neg, prefix + "perm_dist_neg.txt"); - } + for (size_t i = 0; i != num_contrasts; ++i) + save_vector (perm_distribution.row(i), prefix + "perm_dist" + postfix(i) + ".txt"); - ProgressBar progress ("generating output", compute_negative_contrast ? 4 : 2); - { - auto uncorrected_pvalue_image = Image::create (prefix + "uncorrected_pvalue.mif", output_header); - write_output (uncorrected_pvalue, mask_indices, uncorrected_pvalue_image); - } - ++progress; - { - vector_type fwe_pvalue_output (num_vox); - Math::Stats::Permutation::statistic2pvalue (perm_distribution, default_cluster_output, fwe_pvalue_output); - auto fwe_pvalue_image = Image::create (prefix + "fwe_pvalue.mif", output_header); - write_output (fwe_pvalue_output, mask_indices, fwe_pvalue_image); + ProgressBar progress ("generating output", 2); + for (size_t i = 0; i != num_contrasts; ++i) { + auto uncorrected_pvalue_image = Image::create (prefix + "uncorrected_pvalue" + postfix(i) + ".mif", output_header); + write_output (uncorrected_pvalue.row(i), mask_indices, uncorrected_pvalue_image); + ++progress; } - ++progress; - if (compute_negative_contrast) { - assert (uncorrected_pvalue_neg); - assert (perm_distribution_neg); - auto uncorrected_pvalue_image_neg = Image::create (prefix + "uncorrected_pvalue_neg.mif", output_header); - write_output (*uncorrected_pvalue_neg, mask_indices, uncorrected_pvalue_image_neg); + matrix_type fwe_pvalue_output (num_contrasts, num_vox); + Math::Stats::Permutation::statistic2pvalue (perm_distribution, default_cluster_output, fwe_pvalue_output); + for (size_t i = 0; i != num_contrasts; ++i) { + auto fwe_pvalue_image = Image::create (prefix + "fwe_pvalue" + str(i) + ".mif", output_header); + write_output (fwe_pvalue_output.row(i), mask_indices, fwe_pvalue_image); ++progress; - vector_type fwe_pvalue_output_neg (num_vox); - Math::Stats::Permutation::statistic2pvalue (*perm_distribution_neg, *default_cluster_output_neg, fwe_pvalue_output_neg); - auto fwe_pvalue_image_neg = Image::create (prefix + "fwe_pvalue_neg.mif", output_header); - write_output (fwe_pvalue_output_neg, mask_indices, fwe_pvalue_image_neg); } - } + } } diff --git a/cmd/vectorstats.cpp b/cmd/vectorstats.cpp index a3f2983d4c..401e4c3fd9 100644 --- a/cmd/vectorstats.cpp +++ b/cmd/vectorstats.cpp @@ -102,10 +102,10 @@ void run() } // Load contrast matrix - matrix_type contrast = load_matrix (argument[3]); - if (contrast.cols() > design.cols()) - throw Exception ("too many contrasts for design matrix"); - contrast.conservativeResize (contrast.rows(), design.cols()); + const matrix_type contrast = load_matrix (argument[3]); + const size_t num_contrasts 
= contrast.rows(); + if (contrast.cols() != design.cols()) + throw Exception ("number of columns in contrast matrix (" + str(contrast.cols()) + ") does not match number of columns in design matrix (" + str(design.cols()) + ")"); const std::string output_prefix = argument[4]; @@ -133,29 +133,17 @@ void run() } { - ProgressBar progress ("outputting beta coefficients, effect size and standard deviation...", contrast.cols() + 3); - const matrix_type betas = Math::Stats::GLM::solve_betas (data, design); - for (size_t i = 0; i < size_t(contrast.cols()); ++i) { - save_vector (betas.col(i), output_prefix + "_beta_" + str(i) + ".csv"); - ++progress; - } + CONSOLE ("Beta coefficients: " + str(betas)); const matrix_type abs_effects = Math::Stats::GLM::abs_effect_size (data, design, contrast); - save_vector (abs_effects.col(0), output_prefix + "_abs_effect.csv"); - ++progress; + CONSOLE ("Absolute effects: " + str(abs_effects)); const matrix_type std_effects = Math::Stats::GLM::std_effect_size (data, design, contrast); - vector_type first_std_effect = std_effects.col(0); - for (size_t i = 0; i != num_elements; ++i) { - if (!std::isfinite (first_std_effect[i])) - first_std_effect[i] = 0.0; - } - save_vector (first_std_effect, output_prefix + "_std_effect.csv"); - ++progress; + CONSOLE ("Standardised effects: " + str(std_effects)); const matrix_type stdevs = Math::Stats::GLM::stdev (data, design); - save_vector (stdevs.col(0), output_prefix + "_std_dev.csv"); + CONSOLE ("Standard deviations: " + str(stdevs)); } std::shared_ptr glm_ttest (new Math::Stats::GLMTTestFixed (data, design, contrast)); @@ -166,34 +154,29 @@ void run() vector default_permutation (filenames.size()); for (size_t i = 0; i != filenames.size(); ++i) default_permutation[i] = i; - vector_type default_tvalues; + matrix_type default_tvalues; (*glm_ttest) (default_permutation, default_tvalues); - save_vector (default_tvalues, output_prefix + "_tvalue.csv"); + CONSOLE ("T-values for default statistic: " + str(default_tvalues)); // Perform permutation testing if (!get_options ("notest").size()) { std::shared_ptr enhancer; - vector_type null_distribution (num_perms), uncorrected_pvalues (num_perms); - vector_type empirical_distribution; + matrix_type null_distribution (num_perms, num_contrasts), uncorrected_pvalues (num_perms, num_contrasts); + matrix_type empirical_distribution; if (permutations.size()) { Stats::PermTest::run_permutations (permutations, glm_ttest, enhancer, empirical_distribution, - default_tvalues, std::shared_ptr(), - null_distribution, std::shared_ptr(), - uncorrected_pvalues, std::shared_ptr()); + default_tvalues, null_distribution, uncorrected_pvalues); } else { Stats::PermTest::run_permutations (num_perms, glm_ttest, enhancer, empirical_distribution, - default_tvalues, std::shared_ptr(), - null_distribution, std::shared_ptr(), - uncorrected_pvalues, std::shared_ptr()); + default_tvalues, null_distribution, uncorrected_pvalues); } - vector_type default_pvalues (num_elements); + matrix_type default_pvalues (num_contrasts, num_elements); Math::Stats::Permutation::statistic2pvalue (null_distribution, default_tvalues, default_pvalues); - save_vector (default_pvalues, output_prefix + "_fwe_pvalue.csv"); - save_vector (uncorrected_pvalues, output_prefix + "_uncorrected_pvalue.csv"); + CONSOLE ("FWE-corrected p-values: " + str(default_pvalues)); + CONSOLE ("Uncorrected p-values: " + str(uncorrected_pvalues)); } - } diff --git a/core/fixel/legacy/image.h b/core/fixel/legacy/image.h index dca1c10729..1738fd8a0b 100644 --- 
a/core/fixel/legacy/image.h +++ b/core/fixel/legacy/image.h @@ -73,7 +73,7 @@ namespace MR friend std::ostream& operator<< (std::ostream& stream, const Value& value) { stream << "Position [ "; for (size_t n = 0; n < value.offsets.ndim(); ++n) - stream << value.offsets[n] << " "; + stream << value.offsets.index(n) << " "; stream << "], offset = " << value.offsets.value() << ", " << value.size() << " elements"; return stream; } diff --git a/core/math/stats/glm.cpp b/core/math/stats/glm.cpp index 916701903a..3c6b201a64 100644 --- a/core/math/stats/glm.cpp +++ b/core/math/stats/glm.cpp @@ -28,59 +28,6 @@ namespace MR namespace GLM { - matrix_type scale_contrasts (const matrix_type& contrasts, const matrix_type& design, const size_t degrees_of_freedom) - { - assert (contrasts.cols() == design.cols()); - const matrix_type XtX = design.transpose() * design; - const matrix_type pinv_XtX = (XtX.transpose() * XtX).fullPivLu().solve (XtX.transpose()); - matrix_type scaled_contrasts (contrasts); - - for (size_t n = 0; n < size_t(contrasts.rows()); ++n) { - auto pinv_XtX_c = pinv_XtX * contrasts.row(n).transpose(); - scaled_contrasts.row(n) *= std::sqrt (value_type(degrees_of_freedom) / contrasts.row(n).dot (pinv_XtX_c)); - } - return scaled_contrasts; - } - - - - void ttest_prescaled (matrix_type& tvalues, - const matrix_type& design, - const matrix_type& pinv_design, - const matrix_type& measurements, - const matrix_type& scaled_contrasts, - matrix_type& betas, - matrix_type& residuals) - { - betas.noalias() = measurements * pinv_design; - residuals.noalias() = measurements - betas * design; - tvalues.noalias() = betas * scaled_contrasts; - for (size_t n = 0; n < size_t(tvalues.rows()); ++n) - tvalues.row(n).array() /= residuals.row(n).norm(); - } - - - - void ttest (matrix_type& tvalues, - const matrix_type& design, - const matrix_type& measurements, - const matrix_type& contrasts, - matrix_type& betas, - matrix_type& residuals) - { - const matrix_type pinv_design = Math::pinv (design); - betas.noalias() = measurements * pinv_design; - residuals.noalias() = measurements - betas * design; - const matrix_type XtX = design.transpose() * design; - const matrix_type pinv_XtX = (XtX.transpose() * XtX).fullPivLu().solve (XtX.transpose()); - const size_t degrees_of_freedom = design.rows() - rank(design); - tvalues.noalias() = betas * contrasts; - for (size_t n = 0; n != size_t(tvalues.rows()); ++n) { - const default_type variance = residuals.row(n).squaredNorm() / degrees_of_freedom; - tvalues.row(n).array() /= sqrt(variance * contrasts.row(n).dot (pinv_XtX * contrasts.row(n).transpose())); - } - } - @@ -123,13 +70,13 @@ namespace MR GLMTTestFixed::GLMTTestFixed (const matrix_type& measurements, const matrix_type& design, const matrix_type& contrast) : GLMTestBase (measurements, design, contrast), pinvX (Math::pinv (X)), - scaled_contrasts (GLM::scale_contrasts (contrast, X, X.rows()-rank(X)).transpose()) { } + scaled_contrasts (calc_scaled_contrasts()) { } - void GLMTTestFixed::operator() (const vector& perm_labelling, vector_type& output) const + void GLMTTestFixed::operator() (const vector& perm_labelling, matrix_type& output) const { - output = vector_type::Zero (y.rows()); + output = matrix_type::Zero (num_elements(), num_outputs()); matrix_type tvalues, betas, residuals, SX, pinvSX; // TODO Currently the entire design matrix is permuted; @@ -148,19 +95,87 @@ namespace MR SX.transposeInPlace(); pinvSX.transposeInPlace(); for (ssize_t i = 0; i < y.rows(); i += GLM_BATCH_SIZE) { - const matrix_type tmp = 
y.block (i, 0, std::min (GLM_BATCH_SIZE, (int)(y.rows()-i)), y.cols()); - GLM::ttest_prescaled (tvalues, SX, pinvSX, tmp, scaled_contrasts, betas, residuals); - for (ssize_t n = 0; n < tvalues.rows(); ++n) { - value_type val = tvalues(n,0); - if (!std::isfinite (val)) - val = value_type(0); - output[i+n] = val; + const auto tmp = y.block (i, 0, std::min (GLM_BATCH_SIZE, (int)(y.rows()-i)), y.cols()); + ttest (tvalues, SX, pinvSX, tmp, betas, residuals); + for (size_t col = 0; col != num_outputs(); ++col) { + for (size_t n = 0; n != size_t(tvalues.rows()); ++n) { + value_type val = tvalues(n, col); + if (!std::isfinite (val)) + val = value_type(0); + output(i+n, col) = val; + } } } } + // scale contrasts for use in ttest() member function + /* This pre-scales the contrast matrix in order to make conversion from GLM betas + * to t-values more computationally efficient. + * + * For design matrix X, contrast matrix c, beta vector b and variance o^2, the t-value is calculated as: + * c^T.b + * t = -------------------------- + * sqrt(o^2.c^T.(X^T.X)^-1.c) + * + * Definition of variance (for vector of residuals e): + * e^T.e + * o^2 = ------ + * DOF(X) + * + * (Note that the above equations are used directly in GLMTTestVariable) + * + * This function will generate scaled contrasts c' from c, such that: + * DOF(X) + * c' = c.sqrt(------------------) + * c^T.(X^T.X)^-1.c + * + * c'^T.b + * t = ----------- + * sqrt(e^T.e) + * + * Note each row of the contrast matrix will still be treated as an independent contrast. The number + * of elements in each contrast vector must equal the number of columns in the design matrix. + */ + matrix_type GLMTTestFixed::calc_scaled_contrasts() const + { + const size_t dof = X.rows() - rank(X); + const matrix_type XtX = X.transpose() * X; + const matrix_type pinv_XtX = (XtX.transpose() * XtX).fullPivLu().solve (XtX.transpose()); + matrix_type result = c; + for (size_t n = 0; n < size_t(c.rows()); ++n) { + auto pinv_XtX_c = pinv_XtX * c.row(n).transpose(); + result.row(n) *= std::sqrt (value_type(dof) / c.row(n).dot (pinv_XtX_c)); + } + return result.transpose(); + } + + + + void GLMTTestFixed::ttest (matrix_type& tvalues, + const matrix_type& design, + const matrix_type& pinv_design, + Eigen::Block measurements, + matrix_type& betas, + matrix_type& residuals) const + { + betas.noalias() = measurements * pinv_design; + residuals.noalias() = measurements - betas * design; + tvalues.noalias() = betas * scaled_contrasts; + for (size_t n = 0; n < size_t(tvalues.rows()); ++n) + tvalues.row(n).array() /= residuals.row(n).norm(); + } + + + + + + + + + + GLMTTestVariable::GLMTTestVariable (const vector& importers, const matrix_type& measurements, const matrix_type& design, const matrix_type& contrasts) : GLMTestBase (measurements, design, contrasts), importers (importers) @@ -172,9 +187,9 @@ namespace MR - void GLMTTestVariable::operator() (const vector& perm_labelling, vector_type& output) const + void GLMTTestVariable::operator() (const vector& perm_labelling, matrix_type& output) const { - output = vector_type::Zero (y.rows()); + output = matrix_type::Zero (num_elements(), num_outputs()); matrix_type tvalues, betas, residuals; // Set the size of the permuted design matrix to include the additional columns @@ -203,20 +218,16 @@ namespace MR for (ssize_t row = 0; row != X.rows(); ++row) SX.block(row, X.cols(), 1, importers.size()) = extra_data.row(perm_labelling[row]); - // Need to pre-scale contrasts if we want to use the ttest() function; - // otherwise, need to define a 
different function that doesn't rely on pre-scaling - // Went for the latter option; this call doesn't need pre-scaling of contrasts, + // This call doesn't need pre-scaling of contrasts, // nor does it need a pre-computed pseudo-inverse of the design matrix - GLM::ttest (tvalues, SX.transpose(), y.row(element), c, betas, residuals); + ttest (tvalues, SX.transpose(), y.row(element), betas, residuals); - // FIXME - // Currently output only the first contrast, as is done in GLMTTestFixed - // tvalues should have one row only (since we're only testing a single row), and - // number of columns equal to the number of contrasts - value_type val = tvalues (element, 0); - if (!std::isfinite (val)) - val = value_type(0); - output[element] = val; + for (size_t col = 0; col != num_outputs(); ++col) { + value_type val = tvalues (element, col); + if (!std::isfinite (val)) + val = value_type(0); + output(element, col) = val; + } } } @@ -234,6 +245,33 @@ namespace MR + void GLMTTestVariable::ttest (matrix_type& tvalues, + const matrix_type& design, + const matrix_type& measurements, + matrix_type& betas, + matrix_type& residuals) const + { + const matrix_type pinv_design = Math::pinv (design); + betas.noalias() = measurements * pinv_design; + residuals.noalias() = measurements - betas * design; + const matrix_type XtX = design.transpose() * design; + const matrix_type pinv_XtX = (XtX.transpose() * XtX).fullPivLu().solve (XtX.transpose()); + const size_t degrees_of_freedom = design.rows() - rank(design); + tvalues.noalias() = betas * c; + for (size_t n = 0; n != size_t(tvalues.rows()); ++n) { + const default_type variance = residuals.row(n).squaredNorm() / degrees_of_freedom; + tvalues.row(n).array() /= sqrt(variance * c.row(n).dot (pinv_XtX * c.row(n).transpose())); + } + } + + + + + + + + + GLMFTestFixed::GLMFTestFixed (const matrix_type& measurements, const matrix_type& design, const matrix_type& contrasts, const matrix_type& ftests) : GLMTestBase (measurements, design, contrasts), @@ -241,7 +279,7 @@ namespace MR - void GLMFTestFixed::operator() (const vector& perm_labelling, vector_type& output) const + void GLMFTestFixed::operator() (const vector& perm_labelling, matrix_type& output) const { } diff --git a/core/math/stats/glm.h b/core/math/stats/glm.h index 035fc919cb..5b2c69d93a 100644 --- a/core/math/stats/glm.h +++ b/core/math/stats/glm.h @@ -32,73 +32,6 @@ namespace MR { - // TODO With the upcoming changes, many of these 'loose' functions become specific to the GLMTTestFixed class - // Therefore they should be moved - - - //! scale contrasts for use in t-test - /*! This function pre-scales a contrast matrix in order to make conversion from GLM betas - * to t-values more computationally efficient. - * - * For design matrix X, contrast matrix c, beta vector b and variance o^2, the t-value is calculated as: - * c^T.b - * t = -------------------------- - * sqrt(o^2.c^T.(X^T.X)^-1.c) - * - * Definition of variance (for vector of residuals e): - * e^T.e - * o^2 = ------ - * DOF(X) - * - * This function will generate scaled contrasts c' from c, such that: - * DOF(X) - * c' = c.sqrt(------------------) - * c^T.(X^T.X)^-1.c - * - * c'^T.b - * t = ----------- - * sqrt(e^T.e) - * - * Note each row of the contrast matrix will still be treated as an independent contrast. The number - * of elements in each contrast vector must equal the number of columns in the design matrix */ - matrix_type scale_contrasts (const matrix_type& contrasts, const matrix_type& design, const size_t degrees_of_freedom); - - - - //! 
generic GLM t-test - /*! note that the data, effects, and residual matrices are transposed. - * This is to take advantage of Eigen's convention of storing - * matrices in column-major format by default. - * - * Note also that the contrast matrix should already have been scaled - * using the GLM::scale_contrasts() function. */ - void ttest_prescaled (matrix_type& tvalues, - const matrix_type& design, - const matrix_type& pinv_design, - const matrix_type& measurements, - const matrix_type& scaled_contrasts, - matrix_type& betas, - matrix_type& residuals); - - - //! generic GLM t-test - /*! note that the data, effects, and residual matrices are transposed. - * This is to take advantage of Eigen's convention of storing - * matrices in column-major format by default. - * - * This version does not require, or take advantage of, pre-calculation - * of the pseudo-inverse of the design matrix. - * - * Note that for this version the contrast matrix should NOT have been scaled - * using the GLM::scale_contrasts() function. */ - void ttest (matrix_type& tvalues, - const matrix_type& design, - const matrix_type& measurements, - const matrix_type& contrasts, - matrix_type& betas, - matrix_type& residuals); - - /** \addtogroup Statistics @{ */ @@ -160,9 +93,9 @@ namespace MR /*! Compute the statistics * @param perm_labelling a vector to shuffle the rows in the design matrix (for permutation testing) - * @param stats the vector containing the output statistics + * @param output the matrix containing the output statistics (one vector per contrast) */ - virtual void operator() (const vector& perm_labelling, vector_type& output) const = 0; + virtual void operator() (const vector& perm_labelling, matrix_type& output) const = 0; size_t num_subjects () const { return y.cols(); } size_t num_elements () const { return y.rows(); } @@ -185,7 +118,6 @@ namespace MR * tested; able to pre-compute a number of matrices before testing, improving * execution speed. */ - // TODO Currently this appears to only support a single contrast, since the output is a vector_type class GLMTTestFixed : public GLMTestBase { MEMALIGN(GLMTTestFixed) public: /*! @@ -197,14 +129,32 @@ namespace MR /*! Compute the t-statistics * @param perm_labelling a vector to shuffle the rows in the design matrix (for permutation testing) - * @param stats the vector containing the output t-statistics - * @param max_stat the maximum t-statistic - * @param min_stat the minimum t-statistic + * @param output the vector containing the output t-statistics (one vector per contrast) */ - void operator() (const vector& perm_labelling, vector_type& output) const override; + void operator() (const vector& perm_labelling, matrix_type& output) const override; protected: const matrix_type pinvX, scaled_contrasts; + + private: + + /*! This function pre-scales a contrast matrix in order to make conversion from GLM betas + * to t-values more computationally efficient. */ + matrix_type calc_scaled_contrasts() const; + + //! generic GLM t-test + /*! note that the data, effects, and residual matrices are transposed. + * This is to take advantage of Eigen's convention of storing + * matrices in column-major format by default. + * + * Note also that the contrast matrix should already have been scaled + * using the GLM::scale_contrasts() function. */ + void ttest (matrix_type& tvalues, + const matrix_type& design, + const matrix_type& pinv_design, + Eigen::Block measurements, + matrix_type& betas, + matrix_type& residuals) const; }; //! 
@} @@ -222,30 +172,39 @@ namespace MR * particular type of data being tested. Therefore an Importer class must be * defined that is responsible for acquiring and vectorising these data. */ - // TODO Define a "standard" interface for data import: Receives as input a - // text string corresponding to a file, and writes the result to a - // vector / block vector - // If this could be defined using a base class, it would remove the templating here... - // The same class would also be used in the cmd/ files to do the initial measurement matrix fill class GLMTTestVariable : public GLMTestBase { NOMEMALIGN public: GLMTTestVariable (const vector& importers, const matrix_type& measurements, const matrix_type& design, const matrix_type& contrasts); /*! Compute the t-statistics * @param perm_labelling a vector to shuffle the rows in the design matrix (for permutation testing) - * @param stats the vector containing the output t-statistics + * @param output the vector containing the output t-statistics * - * TODO In GLMTTestVariable, this function will additionally need to import the + * In GLMTTestVariable, this function additionally needs to import the * extra external data individually for each element tested. */ - void operator() (const vector& perm_labelling, vector_type& stats) const override; + void operator() (const vector& perm_labelling, matrix_type& output) const override; - // TODO A function to acquire the design matrix for the default permutation + // A function to acquire the design matrix for the default permutation // (note that this needs to be re-run for each element being tested) matrix_type default_design (const matrix_type& design, const size_t index) const; protected: const vector& importers; + + //! generic GLM t-test + /*! note that the data, effects, and residual matrices are transposed. + * This is to take advantage of Eigen's convention of storing + * matrices in column-major format by default. + * + * This version does not require, or take advantage of, pre-calculation + * of the pseudo-inverse of the design matrix, or pre-scaling of contrasts. + */ + void ttest (matrix_type& tvalues, + const matrix_type& design, + const matrix_type& measurements, + matrix_type& betas, + matrix_type& residuals) const; }; @@ -265,9 +224,9 @@ namespace MR /*! Compute the F-statistics * @param perm_labelling a vector to shuffle the rows in the design matrix (for permutation testing) - * @param stats the vector containing the output f-statistics + * @param output the vector containing the output f-statistics */ - void operator() (const vector& perm_labelling, vector_type& stats) const override; + void operator() (const vector& perm_labelling, matrix_type& output) const override; protected: // TODO How to deal with contrast scaling? 
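For reference, the contrast pre-scaling that GLMTTestFixed now performs in calc_scaled_contrasts() (see the comment block reproduced in glm.cpp above) can be checked numerically in a few lines. The sketch below is illustrative only: it uses plain Eigen rather than the MRtrix Math::Stats types, and the design matrix X, measurements y and contrast c are made-up values. It demonstrates that the pre-scaled formulation t = c'^T.b / sqrt(e^T.e) reproduces the standard GLM t-value t = c^T.b / sqrt(o^2.c^T.(X^T.X)^-1.c).

#include <Eigen/Dense>
#include <cmath>
#include <iostream>

int main()
{
  // made-up example: 4 subjects, design = intercept + one regressor
  Eigen::MatrixXd X (4, 2);
  X << 1.0, 0.5,
       1.0, 1.0,
       1.0, 1.5,
       1.0, 2.0;
  Eigen::VectorXd y (4);                        // measurements for a single element
  y << 1.1, 1.9, 3.2, 3.9;
  Eigen::RowVectorXd c (2);                     // single contrast: test the slope
  c << 0.0, 1.0;

  const double dof = X.rows() - 2;              // X has full column rank (2) here
  const Eigen::MatrixXd XtX_inv = (X.transpose() * X).inverse();
  const Eigen::VectorXd b = XtX_inv * X.transpose() * y;    // GLM betas
  const Eigen::VectorXd e = y - X * b;                       // residuals
  const double cvc = c.dot (XtX_inv * c.transpose());        // c.(X^T.X)^-1.c^T

  // standard formulation: t = c.b / sqrt(o^2 . c.(X^T.X)^-1.c^T), with o^2 = e.e / DOF
  const double variance = e.squaredNorm() / dof;
  const double t_direct = c.dot (b) / std::sqrt (variance * cvc);

  // pre-scaled formulation: c' = c.sqrt(DOF / (c.(X^T.X)^-1.c^T)), then t = c'.b / |e|
  const Eigen::RowVectorXd c_scaled = c * std::sqrt (dof / cvc);
  const double t_prescaled = c_scaled.dot (b) / e.norm();

  std::cout << t_direct << " == " << t_prescaled << "\n";    // identical up to rounding
  return 0;
}

Folding the contrast-dependent normalisation into the contrast itself means the per-permutation work in GLMTTestFixed::ttest() reduces to computing the betas and the residual norm, which is the efficiency gain the original comment refers to.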
diff --git a/core/math/stats/permutation.cpp b/core/math/stats/permutation.cpp index 02a6719847..3213e9f58b 100644 --- a/core/math/stats/permutation.cpp +++ b/core/math/stats/permutation.cpp @@ -74,26 +74,28 @@ namespace MR - void statistic2pvalue (const vector_type& perm_dist, const vector_type& stats, vector_type& pvalues) + void statistic2pvalue (const matrix_type& null_dist, const matrix_type& stats, matrix_type& pvalues) { - vector permutations; - permutations.reserve (perm_dist.size()); - for (ssize_t i = 0; i != perm_dist.size(); ++i) - permutations.push_back (perm_dist[i]); - std::sort (permutations.begin(), permutations.end()); - pvalues.resize (stats.size()); - for (size_t i = 0; i < size_t(stats.size()); ++i) { - if (stats[i] > 0.0) { - value_type pvalue = 1.0; - for (size_t j = 0; j < size_t(permutations.size()); ++j) { - if (stats[i] < permutations[j]) { - pvalue = value_type(j) / value_type(permutations.size()); - break; + pvalues.resize (stats.rows(), stats.cols()); + for (size_t row = 0; row != stats.rows(); ++row) { + vector sorted_null_dist; + sorted_null_dist.reserve (null_dist.cols()); + for (size_t i = 0; i != size_t(null_dist.cols()); ++i) + sorted_null_dist.push_back (null_dist(row, i)); + std::sort (sorted_null_dist.begin(), sorted_null_dist.end()); + for (size_t i = 0; i != size_t(stats.cols()); ++i) { + if (stats(row, i) > 0.0) { + value_type pvalue = 1.0; + for (size_t j = 0; j < size_t(sorted_null_dist.size()); ++j) { + if (stats(row, i) < sorted_null_dist[j]) { + pvalue = value_type(j) / value_type(sorted_null_dist.size()); + break; + } } + pvalues(row, i) = pvalue; + } else { + pvalues(row, i) = 0.0; } - pvalues[i] = pvalue; - } else { - pvalues[i] = 0.0; } } } diff --git a/core/math/stats/permutation.h b/core/math/stats/permutation.h index a25ce7f4fc..2e9610b996 100644 --- a/core/math/stats/permutation.h +++ b/core/math/stats/permutation.h @@ -32,6 +32,7 @@ namespace MR typedef Math::Stats::value_type value_type; typedef Math::Stats::vector_type vector_type; + typedef Math::Stats::matrix_type matrix_type; @@ -46,7 +47,7 @@ namespace MR vector >& permutations, const bool include_default); - void statistic2pvalue (const vector_type& perm_dist, const vector_type& stats, vector_type& pvalues); + void statistic2pvalue (const matrix_type& null_dist, const matrix_type& stats, matrix_type& pvalues); vector > load_permutations_file (const std::string& filename); diff --git a/core/types.h b/core/types.h index 8866b6ea1d..f4133966c5 100644 --- a/core/types.h +++ b/core/types.h @@ -246,9 +246,11 @@ namespace std { // these are not defined in the standard, but are needed // for use in generic templates: - inline uint8_t abs (uint8_t x) { return x; } - inline uint16_t abs (uint16_t x) { return x; } - inline uint32_t abs (uint32_t x) { return x; } +#ifndef _GLIBCXX_BITS_STD_ABS_H + FORCE_INLINE uint8_t abs (uint8_t x) { return x; } + FORCE_INLINE uint16_t abs (uint16_t x) { return x; } + FORCE_INLINE uint32_t abs (uint32_t x) { return x; } +#endif template inline ostream& operator<< (ostream& stream, const vector& V) diff --git a/src/connectome/enhance.cpp b/src/connectome/enhance.cpp index 741990d148..12dd7a052f 100644 --- a/src/connectome/enhance.cpp +++ b/src/connectome/enhance.cpp @@ -26,18 +26,16 @@ namespace MR { - value_type PassThrough::operator() (const vector_type& in, vector_type& out) const + void PassThrough::operator() (in_column_type in, out_column_type out) const { out = in; - return out.maxCoeff(); } - value_type NBS::operator() (const vector_type& in, const
value_type T, vector_type& out) const + void NBS::operator() (in_column_type in, const value_type T, out_column_type out) const { out = vector_type::Zero (in.size()); - value_type max_value = value_type(0); for (ssize_t seed = 0; seed != in.size(); ++seed) { if (std::isfinite (in[seed]) && in[seed] >= T && !out[seed]) { @@ -62,14 +60,11 @@ namespace MR { } - max_value = std::max (max_value, value_type(cluster_size)); for (ssize_t i = 0; i != in.size(); ++i) out[i] += (visited[i] ? 1.0 : 0.0) * cluster_size; } } - - return max_value; } diff --git a/src/connectome/enhance.h b/src/connectome/enhance.h index 67e1756086..fb75935ec3 100644 --- a/src/connectome/enhance.h +++ b/src/connectome/enhance.h @@ -35,6 +35,7 @@ namespace MR { typedef Math::Stats::value_type value_type; typedef Math::Stats::vector_type vector_type; + typedef Math::Stats::matrix_type matrix_type; @@ -46,7 +47,7 @@ namespace MR { ~PassThrough() { } private: - value_type operator() (const vector_type&, vector_type&) const override; + void operator() (in_column_type, out_column_type) const override; }; @@ -64,11 +65,11 @@ namespace MR { void set_threshold (const value_type t) { threshold = t; } - value_type operator() (const vector_type& in, vector_type& out) const override { - return (*this) (in, threshold, out); + void operator() (in_column_type in, out_column_type out) const override { + (*this) (in, threshold, out); } - value_type operator() (const vector_type&, const value_type, vector_type&) const override; + void operator() (in_column_type, const value_type, out_column_type) const override; protected: std::shared_ptr< vector< vector > > adjacency; diff --git a/src/dwi/tractography/mapping/writer.h b/src/dwi/tractography/mapping/writer.h index 13ddf28fc3..e94ba69f2b 100644 --- a/src/dwi/tractography/mapping/writer.h +++ b/src/dwi/tractography/mapping/writer.h @@ -254,6 +254,10 @@ namespace MR { template void receive_dixel (const Cont&); template void receive_tod (const Cont&); + // Partially specialized template function to shut up modern compilers + // regarding using multiplication in a boolean context + FORCE_INLINE void add (const default_type, const default_type); + // These acquire the TWI factor at any point along the streamline; // For the standard SetVoxel classes, this is a single value 'factor' for the set as // stored in SetVoxelExtras @@ -294,11 +298,11 @@ namespace MR { const default_type factor = get_factor (i, in); const default_type weight = in.weight * i.get_length(); switch (voxel_statistic) { - case V_SUM: buffer.value() += weight * factor; break; + case V_SUM: add (weight, factor); break; case V_MIN: buffer.value() = std::min (default_type (buffer.value()), factor); break; case V_MAX: buffer.value() = std::max (default_type (buffer.value()), factor); break; case V_MEAN: - buffer.value() += weight * factor; + add (weight, factor); assert (counts); assign_pos_of (i).to (*counts); counts->value() += weight; @@ -363,11 +367,11 @@ namespace MR { const default_type factor = get_factor (i, in); const default_type weight = in.weight * i.get_length(); switch (voxel_statistic) { - case V_SUM: buffer.value() += weight * factor; break; + case V_SUM: add (weight, factor); break; case V_MIN: buffer.value() = std::min (default_type (buffer.value()), factor); break; case V_MAX: buffer.value() = std::max (default_type (buffer.value()), factor); break; case V_MEAN: - buffer.value() += weight * factor; + add (weight, factor); assert (counts); assign_pos_of (i, 0, 3).to (*counts); counts->index(3) = i.get_dir(); @@ 
-435,6 +439,22 @@ namespace MR { + template <> + void MapWriter::add (const default_type weight, const default_type factor) + { + if (weight && factor) + buffer.value() = true; + } + + template + void MapWriter::add (const default_type weight, const default_type factor) + { + buffer.value() += weight * factor; + } + + + + template Eigen::Vector3 MapWriter::get_dec () @@ -442,8 +462,8 @@ namespace MR { assert (type == DEC); Eigen::Vector3 value; buffer.index(3) = 0; value[0] = buffer.value(); - ++buffer.index(3); value[1] = buffer.value(); - ++buffer.index(3); value[2] = buffer.value(); + buffer.index(3)++; value[1] = buffer.value(); + buffer.index(3)++; value[2] = buffer.value(); return value; } @@ -452,8 +472,8 @@ namespace MR { { assert (type == DEC); buffer.index(3) = 0; buffer.value() = value[0]; - ++buffer.index(3); buffer.value() = value[1]; - ++buffer.index(3); buffer.value() = value[2]; + buffer.index(3)++; buffer.value() = value[1]; + buffer.index(3)++; buffer.value() = value[2]; } diff --git a/src/stats/cfe.cpp b/src/stats/cfe.cpp index fcd6b2b146..7750257cd1 100644 --- a/src/stats/cfe.cpp +++ b/src/stats/cfe.cpp @@ -97,10 +97,9 @@ namespace MR - value_type Enhancer::operator() (const vector_type& stats, vector_type& enhanced_stats) const + void Enhancer::operator() (in_column_type stats, out_column_type enhanced_stats) const { - enhanced_stats = vector_type::Zero (stats.size()); - value_type max_enhanced_stat = 0.0; + enhanced_stats.setZero(); for (size_t fixel = 0; fixel < connectivity_map.size(); ++fixel) { std::map::const_iterator connected_fixel; for (value_type h = this->dh; h < stats[fixel]; h += this->dh) { @@ -110,11 +109,7 @@ namespace MR extent += connected_fixel->second.value; enhanced_stats[fixel] += std::pow (extent, E) * std::pow (h, H); } - if (enhanced_stats[fixel] > max_enhanced_stat) - max_enhanced_stat = enhanced_stats[fixel]; } - - return max_enhanced_stat; } diff --git a/src/stats/cfe.h b/src/stats/cfe.h index 242a407b72..8aa799e698 100644 --- a/src/stats/cfe.h +++ b/src/stats/cfe.h @@ -32,6 +32,7 @@ namespace MR typedef Math::Stats::value_type value_type; typedef Math::Stats::vector_type vector_type; + typedef Math::Stats::matrix_type matrix_type; typedef float connectivity_value_type; typedef Eigen::Matrix direction_type; typedef Eigen::Array connectivity_vector_type; @@ -82,13 +83,11 @@ namespace MR Enhancer (const vector >& connectivity_map, const value_type dh, const value_type E, const value_type H); - - value_type operator() (const vector_type& stats, vector_type& enhanced_stats) const override; - - protected: const vector >& connectivity_map; const value_type dh, E, H; + + void operator() (in_column_type, out_column_type) const override; }; diff --git a/src/stats/cluster.cpp b/src/stats/cluster.cpp index ecdca0629c..eb386e8bdf 100644 --- a/src/stats/cluster.cpp +++ b/src/stats/cluster.cpp @@ -25,16 +25,14 @@ namespace MR - value_type ClusterSize::operator() (const vector_type& stats, const value_type T, vector_type& get_cluster_sizes) const + void ClusterSize::operator() (in_column_type input, const value_type T, out_column_type output) const { vector clusters; - vector labels (stats.size(), 0); - connector.run (clusters, labels, stats, T); - get_cluster_sizes.resize (stats.size()); - for (size_t i = 0; i < size_t(stats.size()); ++i) - get_cluster_sizes[i] = labels[i] ? clusters[labels[i]-1].size : 0.0; - - return clusters.size() ? 
std::max_element (clusters.begin(), clusters.end())->size : 0.0; + vector labels (input.size(), 0); + connector.run (clusters, labels, input, T); + output.resize (input.size()); + for (size_t i = 0; i < size_t(input.size()); ++i) + output[i] = labels[i] ? clusters[labels[i]-1].size : 0.0; } diff --git a/src/stats/cluster.h b/src/stats/cluster.h index 69d91c7cf6..42982bf0a5 100644 --- a/src/stats/cluster.h +++ b/src/stats/cluster.h @@ -37,24 +37,23 @@ namespace MR /** \addtogroup Statistics @{ */ - class ClusterSize : public Stats::TFCE::EnhancerBase { MEMALIGN (ClusterSize) + class ClusterSize : public Stats::TFCE::EnhancerBase + { MEMALIGN (ClusterSize) public: ClusterSize (const Filter::Connector& connector, const value_type T) : connector (connector), threshold (T) { } void set_threshold (const value_type T) { threshold = T; } - - value_type operator() (const vector_type& in, vector_type& out) const override { - return (*this) (in, threshold, out); - } - - value_type operator() (const vector_type&, const value_type, vector_type&) const override; - - protected: const Filter::Connector& connector; value_type threshold; + + void operator() (in_column_type in, out_column_type out) const override { + (*this) (in, threshold, out); + } + + void operator() (in_column_type, const value_type, out_column_type) const override; }; //! @} diff --git a/src/stats/enhance.h b/src/stats/enhance.h index 80482190d3..2df51abfb7 100644 --- a/src/stats/enhance.h +++ b/src/stats/enhance.h @@ -30,8 +30,20 @@ namespace MR { NOMEMALIGN public: - // Return value is the maximal enhanced statistic - virtual Math::Stats::value_type operator() (const Math::Stats::vector_type& /*input_statistics*/, Math::Stats::vector_type& /*enhanced_statistics*/) const = 0; + // Perform statistical enhancement once for each column in the matrix + // (correspond to different contrasts) + void operator() (const Math::Stats::matrix_type& input_statistics, + Math::Stats::matrix_type& enhanced_statistics) const + { + for (ssize_t col = 0; col != input_statistics.cols(); ++col) + (*this) (input_statistics.col (col), enhanced_statistics.col (col)); + } + + protected: + typedef Math::Stats::matrix_type::ConstColXpr in_column_type; + typedef Math::Stats::matrix_type::ColXpr out_column_type; + // Derived classes should override this function + virtual void operator() (in_column_type, out_column_type) const = 0; }; diff --git a/src/stats/permtest.cpp b/src/stats/permtest.cpp index a62ce6ff2c..c098dbfb70 100644 --- a/src/stats/permtest.cpp +++ b/src/stats/permtest.cpp @@ -58,13 +58,17 @@ namespace MR PreProcessor::PreProcessor (const std::shared_ptr stats_calculator, const std::shared_ptr enhancer, - vector_type& global_enhanced_sum, - vector& global_enhanced_count) : + matrix_type& global_enhanced_sum, + vector>& global_enhanced_count) : stats_calculator (stats_calculator), - enhancer (enhancer), global_enhanced_sum (global_enhanced_sum), - global_enhanced_count (global_enhanced_count), enhanced_sum (vector_type::Zero (global_enhanced_sum.size())), - enhanced_count (global_enhanced_sum.size(), 0.0), stats (global_enhanced_sum.size()), - enhanced_stats (global_enhanced_sum.size()), mutex (new std::mutex()) + enhancer (enhancer), + global_enhanced_sum (global_enhanced_sum), + global_enhanced_count (global_enhanced_count), + enhanced_sum (vector_type::Zero (global_enhanced_sum.size())), + enhanced_count (stats_calculator->num_outputs(), vector (stats_calculator->num_elements(), 0)), + stats (global_enhanced_sum.rows(), global_enhanced_sum.cols()), 
+ enhanced_stats (global_enhanced_sum.rows(), global_enhanced_sum.cols()), + mutex (new std::mutex()) { assert (stats_calculator); assert (enhancer); @@ -75,9 +79,10 @@ namespace MR PreProcessor::~PreProcessor () { std::lock_guard lock (*mutex); - for (ssize_t i = 0; i < global_enhanced_sum.size(); ++i) { - global_enhanced_sum[i] += enhanced_sum[i]; - global_enhanced_count[i] += enhanced_count[i]; + global_enhanced_sum.array() += enhanced_sum.array(); + for (ssize_t row = 0; row != global_enhanced_sum.rows(); ++row) { + for (ssize_t col = 0; col != global_enhanced_sum.cols(); ++col) + global_enhanced_count[row][col] += enhanced_count[row][col]; } } @@ -87,10 +92,12 @@ namespace MR { (*stats_calculator) (permutation.data, stats); (*enhancer) (stats, enhanced_stats); - for (ssize_t i = 0; i < enhanced_stats.size(); ++i) { - if (enhanced_stats[i] > 0.0) { - enhanced_sum[i] += enhanced_stats[i]; - enhanced_count[i]++; + for (ssize_t c = 0; c != enhanced_stats.rows(); ++c) { + for (ssize_t i = 0; i < enhanced_stats.cols(); ++i) { + if (enhanced_stats(c, i) > 0.0) { + enhanced_sum(c, i) += enhanced_stats(c, i); + enhanced_count[c][i]++; + } } } return true; @@ -104,26 +111,22 @@ namespace MR Processor::Processor (const std::shared_ptr stats_calculator, const std::shared_ptr enhancer, - const vector_type& empirical_enhanced_statistics, - const vector_type& default_enhanced_statistics, - const std::shared_ptr default_enhanced_statistics_neg, - vector_type& perm_dist_pos, - std::shared_ptr perm_dist_neg, - vector& global_uncorrected_pvalue_counter, - std::shared_ptr< vector > global_uncorrected_pvalue_counter_neg) : + const matrix_type& empirical_enhanced_statistics, + const matrix_type& default_enhanced_statistics, + matrix_type& perm_dist, + vector>& global_uncorrected_pvalue_counter) : stats_calculator (stats_calculator), - enhancer (enhancer), empirical_enhanced_statistics (empirical_enhanced_statistics), - default_enhanced_statistics (default_enhanced_statistics), default_enhanced_statistics_neg (default_enhanced_statistics_neg), - statistics (stats_calculator->num_elements()), enhanced_statistics (stats_calculator->num_elements()), - uncorrected_pvalue_counter (stats_calculator->num_elements(), 0), - perm_dist_pos (perm_dist_pos), perm_dist_neg (perm_dist_neg), + enhancer (enhancer), + empirical_enhanced_statistics (empirical_enhanced_statistics), + default_enhanced_statistics (default_enhanced_statistics), + statistics (stats_calculator->num_outputs(), stats_calculator->num_elements()), + enhanced_statistics (stats_calculator->num_outputs(), stats_calculator->num_elements()), + uncorrected_pvalue_counter (stats_calculator->num_outputs(), vector (stats_calculator->num_elements(), 0)), + perm_dist (perm_dist), global_uncorrected_pvalue_counter (global_uncorrected_pvalue_counter), - global_uncorrected_pvalue_counter_neg (global_uncorrected_pvalue_counter_neg), mutex (new std::mutex()) { assert (stats_calculator); - if (global_uncorrected_pvalue_counter_neg) - uncorrected_pvalue_counter_neg.reset (new vector(stats_calculator->num_elements(), 0)); } @@ -131,10 +134,9 @@ namespace MR Processor::~Processor () { std::lock_guard lock (*mutex); - for (size_t i = 0; i < stats_calculator->num_elements(); ++i) { - global_uncorrected_pvalue_counter[i] += uncorrected_pvalue_counter[i]; - if (global_uncorrected_pvalue_counter_neg) - (*global_uncorrected_pvalue_counter_neg)[i] = (*uncorrected_pvalue_counter_neg)[i]; + for (size_t row = 0; row != stats_calculator->num_outputs(); ++row) { + for (size_t i = 0; 
i < stats_calculator->num_elements(); ++i) + global_uncorrected_pvalue_counter[row][i] += uncorrected_pvalue_counter[row][i]; } } @@ -143,45 +145,23 @@ namespace MR bool Processor::operator() (const Permutation& permutation) { (*stats_calculator) (permutation.data, statistics); - if (enhancer) { - perm_dist_pos[permutation.index] = (*enhancer) (statistics, enhanced_statistics); - } else { + if (enhancer) + (*enhancer) (statistics, enhanced_statistics); + else enhanced_statistics = statistics; - perm_dist_pos[permutation.index] = enhanced_statistics.maxCoeff(); - } - if (empirical_enhanced_statistics.size()) { - perm_dist_pos[permutation.index] = 0.0; - for (ssize_t i = 0; i < enhanced_statistics.size(); ++i) { - enhanced_statistics[i] /= empirical_enhanced_statistics[i]; - perm_dist_pos[permutation.index] = std::max(perm_dist_pos[permutation.index], enhanced_statistics[i]); - } - } + if (empirical_enhanced_statistics.size()) + enhanced_statistics.array() /= empirical_enhanced_statistics.array(); - for (ssize_t i = 0; i < enhanced_statistics.size(); ++i) { - if (default_enhanced_statistics[i] > enhanced_statistics[i]) - uncorrected_pvalue_counter[i]++; - } - - // Compute the opposite contrast - if (perm_dist_neg) { - statistics = -statistics; - - (*perm_dist_neg)[permutation.index] = (*enhancer) (statistics, enhanced_statistics); - - if (empirical_enhanced_statistics.size()) { - (*perm_dist_neg)[permutation.index] = 0.0; - for (ssize_t i = 0; i < enhanced_statistics.size(); ++i) { - enhanced_statistics[i] /= empirical_enhanced_statistics[i]; - (*perm_dist_neg)[permutation.index] = std::max ((*perm_dist_neg)[permutation.index], enhanced_statistics[i]); - } - } + perm_dist.col(permutation.index) = enhanced_statistics.rowwise().maxCoeff(); - for (ssize_t i = 0; i < enhanced_statistics.size(); ++i) { - if ((*default_enhanced_statistics_neg)[i] > enhanced_statistics[i]) - (*uncorrected_pvalue_counter_neg)[i]++; + for (ssize_t row = 0; row != enhanced_statistics.rows(); ++row) { + for (ssize_t i = 0; i != enhanced_statistics.cols(); ++i) { + if (default_enhanced_statistics(row, i) > enhanced_statistics(row, i)) + uncorrected_pvalue_counter[row][i]++; } } + return true; } @@ -193,16 +173,18 @@ namespace MR void precompute_empirical_stat (const std::shared_ptr stats_calculator, const std::shared_ptr enhancer, - PermutationStack& perm_stack, vector_type& empirical_statistic) + PermutationStack& perm_stack, matrix_type& empirical_statistic) { - vector global_enhanced_count (empirical_statistic.size(), 0); + vector> global_enhanced_count (empirical_statistic.rows(), vector (empirical_statistic.cols(), 0)); { PreProcessor preprocessor (stats_calculator, enhancer, empirical_statistic, global_enhanced_count); Thread::run_queue (perm_stack, Permutation(), Thread::multi (preprocessor)); } - for (ssize_t i = 0; i < empirical_statistic.size(); ++i) { - if (global_enhanced_count[i] > 0) - empirical_statistic[i] /= static_cast (global_enhanced_count[i]); + for (ssize_t row = 0; row != empirical_statistic.rows(); ++row) { + for (ssize_t i = 0; i != empirical_statistic.cols(); ++i) { + if (global_enhanced_count[row][i] > 0.0) + empirical_statistic(row, i) /= static_cast (global_enhanced_count[row][i]); + } } } @@ -211,32 +193,23 @@ namespace MR void precompute_default_permutation (const std::shared_ptr stats_calculator, const std::shared_ptr enhancer, - const vector_type& empirical_enhanced_statistic, - vector_type& default_enhanced_statistics, - std::shared_ptr default_enhanced_statistics_neg, - vector_type& 
default_statistics) + const matrix_type& empirical_enhanced_statistic, + matrix_type& default_enhanced_statistics, + matrix_type& default_statistics) { vector default_labelling (stats_calculator->num_subjects()); for (size_t i = 0; i < default_labelling.size(); ++i) default_labelling[i] = i; - (*stats_calculator) (default_labelling, default_statistics); - (*enhancer) (default_statistics, default_enhanced_statistics); - - if (empirical_enhanced_statistic.size()) - default_enhanced_statistics /= empirical_enhanced_statistic; - // Compute the opposite contrast - if (default_enhanced_statistics_neg) { - default_statistics = -default_statistics; - - (*enhancer) (default_statistics, *default_enhanced_statistics_neg); + (*stats_calculator) (default_labelling, default_statistics); - if (empirical_enhanced_statistic.size()) - (*default_enhanced_statistics_neg) /= empirical_enhanced_statistic; + if (enhancer) + (*enhancer) (default_statistics, default_enhanced_statistics); + else + default_enhanced_statistics = default_statistics; - // revert default_statistics to positive contrast for output - default_statistics = -default_statistics; - } + if (empirical_enhanced_statistic.size()) + default_enhanced_statistics.array() /= empirical_enhanced_statistic.array(); } @@ -245,32 +218,24 @@ namespace MR void run_permutations (PermutationStack& perm_stack, const std::shared_ptr stats_calculator, const std::shared_ptr enhancer, - const vector_type& empirical_enhanced_statistic, - const vector_type& default_enhanced_statistics, - const std::shared_ptr default_enhanced_statistics_neg, - vector_type& perm_dist_pos, - std::shared_ptr perm_dist_neg, - vector_type& uncorrected_pvalues, - std::shared_ptr uncorrected_pvalues_neg) + const matrix_type& empirical_enhanced_statistic, + const matrix_type& default_enhanced_statistics, + matrix_type& perm_dist, + matrix_type& uncorrected_pvalues) { - vector global_uncorrected_pvalue_count (stats_calculator->num_elements(), 0); - std::shared_ptr< vector > global_uncorrected_pvalue_count_neg; - if (perm_dist_neg) - global_uncorrected_pvalue_count_neg.reset (new vector (stats_calculator->num_elements(), 0)); - + vector> global_uncorrected_pvalue_count (stats_calculator->num_outputs(), vector (stats_calculator->num_elements(), 0)); { Processor processor (stats_calculator, enhancer, empirical_enhanced_statistic, - default_enhanced_statistics, default_enhanced_statistics_neg, - perm_dist_pos, perm_dist_neg, - global_uncorrected_pvalue_count, global_uncorrected_pvalue_count_neg); + default_enhanced_statistics, + perm_dist, + global_uncorrected_pvalue_count); Thread::run_queue (perm_stack, Permutation(), Thread::multi (processor)); } - for (size_t i = 0; i < stats_calculator->num_elements(); ++i) { - uncorrected_pvalues[i] = global_uncorrected_pvalue_count[i] / default_type(perm_stack.num_permutations); - if (perm_dist_neg) - (*uncorrected_pvalues_neg)[i] = (*global_uncorrected_pvalue_count_neg)[i] / default_type(perm_stack.num_permutations); + for (size_t row = 0; row != stats_calculator->num_outputs(); ++row) { + for (size_t i = 0; i < stats_calculator->num_elements(); ++i) + uncorrected_pvalues(row, i) = global_uncorrected_pvalue_count[row][i] / default_type(perm_stack.num_permutations); } } @@ -280,18 +245,15 @@ namespace MR void run_permutations (const vector>& permutations, const std::shared_ptr stats_calculator, const std::shared_ptr enhancer, - const vector_type& empirical_enhanced_statistic, - const vector_type& default_enhanced_statistics, - const std::shared_ptr 
default_enhanced_statistics_neg, - vector_type& perm_dist_pos, - std::shared_ptr perm_dist_neg, - vector_type& uncorrected_pvalues, - std::shared_ptr uncorrected_pvalues_neg) + const matrix_type& empirical_enhanced_statistic, + const matrix_type& default_enhanced_statistics, + matrix_type& perm_dist, + matrix_type& uncorrected_pvalues) { PermutationStack perm_stack (permutations, "running " + str(permutations.size()) + " permutations"); - run_permutations (perm_stack, stats_calculator, enhancer, empirical_enhanced_statistic, default_enhanced_statistics, default_enhanced_statistics_neg, - perm_dist_pos, perm_dist_neg, uncorrected_pvalues, uncorrected_pvalues_neg); + run_permutations (perm_stack, stats_calculator, enhancer, empirical_enhanced_statistic, + default_enhanced_statistics, perm_dist, uncorrected_pvalues); } @@ -300,18 +262,15 @@ namespace MR void run_permutations (const size_t num_permutations, const std::shared_ptr stats_calculator, const std::shared_ptr enhancer, - const vector_type& empirical_enhanced_statistic, - const vector_type& default_enhanced_statistics, - const std::shared_ptr default_enhanced_statistics_neg, - vector_type& perm_dist_pos, - std::shared_ptr perm_dist_neg, - vector_type& uncorrected_pvalues, - std::shared_ptr uncorrected_pvalues_neg) + const matrix_type& empirical_enhanced_statistic, + const matrix_type& default_enhanced_statistics, + matrix_type& perm_dist, + matrix_type& uncorrected_pvalues) { PermutationStack perm_stack (num_permutations, stats_calculator->num_subjects(), "running " + str(num_permutations) + " permutations"); - run_permutations (perm_stack, stats_calculator, enhancer, empirical_enhanced_statistic, default_enhanced_statistics, default_enhanced_statistics_neg, - perm_dist_pos, perm_dist_neg, uncorrected_pvalues, uncorrected_pvalues_neg); + run_permutations (perm_stack, stats_calculator, enhancer, empirical_enhanced_statistic, + default_enhanced_statistics, perm_dist, uncorrected_pvalues); } diff --git a/src/stats/permtest.h b/src/stats/permtest.h index 0613a62e50..9ce61b84f2 100644 --- a/src/stats/permtest.h +++ b/src/stats/permtest.h @@ -46,6 +46,7 @@ namespace MR typedef Math::Stats::value_type value_type; typedef Math::Stats::vector_type vector_type; + typedef Math::Stats::matrix_type matrix_type; @@ -57,8 +58,8 @@ namespace MR public: PreProcessor (const std::shared_ptr stats_calculator, const std::shared_ptr enhancer, - vector_type& global_enhanced_sum, - vector& global_enhanced_count); + matrix_type& global_enhanced_sum, + vector>& global_enhanced_count); ~PreProcessor(); @@ -67,12 +68,12 @@ namespace MR protected: std::shared_ptr stats_calculator; std::shared_ptr enhancer; - vector_type& global_enhanced_sum; - vector& global_enhanced_count; - vector_type enhanced_sum; - vector enhanced_count; - vector_type stats; - vector_type enhanced_stats; + matrix_type& global_enhanced_sum; + vector>& global_enhanced_count; + matrix_type enhanced_sum; + vector> enhanced_count; + matrix_type stats; + matrix_type enhanced_stats; std::shared_ptr mutex; }; @@ -84,13 +85,10 @@ namespace MR public: Processor (const std::shared_ptr stats_calculator, const std::shared_ptr enhancer, - const vector_type& empirical_enhanced_statistics, - const vector_type& default_enhanced_statistics, - const std::shared_ptr default_enhanced_statistics_neg, - vector_type& perm_dist_pos, - std::shared_ptr perm_dist_neg, - vector& global_uncorrected_pvalue_counter, - std::shared_ptr< vector > global_uncorrected_pvalue_counter_neg); + const matrix_type& 
empirical_enhanced_statistics, + const matrix_type& default_enhanced_statistics, + matrix_type& perm_dist, + vector>& global_uncorrected_pvalue_counter); ~Processor(); @@ -99,18 +97,13 @@ namespace MR protected: std::shared_ptr stats_calculator; std::shared_ptr enhancer; - const vector_type& empirical_enhanced_statistics; - const vector_type& default_enhanced_statistics; - const std::shared_ptr default_enhanced_statistics_neg; - vector_type statistics; - vector_type enhanced_statistics; - vector uncorrected_pvalue_counter; - std::shared_ptr > uncorrected_pvalue_counter_neg; - vector_type& perm_dist_pos; - std::shared_ptr perm_dist_neg; - - vector& global_uncorrected_pvalue_counter; - std::shared_ptr > global_uncorrected_pvalue_counter_neg; + const matrix_type& empirical_enhanced_statistics; + const matrix_type& default_enhanced_statistics; + matrix_type statistics; + matrix_type enhanced_statistics; + vector> uncorrected_pvalue_counter; + matrix_type& perm_dist; + vector>& global_uncorrected_pvalue_counter; std::shared_ptr mutex; }; @@ -120,7 +113,7 @@ namespace MR // Precompute the empirical test statistic for non-stationarity adjustment void precompute_empirical_stat (const std::shared_ptr stats_calculator, const std::shared_ptr enhancer, - PermutationStack& perm_stack, vector_type& empirical_statistic); + PermutationStack& perm_stack, matrix_type& empirical_statistic); @@ -128,10 +121,9 @@ namespace MR // Precompute the default statistic image and enhanced statistic. We need to precompute this for calculating the uncorrected p-values. void precompute_default_permutation (const std::shared_ptr stats_calculator, const std::shared_ptr enhancer, - const vector_type& empirical_enhanced_statistic, - vector_type& default_enhanced_statistics, - std::shared_ptr default_enhanced_statistics_neg, - vector_type& default_statistics); + const matrix_type& empirical_enhanced_statistic, + matrix_type& default_enhanced_statistics, + matrix_type& default_statistics); @@ -143,37 +135,28 @@ namespace MR void run_permutations (PermutationStack& perm_stack, const std::shared_ptr stats_calculator, const std::shared_ptr enhancer, - const vector_type& empirical_enhanced_statistic, - const vector_type& default_enhanced_statistics, - const std::shared_ptr default_enhanced_statistics_neg, - vector_type& perm_dist_pos, - std::shared_ptr perm_dist_neg, - vector_type& uncorrected_pvalues, - std::shared_ptr uncorrected_pvalues_neg); + const matrix_type& empirical_enhanced_statistic, + const matrix_type& default_enhanced_statistics, + matrix_type& perm_dist, + matrix_type& uncorrected_pvalues); void run_permutations (const vector>& permutations, const std::shared_ptr stats_calculator, const std::shared_ptr enhancer, - const vector_type& empirical_enhanced_statistic, - const vector_type& default_enhanced_statistics, - const std::shared_ptr default_enhanced_statistics_neg, - vector_type& perm_dist_pos, - std::shared_ptr perm_dist_neg, - vector_type& uncorrected_pvalues, - std::shared_ptr uncorrected_pvalues_neg); + const matrix_type& empirical_enhanced_statistic, + const matrix_type& default_enhanced_statistics, + matrix_type& perm_dist, + matrix_type& uncorrected_pvalues); void run_permutations (const size_t num_permutations, const std::shared_ptr stats_calculator, const std::shared_ptr enhancer, - const vector_type& empirical_enhanced_statistic, - const vector_type& default_enhanced_statistics, - const std::shared_ptr default_enhanced_statistics_neg, - vector_type& perm_dist_pos, - std::shared_ptr perm_dist_neg, -
vector_type& uncorrected_pvalues, - std::shared_ptr uncorrected_pvalues_neg); + const matrix_type& empirical_enhanced_statistic, + const matrix_type& default_enhanced_statistics, + matrix_type& perm_dist, + matrix_type& uncorrected_pvalues); //! @} diff --git a/src/stats/tfce.cpp b/src/stats/tfce.cpp index 0dc3768b93..7b17076f37 100644 --- a/src/stats/tfce.cpp +++ b/src/stats/tfce.cpp @@ -41,20 +41,17 @@ namespace MR - value_type Wrapper::operator() (const vector_type& in, vector_type& out) const + void Wrapper::operator() (in_column_type in, out_column_type out) const { - out = vector_type::Zero (in.size()); + out.setZero(); const value_type max_input_value = in.maxCoeff(); for (value_type h = dH; (h-dH) < max_input_value; h += dH) { - vector_type temp; - const value_type max = (*enhancer) (in, h, temp); - if (max) { - const value_type h_multiplier = std::pow (h, H); - for (size_t index = 0; index != size_t(in.size()); ++index) - out[index] += (std::pow (temp[index], E) * h_multiplier); - } + matrix_type temp (in.size(), 1); + (*enhancer) (in, h, temp.col(0)); + const value_type h_multiplier = std::pow (h, H); + for (size_t index = 0; index != size_t(in.size()); ++index) + out[index] += (std::pow (temp(index,0), E) * h_multiplier); } - return out.maxCoeff(); } diff --git a/src/stats/tfce.h b/src/stats/tfce.h index 55517ba75a..ab76da528a 100644 --- a/src/stats/tfce.h +++ b/src/stats/tfce.h @@ -38,16 +38,17 @@ namespace MR typedef Math::Stats::value_type value_type; typedef Math::Stats::vector_type vector_type; + typedef Math::Stats::matrix_type matrix_type; class EnhancerBase : public Stats::EnhancerBase { MEMALIGN (EnhancerBase) - public: + protected: // Alternative functor that also takes the threshold value; // makes TFCE integration cleaner - virtual value_type operator() (const vector_type& /*input_statistics*/, const value_type /*threshold*/, vector_type& /*enhanced_statistics*/) const = 0; - + virtual void operator() (in_column_type /*input_statistics*/, const value_type /*threshold*/, out_column_type /*enhanced_statistics*/) const = 0; + friend class Wrapper; }; @@ -68,11 +69,11 @@ namespace MR H = height; } - value_type operator() (const vector_type&, vector_type&) const override; - private: std::shared_ptr enhancer; value_type dH, E, H; + + void operator() (in_column_type, out_column_type) const override; }; diff --git a/testing/data b/testing/data index add3412305..2530a7f752 160000 --- a/testing/data +++ b/testing/data @@ -1 +1 @@ -Subproject commit add34123053ec1ab5fb207f5225204c3eadf4e84 +Subproject commit 2530a7f7525ff6ad05b0c7618978c8f3c46685e7 From eec7e18c8ada9717de4695d073b4aac98479b776 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Wed, 12 Jul 2017 20:17:50 +1000 Subject: [PATCH 0055/1471] vectorstats: Revert outputting to matrix files --- cmd/vectorstats.cpp | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/cmd/vectorstats.cpp b/cmd/vectorstats.cpp index 401e4c3fd9..5b3d1eb0f1 100644 --- a/cmd/vectorstats.cpp +++ b/cmd/vectorstats.cpp @@ -87,7 +87,7 @@ void run() size_t num_perms = get_option_value ("nperms", DEFAULT_NUMBER_PERMUTATIONS); // Load design matrix - const matrix_type design = load_matrix (argument[2]); + const matrix_type design = load_matrix (argument[1]); if (size_t(design.rows()) != filenames.size()) throw Exception ("number of subjects does not match number of rows in design matrix"); @@ -102,12 +102,12 @@ void run() } // Load contrast matrix - const matrix_type contrast = load_matrix (argument[3]); + const 
matrix_type contrast = load_matrix (argument[2]); const size_t num_contrasts = contrast.rows(); if (contrast.cols() != design.cols()) throw Exception ("number of columns in contrast matrix (" + str(contrast.cols()) + ") does not match number of columns in design matrix (" + str(design.cols()) + ")"); - const std::string output_prefix = argument[4]; + const std::string output_prefix = argument[3]; // Load input data matrix_type data (num_elements, filenames.size()); @@ -120,7 +120,7 @@ void run() try { subject_data = load_vector (path); } catch (Exception& e) { - throw Exception (e, "Error loading vector data for subject #" + str(subject) + " (file \"" + path + "\""); + throw Exception (e, "Error loading vector data for subject #" + str(subject) + " (file \"" + path + "\")"); } if (size_t(subject_data.size()) != num_elements) @@ -134,16 +134,16 @@ void run() { const matrix_type betas = Math::Stats::GLM::solve_betas (data, design); - CONSOLE ("Beta coefficients: " + str(betas)); + save_matrix (betas, output_prefix + "betas.csv"); const matrix_type abs_effects = Math::Stats::GLM::abs_effect_size (data, design, contrast); - CONSOLE ("Absolute effects: " + str(abs_effects)); + save_matrix (abs_effects, output_prefix + "abs_effect.csv"); const matrix_type std_effects = Math::Stats::GLM::std_effect_size (data, design, contrast); - CONSOLE ("Standardised effects: " + str(std_effects)); + save_matrix (std_effects, output_prefix + "std_effect.csv"); const matrix_type stdevs = Math::Stats::GLM::stdev (data, design); - CONSOLE ("Standard deviations: " + str(stdevs)); + save_matrix (stdevs, output_prefix + "std_dev.csv"); } std::shared_ptr glm_ttest (new Math::Stats::GLMTTestFixed (data, design, contrast)); @@ -156,7 +156,7 @@ void run() default_permutation[i] = i; matrix_type default_tvalues; (*glm_ttest) (default_permutation, default_tvalues); - CONSOLE ("T-values for default statistic: " + str(default_tvalues)); + save_matrix (default_tvalues, output_prefix + "tvalue.csv"); // Perform permutation testing if (!get_options ("notest").size()) { @@ -175,8 +175,8 @@ void run() matrix_type default_pvalues (num_contrasts, num_elements); Math::Stats::Permutation::statistic2pvalue (null_distribution, default_tvalues, default_pvalues); - CONSOLE ("FWE-corrected p-values: " + str(default_pvalues)); - CONSOLE ("Uncorrected p-values: " + str(uncorrected_pvalues)); + save_matrix (default_pvalues, output_prefix + "fwe_pvalue.csv"); + save_matrix (uncorrected_pvalues, output_prefix + "uncorrected_pvalue.csv"); } } From 3a3af1d6f45633aa58342afaf97d2133472360a5 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Thu, 13 Jul 2017 18:12:56 +1000 Subject: [PATCH 0056/1471] vectorstats: First working version with multiple contrasts --- cmd/vectorstats.cpp | 17 +++++++---------- core/math/stats/glm.cpp | 6 +++++- core/math/stats/permutation.cpp | 18 +++++++++--------- src/stats/permtest.cpp | 28 +++++++++++++++------------- 4 files changed, 36 insertions(+), 33 deletions(-) diff --git a/cmd/vectorstats.cpp b/cmd/vectorstats.cpp index 5b3d1eb0f1..80ebe33215 100644 --- a/cmd/vectorstats.cpp +++ b/cmd/vectorstats.cpp @@ -89,7 +89,7 @@ void run() // Load design matrix const matrix_type design = load_matrix (argument[1]); if (size_t(design.rows()) != filenames.size()) - throw Exception ("number of subjects does not match number of rows in design matrix"); + throw Exception ("Number of subjects (" + str(filenames.size()) + ") does not match number of rows in design matrix (" + str(design.rows()) + ")"); // Load permutations file 
if supplied auto opt = get_options("permutations"); @@ -133,16 +133,11 @@ void run() } { - const matrix_type betas = Math::Stats::GLM::solve_betas (data, design); + matrix_type betas, abs_effects, std_effects, stdevs; + Math::Stats::GLM::all_stats (data, design, contrast, betas, abs_effects, std_effects, stdevs); save_matrix (betas, output_prefix + "betas.csv"); - - const matrix_type abs_effects = Math::Stats::GLM::abs_effect_size (data, design, contrast); save_matrix (abs_effects, output_prefix + "abs_effect.csv"); - - const matrix_type std_effects = Math::Stats::GLM::std_effect_size (data, design, contrast); save_matrix (std_effects, output_prefix + "std_effect.csv"); - - const matrix_type stdevs = Math::Stats::GLM::stdev (data, design); save_matrix (stdevs, output_prefix + "std_dev.csv"); } @@ -162,7 +157,8 @@ void run() if (!get_options ("notest").size()) { std::shared_ptr enhancer; - matrix_type null_distribution (num_perms, num_contrasts), uncorrected_pvalues (num_perms, num_contrasts); + matrix_type null_distribution (num_perms, num_contrasts); + matrix_type uncorrected_pvalues (num_elements, num_contrasts); matrix_type empirical_distribution; if (permutations.size()) { @@ -173,10 +169,11 @@ void run() default_tvalues, null_distribution, uncorrected_pvalues); } - matrix_type default_pvalues (num_contrasts, num_elements); + matrix_type default_pvalues (num_elements, num_contrasts); Math::Stats::Permutation::statistic2pvalue (null_distribution, default_tvalues, default_pvalues); save_matrix (default_pvalues, output_prefix + "fwe_pvalue.csv"); save_matrix (uncorrected_pvalues, output_prefix + "uncorrected_pvalue.csv"); } } + diff --git a/core/math/stats/glm.cpp b/core/math/stats/glm.cpp index 32ca3128ca..118aa43b03 100644 --- a/core/math/stats/glm.cpp +++ b/core/math/stats/glm.cpp @@ -81,7 +81,11 @@ namespace MR //VAR (Math::rank (design)); stdev = (one_over_dof * residuals).array().sqrt(); //std::cerr << "stdev: " << stdev.rows() << " x " << stdev.cols() << ", max " << stdev.array().maxCoeff() << "\n"; - std_effect_size = abs_effect_size.array() / stdev.array(); + // TODO Should be a cleaner way of doing this (broadcasting?) 
+ matrix_type stdev_fill (abs_effect_size.rows(), abs_effect_size.cols()); + for (size_t i = 0; i != stdev_fill.rows(); ++i) + stdev_fill.row(i) = stdev; + std_effect_size = abs_effect_size.array() / stdev_fill.array(); //std::cerr << "std_effect_size: " << std_effect_size.rows() << " x " << std_effect_size.cols() << ", max " << std_effect_size.array().maxCoeff() << "\n"; //TRACE; } diff --git a/core/math/stats/permutation.cpp b/core/math/stats/permutation.cpp index 3213e9f58b..cf2f9088df 100644 --- a/core/math/stats/permutation.cpp +++ b/core/math/stats/permutation.cpp @@ -77,24 +77,24 @@ namespace MR void statistic2pvalue (const matrix_type& null_dist, const matrix_type& stats, matrix_type& pvalues) { pvalues.resize (stats.rows(), stats.cols()); - for (size_t row = 0; row != stats.rows(); ++row) { + for (size_t contrast = 0; contrast != stats.cols(); ++contrast) { vector sorted_null_dist; - sorted_null_dist.reserve (null_dist.cols()); - for (size_t i = 0; i != null_dist.size(); ++i) - sorted_null_dist.push_back (null_dist(row, i)); + sorted_null_dist.reserve (null_dist.rows()); + for (size_t perm = 0; perm != null_dist.rows(); ++perm) + sorted_null_dist.push_back (null_dist(perm, contrast)); std::sort (sorted_null_dist.begin(), sorted_null_dist.end()); - for (size_t i = 0; i != size_t(stats.cols()); ++i) { - if (stats(row, i) > 0.0) { + for (size_t element = 0; element != size_t(stats.rows()); ++element) { + if (stats(element, contrast) > 0.0) { value_type pvalue = 1.0; for (size_t j = 0; j < size_t(sorted_null_dist.size()); ++j) { - if (stats(row, i) < sorted_null_dist[j]) { + if (stats(element, contrast) < sorted_null_dist[j]) { pvalue = value_type(j) / value_type(sorted_null_dist.size()); break; } } - pvalues(row, i) = pvalue; + pvalues(element, contrast) = pvalue; } else { - pvalues(row, i) = 0.0; + pvalues(element, contrast) = 0.0; } } } diff --git a/src/stats/permtest.cpp b/src/stats/permtest.cpp index e2894e87c4..4b6a3fe8ec 100644 --- a/src/stats/permtest.cpp +++ b/src/stats/permtest.cpp @@ -121,8 +121,10 @@ namespace MR enhancer (enhancer), empirical_enhanced_statistics (empirical_enhanced_statistics), default_enhanced_statistics (default_enhanced_statistics), - statistics (stats_calculator->num_outputs(), stats_calculator->num_elements()), - enhanced_statistics (stats_calculator->num_outputs(), stats_calculator->num_elements()), + statistics (stats_calculator->num_elements(), stats_calculator->num_outputs()), + enhanced_statistics (stats_calculator->num_elements(), stats_calculator->num_outputs()), + // NOTE: uncorrected_pvalue_counter currently transposed with respect to matrices + // TODO Consider changing to Eigen::Array uncorrected_pvalue_counter (stats_calculator->num_outputs(), vector (stats_calculator->num_elements(), 0)), perm_dist (perm_dist), global_uncorrected_pvalue_counter (global_uncorrected_pvalue_counter), @@ -136,9 +138,9 @@ namespace MR Processor::~Processor () { std::lock_guard lock (*mutex); - for (size_t row = 0; row != stats_calculator->num_outputs(); ++row) { - for (size_t i = 0; i < stats_calculator->num_elements(); ++i) - global_uncorrected_pvalue_counter[row][i] += uncorrected_pvalue_counter[row][i]; + for (size_t contrast = 0; contrast != stats_calculator->num_outputs(); ++contrast) { + for (size_t element = 0; element != stats_calculator->num_elements(); ++element) + global_uncorrected_pvalue_counter[contrast][element] += uncorrected_pvalue_counter[contrast][element]; } } @@ -155,12 +157,12 @@ namespace MR if (empirical_enhanced_statistics.size()) 
enhanced_statistics.array() /= empirical_enhanced_statistics.array(); - perm_dist.col(permutation.index) = enhanced_statistics.rowwise().maxCoeff(); + perm_dist.row(permutation.index) = enhanced_statistics.colwise().maxCoeff(); - for (ssize_t row = 0; row != enhanced_statistics.rows(); ++row) { - for (ssize_t i = 0; i != enhanced_statistics.cols(); ++i) { - if (default_enhanced_statistics(row, i) > enhanced_statistics(row, i)) - uncorrected_pvalue_counter[row][i]++; + for (ssize_t contrast = 0; contrast != enhanced_statistics.cols(); ++contrast) { + for (ssize_t element = 0; element != enhanced_statistics.rows(); ++element) { + if (default_enhanced_statistics(element, contrast) > enhanced_statistics(element, contrast)) + uncorrected_pvalue_counter[contrast][element]++; } } @@ -235,9 +237,9 @@ namespace MR Thread::run_queue (perm_stack, Permutation(), Thread::multi (processor)); } - for (size_t row = 0; row != stats_calculator->num_outputs(); ++row) { - for (size_t i = 0; i < stats_calculator->num_elements(); ++i) - uncorrected_pvalues(row, i) = global_uncorrected_pvalue_count[row][i] / default_type(perm_stack.num_permutations); + for (size_t contrast = 0; contrast != stats_calculator->num_outputs(); ++contrast) { + for (size_t element = 0; element != stats_calculator->num_elements(); ++element) + uncorrected_pvalues(element, contrast) = global_uncorrected_pvalue_count[contrast][element] / default_type(perm_stack.num_permutations); } } From f73422e105affb3234d229946be55d8747fc7b87 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Thu, 24 Aug 2017 16:29:33 +1000 Subject: [PATCH 0057/1471] Minor updates to stats code - Provide a text string describing the fact that a column of ones is not automatically added to GLM design matrices, and add this string to the DESCRIPTION field of all statistical inference commands. Also fix up some code comments regarding the purpose of this column. - Provide a test for vectorstats; its output is not yet tested however. --- cmd/connectomestats.cpp | 5 ++- cmd/fixelcfestats.cpp | 5 ++- cmd/mrclusterstats.cpp | 5 ++- cmd/vectorstats.cpp | 14 ++++++-- core/math/stats/glm.cpp | 15 +++++++- core/math/stats/glm.h | 18 ++++++---- docs/reference/commands/connectomestats.rst | 9 ++++- docs/reference/commands/fixelcfestats.rst | 13 ++++--- docs/reference/commands/mrclusterstats.rst | 11 ++++-- docs/reference/commands/mrregister.rst | 2 +- docs/reference/commands/mrstats.rst | 2 -- docs/reference/commands/transformcalc.rst | 2 +- docs/reference/commands/vectorstats.rst | 11 ++++-- docs/reference/commands_list.rst | 2 +- src/dwi/tractography/mapping/writer.h | 26 +++++++------- testing/tests/vectorstats | 40 +++++++++++++++++++++ 16 files changed, 137 insertions(+), 43 deletions(-) create mode 100644 testing/tests/vectorstats diff --git a/cmd/connectomestats.cpp b/cmd/connectomestats.cpp index 963d436773..705fcd16c9 100644 --- a/cmd/connectomestats.cpp +++ b/cmd/connectomestats.cpp @@ -54,6 +54,9 @@ void usage () SYNOPSIS = "Connectome group-wise statistics at the edge level using non-parametric permutation testing"; + DESCRIPTION + + Math::Stats::glm_column_ones_description; + ARGUMENTS + Argument ("input", "a text file listing the file names of the input connectomes").type_file_in () @@ -61,7 +64,7 @@ void usage () + Argument ("algorithm", "the algorithm to use in network-based clustering/enhancement. " "Options are: " + join(algorithms, ", ")).type_choice (algorithms) - + Argument ("design", "the design matrix. 
Note that a column of 1's will need to be added for correlations.").type_file_in () + + Argument ("design", "the design matrix").type_file_in () + Argument ("contrast", "the contrast vector, specified as a single row of weights").type_file_in () diff --git a/cmd/fixelcfestats.cpp b/cmd/fixelcfestats.cpp index b185d43dc0..4b525c812a 100644 --- a/cmd/fixelcfestats.cpp +++ b/cmd/fixelcfestats.cpp @@ -55,6 +55,9 @@ void usage () SYNOPSIS = "Fixel-based analysis using connectivity-based fixel enhancement and non-parametric permutation testing"; + DESCRIPTION + + Math::Stats::glm_column_ones_description; + REFERENCES + "Raffelt, D.; Smith, RE.; Ridgway, GR.; Tournier, JD.; Vaughan, DN.; Rose, S.; Henderson, R.; Connelly, A." // Internal "Connectivity-based fixel enhancement: Whole-brain statistical analysis of diffusion MRI measures in the presence of crossing fibres. \n" @@ -71,7 +74,7 @@ void usage () + Argument ("subjects", "a text file listing the subject identifiers (one per line). This should correspond with the filenames " "in the fixel directory (including the file extension), and be listed in the same order as the rows of the design matrix.").type_image_in () - + Argument ("design", "the design matrix. Note that a column of 1's will need to be added for correlations.").type_file_in () + + Argument ("design", "the design matrix").type_file_in () + Argument ("contrast", "the contrast matrix, specified as rows of weights").type_file_in () diff --git a/cmd/mrclusterstats.cpp b/cmd/mrclusterstats.cpp index 4a305c8aa3..d47198257d 100644 --- a/cmd/mrclusterstats.cpp +++ b/cmd/mrclusterstats.cpp @@ -47,6 +47,9 @@ void usage () SYNOPSIS = "Voxel-based analysis using permutation testing and threshold-free cluster enhancement"; + DESCRIPTION + + Math::Stats::glm_column_ones_description; + REFERENCES + "* If not using the -threshold command-line option:\n" "Smith, S. M. & Nichols, T. E. " @@ -61,7 +64,7 @@ void usage () ARGUMENTS + Argument ("input", "a text file containing the file names of the input images, one file per line").type_file_in() - + Argument ("design", "the design matrix, rows should correspond with images in the input image text file").type_file_in() + + Argument ("design", "the design matrix").type_file_in() + Argument ("contrast", "the contrast matrix, only specify one contrast as it will automatically compute the opposite contrast.").type_file_in() diff --git a/cmd/vectorstats.cpp b/cmd/vectorstats.cpp index 80ebe33215..b960721b9a 100644 --- a/cmd/vectorstats.cpp +++ b/cmd/vectorstats.cpp @@ -36,15 +36,18 @@ void usage () SYNOPSIS = "Statistical testing of vector data using non-parametric permutation testing"; + DESCRIPTION + + Math::Stats::glm_column_ones_description; + ARGUMENTS + Argument ("input", "a text file listing the file names of the input subject data").type_file_in () - + Argument ("design", "the design matrix. 
Note that a column of 1's will need to be added for correlations.").type_file_in () + + Argument ("design", "the design matrix").type_file_in () - + Argument ("contrast", "the contrast vector, specified as a single row of weights").type_file_in () + + Argument ("contrast", "the contrast matrix").type_file_in () - + Argument ("output", "the filename prefix for all output.").type_text(); + + Argument ("output", "the filename prefix for all output").type_text(); OPTIONS @@ -59,6 +62,10 @@ using Math::Stats::vector_type; +// TODO Implement subject data import class, per-datum design matrices + + + void run() { @@ -110,6 +117,7 @@ void run() const std::string output_prefix = argument[3]; // Load input data + // TODO Define subject data import class matrix_type data (num_elements, filenames.size()); { ProgressBar progress ("Loading input vector data", filenames.size()); diff --git a/core/math/stats/glm.cpp b/core/math/stats/glm.cpp index 118aa43b03..24096a8630 100644 --- a/core/math/stats/glm.cpp +++ b/core/math/stats/glm.cpp @@ -28,6 +28,19 @@ namespace MR + const char* const glm_column_ones_description = + "In some software packages, a column of ones is automatically added to the " + "GLM design matrix; the purpose of this column is to estimate the \"global " + "intercept\", which is the predicted value of the observed variable if all " + "explanatory variables were to be zero. However there are rare situations " + "where including such a column would not be appropriate for a particular " + "experimental design; hence, in MRtrix3 statistical inference commands, " + "it is up to the user to determine whether or not this column of ones should " + "be included in their design matrix, and add it explicitly if necessary. " + "The contrast matrix must also reflect the presence of this additional column."; + + + namespace GLM { @@ -81,7 +94,7 @@ namespace MR //VAR (Math::rank (design)); stdev = (one_over_dof * residuals).array().sqrt(); //std::cerr << "stdev: " << stdev.rows() << " x " << stdev.cols() << ", max " << stdev.array().maxCoeff() << "\n"; - // TODO Should be a cleaner way of doing this (broadcasting?) + // TODO Should be a cleaner way of doing this (broadcasting?) matrix_type stdev_fill (abs_effect_size.rows(), abs_effect_size.cols()); for (size_t i = 0; i != stdev_fill.rows(); ++i) stdev_fill.row(i) = stdev; diff --git a/core/math/stats/glm.h b/core/math/stats/glm.h index 16eb001f19..1216a6a459 100644 --- a/core/math/stats/glm.h +++ b/core/math/stats/glm.h @@ -28,6 +28,10 @@ namespace MR + extern const char* const glm_column_ones_description; + + + namespace GLM { @@ -35,7 +39,7 @@ namespace MR @{ */ /*! Compute a matrix of the beta coefficients * @param measurements a matrix storing the measured data for each subject in a column - * @param design the design matrix (unlike other packages a column of ones is NOT automatically added to estimate the global intercept) + * @param design the design matrix * @return the matrix containing the output GLM betas */ matrix_type solve_betas (const matrix_type& measurements, const matrix_type& design); @@ -44,7 +48,7 @@ namespace MR /*!
Compute the effect of interest * @param measurements a matrix storing the measured data for each subject in a column - * @param design the design matrix (unlike other packages a column of ones is NOT automatically added to estimate the global intercept) + * @param design the design matrix * @param contrast a matrix defining the group difference * @return the matrix containing the output effect */ @@ -54,7 +58,7 @@ namespace MR /*! Compute the pooled standard deviation * @param measurements a matrix storing the measured data for each subject in a column - * @param design the design matrix (unlike other packages a column of ones is NOT automatically added to estimate the global intercept) + * @param design the design matrix * @return the matrix containing the output standard deviation */ matrix_type stdev (const matrix_type& measurements, const matrix_type& design); @@ -63,7 +67,7 @@ namespace MR /*! Compute cohen's d, the standardised effect size between two means * @param measurements a matrix storing the measured data for each subject in a column - * @param design the design matrix (unlike other packages a column of ones is NOT automatically added to estimate the global intercept) + * @param design the design matrix * @param contrast a matrix defining the group difference * @return the matrix containing the output standardised effect size */ @@ -73,7 +77,7 @@ namespace MR /*! Compute all GLM-related statistics * @param measurements a matrix storing the measured data for each subject in a column - * @param design the design matrix (unlike other packages a column of ones is NOT automatically added for correlation analysis) + * @param design the design matrix * @param contrast a matrix defining the group difference * @param betas the matrix containing the output GLM betas * @param abs_effect_size the matrix containing the output effect @@ -139,7 +143,7 @@ namespace MR public: /*! * @param measurements a matrix storing the measured data for each subject in a column - * @param design the design matrix (unlike other packages a column of ones is NOT automatically added for correlation analysis) + * @param design the design matrix * @param contrast a matrix containing the contrast of interest. */ GLMTTestFixed (const matrix_type& measurements, const matrix_type& design, const matrix_type& contrast); @@ -246,7 +250,7 @@ namespace MR public: /*! * @param measurements a matrix storing the measured data for each subject in a column - * @param design the design matrix (unlike other packages a column of ones is NOT automatically added for correlation analysis) + * @param design the design matrix * @param contrast a matrix containing the contrast of interest. */ GLMFTestFixed (const matrix_type& measurements, const matrix_type& design, const matrix_type& contrasts, const matrix_type& ftests); diff --git a/docs/reference/commands/connectomestats.rst b/docs/reference/commands/connectomestats.rst index 4109718d39..bd2e444f47 100644 --- a/docs/reference/commands/connectomestats.rst +++ b/docs/reference/commands/connectomestats.rst @@ -17,10 +17,15 @@ Usage - *input*: a text file listing the file names of the input connectomes - *algorithm*: the algorithm to use in network-based clustering/enhancement. Options are: nbs, nbse, none -- *design*: the design matrix. Note that a column of 1's will need to be added for correlations. +- *design*: the design matrix - *contrast*: the contrast vector, specified as a single row of weights - *output*: the filename prefix for all output. 
+Description +----------- + +In some software packages, a column of ones is automatically added to the GLM design matrix; the purpose of this column is to estimate the "global intercept", which is the predicted value of the observed variable if all explanatory variables were to be zero. However there are rare situations where including such a column would not be appropriate for a particular experimental design; hence, in MRtrix3 statistical inference commands, it is up to the user to determine whether or not this column of ones should be included in their design matrix, and add it explicitly if necessary. The contrast matrix must also reflect the presence of this additional column. + Options ------- @@ -53,6 +58,8 @@ Additional options for connectomestats - **-threshold value** the t-statistic value to use in threshold-based clustering algorithms +- **-column path** add a column to the design matrix corresponding to subject edge-wise values (the contrast vector length must include columns for these additions) + Standard options ^^^^^^^^^^^^^^^^ diff --git a/docs/reference/commands/fixelcfestats.rst b/docs/reference/commands/fixelcfestats.rst index 27b6f4f092..89e3f0d722 100644 --- a/docs/reference/commands/fixelcfestats.rst +++ b/docs/reference/commands/fixelcfestats.rst @@ -17,11 +17,16 @@ Usage - *in_fixel_directory*: the fixel directory containing the data files for each subject (after obtaining fixel correspondence - *subjects*: a text file listing the subject identifiers (one per line). This should correspond with the filenames in the fixel directory (including the file extension), and be listed in the same order as the rows of the design matrix. -- *design*: the design matrix. Note that a column of 1's will need to be added for correlations. -- *contrast*: the contrast vector, specified as a single row of weights +- *design*: the design matrix +- *contrast*: the contrast matrix, specified as rows of weights - *tracks*: the tracks used to determine fixel-fixel connectivity - *out_fixel_directory*: the output directory where results will be saved. Will be created if it does not exist +Description +----------- + +In some software packages, a column of ones is automatically added to the GLM design matrix; the purpose of this column is to estimate the "global intercept", which is the predicted value of the observed variable if all explanatory variables were to be zero. However there are rare situations where including such a column would not be appropriate for a particular experimental design; hence, in MRtrix3 statistical inference commands, it is up to the user to determine whether or not this column of ones should be included in their design matrix, and add it explicitly if necessary. The contrast matrix must also reflect the presence of this additional column. + Options ------- @@ -54,7 +59,7 @@ Parameters for the Connectivity-based Fixel Enhancement algorithm Additional options for fixelcfestats ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -- **-negative** automatically test the negative (opposite) contrast. By computing the opposite contrast simultaneously the computation time is reduced.
+- **-column path** add a column to the design matrix corresponding to subject fixel-wise values (the contrast vector length must include columns for these additions) - **-smooth FWHM** smooth the fixel value along the fibre tracts using a Gaussian kernel with the supplied FWHM (default: 10mm) @@ -92,7 +97,7 @@ Raffelt, D.; Smith, RE.; Ridgway, GR.; Tournier, JD.; Vaughan, DN.; Rose, S.; He -**Author:** David Raffelt (david.raffelt@florey.edu.au) +**Author:** David Raffelt (david.raffelt@florey.edu.au) and Robert E. Smith (robert.smith@florey.edu.au) **Copyright:** Copyright (c) 2008-2017 the MRtrix3 contributors diff --git a/docs/reference/commands/mrclusterstats.rst b/docs/reference/commands/mrclusterstats.rst index 6b577c0136..4ab07c910e 100644 --- a/docs/reference/commands/mrclusterstats.rst +++ b/docs/reference/commands/mrclusterstats.rst @@ -16,11 +16,16 @@ Usage mrclusterstats [ options ] input design contrast mask output - *input*: a text file containing the file names of the input images, one file per line -- *design*: the design matrix, rows should correspond with images in the input image text file +- *design*: the design matrix - *contrast*: the contrast matrix, only specify one contrast as it will automatically compute the opposite contrast. - *mask*: a mask used to define voxels included in the analysis. - *output*: the filename prefix for all output. +Description +----------- + +In some software packages, a column of ones is automatically added to the GLM design matrix; the purpose of this column is to estimate the "global intercept", which is the predicted value of the observed variable if all explanatory variables were to be zero. However there are rare situations where including such a column would not be appropriate for a particular experimental design; hence, in MRtrix3 statistical inference commands, it is up to the user to determine whether or not this column of ones should be included in their design matrix, and add it explicitly if necessary. The contrast matrix must also reflect the presence of this additional column. + Options ------- @@ -51,10 +56,10 @@ Options for controlling TFCE behaviour Additional options for mrclusterstats ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -- **-negative** automatically test the negative (opposite) contrast. By computing the opposite contrast simultaneously the computation time is reduced. - - **-threshold value** the cluster-forming threshold to use for a standard cluster-based analysis. This disables TFCE, which is the default otherwise.
+- **-column path** add a column to the design matrix corresponding to subject voxel-wise values (the contrast vector length must include columns for these additions) + - **-connectivity** use 26-voxel-neighbourhood connectivity (Default: 6) Standard options diff --git a/docs/reference/commands/mrregister.rst b/docs/reference/commands/mrregister.rst index 3acd5e7078..c2e86f2492 100644 --- a/docs/reference/commands/mrregister.rst +++ b/docs/reference/commands/mrregister.rst @@ -6,7 +6,7 @@ mrregister Synopsis -------- -Register two images together using a rigid, affine or a non-linear transformation model +Register two images together using a symmetric rigid, affine or non-linear transformation model Usage -------- diff --git a/docs/reference/commands/mrstats.rst b/docs/reference/commands/mrstats.rst index 1cabe267f3..128f649f1c 100644 --- a/docs/reference/commands/mrstats.rst +++ b/docs/reference/commands/mrstats.rst @@ -34,8 +34,6 @@ Additional options for mrstats - **-allvolumes** generate statistics across all image volumes, rather than one set of statistics per image volume -- **-ignorezero** ignore zero-valued input voxels. - Standard options ^^^^^^^^^^^^^^^^ diff --git a/docs/reference/commands/transformcalc.rst b/docs/reference/commands/transformcalc.rst index e91ac161fb..ffe81f9968 100644 --- a/docs/reference/commands/transformcalc.rst +++ b/docs/reference/commands/transformcalc.rst @@ -16,7 +16,7 @@ Usage transformcalc [ options ] input [ input ... ] operation output - *input*: the input for the specified operation -- *operation*: the operation to perform, one of: invert, half, rigid, header, average, interpolate, decompose.invert: invert the input transformation:matrix_in invert outputhalf: calculate the matrix square root of the input transformation:matrix_in half outputrigid: calculate the rigid transformation of the affine input transformation:matrix_in rigid outputheader: calculate the transformation matrix from an original image and an image with modified header:mov mapmovhdr header outputaverage: calculate the average affine matrix of all input matrices:input ... average outputinterpolate: create interpolated transformation matrix between input (t=0) and input2 (t=1). Based on matrix decomposition with linear interpolation of translation, rotation and stretch described in Shoemake, K., Hill, M., & Duff, T. (1992). Matrix Animation and Polar Decomposition. Matrix, 92, 258-264. doi:10.1.1.56.1336input input2 interpolate outputdecompose: decompose transformation matrix M into translation, rotation and stretch and shear (M = T * R * S). 
The output is a key-value text file scaling: vector of 3 scaling factors in x, y, z direction, shear: list of shear factors for xy, xz, yz axes, angles: list of Euler angles about static x, y, z axes in radians in the range [0:pi]x[-pi:pi]x[-pi:pi], angle_axis: angle in radians and rotation axis, translation : translation vector along x, y, z axes in mm, R: composed roation matrix (R = rot_x * rot_y * rot_z), S: composed scaling and shear matrix.matrix_in decompose output +- *operation*: the operation to perform, one of: invert, half, rigid, header, average, interpolate, decompose, align_vertices_rigid.invert: invert the input transformation:matrix_in invert outputhalf: calculate the matrix square root of the input transformation:matrix_in half outputrigid: calculate the rigid transformation of the affine input transformation:matrix_in rigid outputheader: calculate the transformation matrix from an original image and an image with modified header:mov mapmovhdr header outputaverage: calculate the average affine matrix of all input matrices:input ... average outputinterpolate: create interpolated transformation matrix between input (t=0) and input2 (t=1). Based on matrix decomposition with linear interpolation of translation, rotation and stretch described in Shoemake, K., Hill, M., & Duff, T. (1992). Matrix Animation and Polar Decomposition. Matrix, 92, 258-264. doi:10.1.1.56.1336input input2 interpolate outputdecompose: decompose transformation matrix M into translation, rotation and stretch and shear (M = T * R * S). The output is a key-value text file scaling: vector of 3 scaling factors in x, y, z direction, shear: list of shear factors for xy, xz, yz axes, angles: list of Euler angles about static x, y, z axes in radians in the range [0:pi]x[-pi:pi]x[-pi:pi], angle_axis: angle in radians and rotation axis, translation : translation vector along x, y, z axes in mm, R: composed roation matrix (R = rot_x * rot_y * rot_z), S: composed scaling and shear matrix.matrix_in decompose outputalign_vertices_rigid: align two sets of landmarks using a rigid transformation. Vertex coordinates are in scanner space, corresponding vertices must be stored in the same row of moving.txt and fixed.txt. Requires 3 or more vertices in each file. algorithm: Kabsch 'A solution for the best rotation to relate two sets of vectors' DOI:10.1107/S0567739476001873:input moving.txt fixed.txt align_vertices_rigid output - *output*: the output transformation matrix. Options diff --git a/docs/reference/commands/vectorstats.rst b/docs/reference/commands/vectorstats.rst index d5fff16f89..55cfd7c0d1 100644 --- a/docs/reference/commands/vectorstats.rst +++ b/docs/reference/commands/vectorstats.rst @@ -16,9 +16,14 @@ Usage vectorstats [ options ] input design contrast output - *input*: a text file listing the file names of the input subject data -- *design*: the design matrix. Note that a column of 1's will need to be added for correlations. -- *contrast*: the contrast vector, specified as a single row of weights -- *output*: the filename prefix for all output. +- *design*: the design matrix +- *contrast*: the contrast matrix +- *output*: the filename prefix for all output + +Description +----------- + +In some software packages, a column of ones is automatically added to the GLM design matrix; the purpose of this column is to estimate the "global intercept", which is the predicted value of the observed variable if all explanatory variables were to be zero. 
However there are rare situations where including such a column would not be appropriate for a particular experimental design; hence, in MRtrix3 statistical inference commands, it is up to the user to determine whether or not this column of ones should be included in their design matrix, and add it explicitly if necessary. The contrast matrix must also reflect the presence of this additional column. Options ------- diff --git a/docs/reference/commands_list.rst b/docs/reference/commands_list.rst index e0bd88afe9..e2dda2196c 100644 --- a/docs/reference/commands_list.rst +++ b/docs/reference/commands_list.rst @@ -183,7 +183,7 @@ List of MRtrix3 commands :ref:`mrmetric`, "Computes a dissimilarity metric between two images" :ref:`mrmodelfield`, "Model an input image using low frequency 3D polynomial basis functions" :ref:`mrpad`, "Pad an image to increase the FOV" - :ref:`mrregister`, "Register two images together using a rigid, affine or a non-linear transformation model" + :ref:`mrregister`, "Register two images together using a symmetric rigid, affine or non-linear transformation model" :ref:`mrresize`, "Resize an image by defining the new image resolution, voxel size or a scale factor" :ref:`mrstats`, "Compute images statistics" :ref:`mrthreshold`, "Create bitwise image by thresholding image intensity" diff --git a/src/dwi/tractography/mapping/writer.h b/src/dwi/tractography/mapping/writer.h index e94ba69f2b..a7fe0aa678 100644 --- a/src/dwi/tractography/mapping/writer.h +++ b/src/dwi/tractography/mapping/writer.h @@ -125,7 +125,7 @@ namespace MR { } else if (voxel_statistic == V_MAX) { for (auto l = loop (buffer); l; ++l ) buffer.value() = std::numeric_limits::lowest(); - } + } /* shouldn't be needed: scratch IO class memset to zero already: else { buffer.zero(); @@ -140,7 +140,7 @@ namespace MR { (type == DEC && voxel_statistic == V_SUM)) { Header H_counts (header); - if (type == DEC || type == TOD) + if (type == DEC || type == TOD) H_counts.ndim() = 3; counts.reset (new Image (Image::scratch (H_counts, "TWI streamline count buffer"))); } @@ -183,14 +183,14 @@ namespace MR { if (counts->value()) buffer.value() /= value_type(counts->value()); } - } + } else if (type == DEC) { for (auto l = loop (buffer); l; ++l) { auto value = get_dec(); - if (value.squaredNorm()) + if (value.squaredNorm()) set_dec (value.normalized()); } - } + } else if (type == TOD) { for (auto l = loop (buffer, *counts); l; ++l) { if (counts->value()) { @@ -293,7 +293,7 @@ namespace MR { void MapWriter::receive_greyscale (const Cont& in) { assert (MapWriterBase::type == GREYSCALE); - for (const auto& i : in) { + for (const auto& i : in) { assign_pos_of (i).to (buffer); const default_type factor = get_factor (i, in); const default_type weight = in.weight * i.get_length(); @@ -320,7 +320,7 @@ namespace MR { void MapWriter::receive_dec (const Cont& in) { assert (type == DEC); - for (const auto& i : in) { + for (const auto& i : in) { assign_pos_of (i).to (buffer); const default_type factor = get_factor (i, in); const default_type weight = in.weight * i.get_length(); @@ -361,7 +361,7 @@ namespace MR { void MapWriter::receive_dixel (const Cont& in) { assert (type == DIXEL); - for (const auto& i : in) { + for (const auto& i : in) { assign_pos_of (i, 0, 3).to (buffer); buffer.index(3) = i.get_dir(); const default_type factor = get_factor (i, in); @@ -391,7 +391,7 @@ namespace MR { { assert (type == TOD); VoxelTOD::vector_type sh_coefs; - for (const auto& i : in) { + for (const auto& i : in) { assign_pos_of (i, 0, 3).to
(buffer); const default_type factor = get_factor (i, in); const default_type weight = in.weight * i.get_length(); @@ -440,14 +440,14 @@ namespace MR { template <> - void MapWriter::add (const default_type weight, const default_type factor) + inline void MapWriter::add (const default_type weight, const default_type factor) { if (weight && factor) buffer.value() = true; } template - void MapWriter::add (const default_type weight, const default_type factor) + inline void MapWriter::add (const default_type weight, const default_type factor) { buffer.value() += weight * factor; } @@ -485,7 +485,7 @@ namespace MR { { assert (type == TOD); sh_coefs.resize (buffer.size(3)); - for (auto l = Loop (3) (buffer); l; ++l) + for (auto l = Loop (3) (buffer); l; ++l) sh_coefs[buffer.index(3)] = buffer.value(); } @@ -494,7 +494,7 @@ namespace MR { { assert (type == TOD); assert (sh_coefs.size() == buffer.size(3)); - for (auto l = Loop (3) (buffer); l; ++l) + for (auto l = Loop (3) (buffer); l; ++l) buffer.value() = sh_coefs[buffer.index(3)]; } diff --git a/testing/tests/vectorstats b/testing/tests/vectorstats new file mode 100644 index 0000000000..0f3d8e129b --- /dev/null +++ b/testing/tests/vectorstats @@ -0,0 +1,40 @@ +# 10 subjects per group +N=10 && SNR=2 && \ +python -c """ +import random; +subj_files = [] +for i in range(0,2*${N}): # 2 groups + path = 'tmp' + str(i) + '.txt' + # 5 data points per subject + if i < ${N}: + # First group has effect in row 1, not in rows 2-5 + data = [ random.normalvariate(${SNR},1.0), + random.normalvariate(0.0,1.0), + random.normalvariate(0.0,1.0), + random.normalvariate(0.0,1.0), + random.normalvariate(0.0,1.0) ] + else: + # Second group has effect in row 2, not in rows 1 or 3-5 + data = [ random.normalvariate(0.0,1.0), + random.normalvariate(${SNR},1.0), + random.normalvariate(0.0,1.0), + random.normalvariate(0.0,1.0), + random.normalvariate(0.0,1.0) ] + with open(path, 'w') as f: + f.write('\n'.join([str(f) for f in data])) + subj_files.append(path) +with open('tmpdesign.csv', 'w') as f: + for i in range(0,2*${N}): + group = '1' if i < ${N} else '0' + # Group ID, then one random EV + f.write('1,' + group + ',' + str(random.normalvariate(0.0,1.0)) + '\n') +with open('tmpcontrast.csv', 'w') as f: + # Three contrast rows: + # - Group difference (effect should be present in data row 1) + # - Inverse group difference (effect should be present in data row 2) + # - Random EV (should be absent) + f.write('0,1,0\n0,-1,0\n0,0,1\n') +with open('tmpsubjects.txt', 'w') as f: + for path in subj_files: + f.write(path + '\n') +""" && vectorstats tmpsubjects.txt tmpdesign.csv tmpcontrast.csv tmpout From dadea22aefcddedaedf0ba2c08fc323d518b2f3f Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Thu, 24 Aug 2017 16:50:52 +1000 Subject: [PATCH 0058/1471] vectorstats: Use standardised stats data import interface --- cmd/mrclusterstats.cpp | 4 +- cmd/vectorstats.cpp | 98 ++++++++++++++++++++++-------------------- 2 files changed, 52 insertions(+), 50 deletions(-) diff --git a/cmd/mrclusterstats.cpp b/cmd/mrclusterstats.cpp index d47198257d..771c291428 100644 --- a/cmd/mrclusterstats.cpp +++ b/cmd/mrclusterstats.cpp @@ -17,11 +17,9 @@ #include "algo/loop.h" #include "file/path.h" -#include "math/SH.h" - -#include "dwi/directions/predefined.h" #include "math/stats/glm.h" +#include "math/stats/import.h" #include "math/stats/permutation.h" #include "math/stats/typedefs.h" diff --git a/cmd/vectorstats.cpp b/cmd/vectorstats.cpp index b960721b9a..f4fb8458e8 100644 --- a/cmd/vectorstats.cpp +++ 
b/cmd/vectorstats.cpp @@ -19,6 +19,7 @@ #include "file/path.h" #include "math/stats/glm.h" +#include "math/stats/import.h" #include "math/stats/permutation.h" #include "math/stats/typedefs.h" @@ -27,6 +28,7 @@ using namespace MR; using namespace App; +using namespace MR::Math::Stats; @@ -62,41 +64,62 @@ using Math::Stats::vector_type; -// TODO Implement subject data import class, per-datum design matrices +// Define data importer class that willl obtain data for a +// specific subject based on the string path to the data file for +// that subject +// +// This is far more simple than the equivalent functionality in other +// MRtrix3 statistical inference commands, since the data are +// already in a vectorised form. + +class SubjectVectorImport : public SubjectDataImportBase +{ MEMALIGN(SubjectVectorImport) + public: + SubjectVectorImport (const std::string& path) : + SubjectDataImportBase (path), + data (load_vector (path)) { } + + void operator() (matrix_type::ColXpr column) const override + { + assert (column.rows() == size()); + column = data; + } + + default_type operator[] (const size_t index) const override + { + assert (index < size()); + return data[index]; + } + + size_t size() const override { return data.size(); } + + private: + const vector_type data; + +}; void run() { - // Read filenames - vector filenames; - { - std::string folder = Path::dirname (argument[0]); - std::ifstream ifs (argument[0].c_str()); - std::string temp; - while (getline (ifs, temp)) { - std::string filename (Path::join (folder, temp)); - size_t p = filename.find_last_not_of(" \t"); - if (std::string::npos != p) - filename.erase(p+1); - if (filename.size()) { - if (!MR::Path::exists (filename)) - throw Exception ("Input data vector file not found: \"" + filename + "\""); - filenames.push_back (filename); - } - } + CohortDataImport importer; + importer.initialise (argument[0]); + const size_t num_subjects = importer.size(); + CONSOLE ("Number of subjects: " + str(num_subjects)); + const size_t num_elements = importer[0]->size(); + CONSOLE ("Number of elements: " + str(num_elements)); + for (size_t i = 0; i != importer.size(); ++i) { + if (importer[i]->size() != num_elements) + throw Exception ("Subject file \"" + importer[i]->name() + "\" contains incorrect number of elements (" + str(importer[i]) + "; expected " + str(num_elements) + ")"); } - const vector_type example_data = load_vector (filenames.front()); - const size_t num_elements = example_data.size(); - size_t num_perms = get_option_value ("nperms", DEFAULT_NUMBER_PERMUTATIONS); // Load design matrix const matrix_type design = load_matrix (argument[1]); - if (size_t(design.rows()) != filenames.size()) - throw Exception ("Number of subjects (" + str(filenames.size()) + ") does not match number of rows in design matrix (" + str(design.rows()) + ")"); + if (size_t(design.rows()) != num_subjects) + throw Exception ("Number of subjects (" + str(num_subjects) + ") does not match number of rows in design matrix (" + str(design.rows()) + ")"); // Load permutations file if supplied auto opt = get_options("permutations"); @@ -117,28 +140,9 @@ void run() const std::string output_prefix = argument[3]; // Load input data - // TODO Define subject data import class - matrix_type data (num_elements, filenames.size()); - { - ProgressBar progress ("Loading input vector data", filenames.size()); - for (size_t subject = 0; subject < filenames.size(); subject++) { - - const std::string& path (filenames[subject]); - vector_type subject_data; - try { - subject_data = 
load_vector (path); - } catch (Exception& e) { - throw Exception (e, "Error loading vector data for subject #" + str(subject) + " (file \"" + path + "\")"); - } - - if (size_t(subject_data.size()) != num_elements) - throw Exception ("Vector data for subject #" + str(subject) + " (file \"" + path + "\") is wrong length (" + str(subject_data.size()) + " , expected " + str(num_elements) + ")"); - - data.col(subject) = subject_data; - - ++progress; - } - } + matrix_type data (num_elements, num_subjects); + for (size_t subject = 0; subject != num_subjects; subject++) + (*importer[subject]) (data.col(subject)); { matrix_type betas, abs_effects, std_effects, stdevs; @@ -154,8 +158,8 @@ void run() // Precompute default statistic // Don't use convenience function: No enhancer! // Manually construct default permutation - vector default_permutation (filenames.size()); - for (size_t i = 0; i != filenames.size(); ++i) + vector default_permutation (num_subjects); + for (size_t i = 0; i != num_subjects; ++i) default_permutation[i] = i; matrix_type default_tvalues; (*glm_ttest) (default_permutation, default_tvalues); From ae46e2d0b423540f7425d0c21f5f572104363dcf Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Thu, 24 Aug 2017 17:38:05 +1000 Subject: [PATCH 0059/1471] vectorstats: Add -column option --- cmd/mrclusterstats.cpp | 6 +- cmd/vectorstats.cpp | 158 +++++++++++++++++++++++++++++++++++++---- 2 files changed, 148 insertions(+), 16 deletions(-) diff --git a/cmd/mrclusterstats.cpp b/cmd/mrclusterstats.cpp index 771c291428..de19406452 100644 --- a/cmd/mrclusterstats.cpp +++ b/cmd/mrclusterstats.cpp @@ -80,11 +80,11 @@ void usage () + Option ("threshold", "the cluster-forming threshold to use for a standard cluster-based analysis. " "This disables TFCE, which is the default otherwise.") - + Argument ("value").type_float (1.0e-6) + + Argument ("value").type_float (1.0e-6) + Option ("column", "add a column to the design matrix corresponding to subject voxel-wise values " "(the contrast vector length must include columns for these additions)").allow_multiple() - + Argument ("path").type_file_in() + + Argument ("path").type_file_in() + Option ("connectivity", "use 26-voxel-neighbourhood connectivity (Default: 6)"); @@ -395,7 +395,7 @@ void run() { ProgressBar progress ("outputting beta coefficients, effect size and standard deviation", contrast.cols() + (3 * num_contrasts)); for (ssize_t i = 0; i != contrast.cols(); ++i) { - write_output (betas.row(i), v2v, prefix + (use_tfce ? 
"tfce.mif" : "cluster_sizes.mif"), output_header); + write_output (betas.row(i), v2v, prefix + "beta" + str(i) + ".mif", output_header); ++progress; } for (size_t i = 0; i != num_contrasts; ++i) { diff --git a/cmd/vectorstats.cpp b/cmd/vectorstats.cpp index f4fb8458e8..9cdfc932bd 100644 --- a/cmd/vectorstats.cpp +++ b/cmd/vectorstats.cpp @@ -53,7 +53,13 @@ void usage () OPTIONS - + Stats::PermTest::Options (false); + + Stats::PermTest::Options (false) + + + OptionGroup ("Additional options for vectorstats") + + + Option ("column", "add a column to the design matrix corresponding to subject element-wise values " + "(the contrast vector length must include columns for these additions)").allow_multiple() + + Argument ("path").type_file_in(); } @@ -134,8 +140,28 @@ void run() // Load contrast matrix const matrix_type contrast = load_matrix (argument[2]); const size_t num_contrasts = contrast.rows(); - if (contrast.cols() != design.cols()) - throw Exception ("number of columns in contrast matrix (" + str(contrast.cols()) + ") does not match number of columns in design matrix (" + str(design.cols()) + ")"); + + // Before validating the contrast matrix, we first need to see if there are any + // additional design matrix columns coming from voxel-wise subject data + vector extra_columns; + bool nans_in_columns = false; + opt = get_options ("column"); + for (size_t i = 0; i != opt.size(); ++i) { + extra_columns.push_back (CohortDataImport()); + extra_columns[i].initialise (opt[i][0]); + if (!extra_columns[i].allFinite()) + nans_in_columns = true; + } + if (extra_columns.size()) { + CONSOLE ("number of element-wise design matrix columns: " + str(extra_columns.size())); + if (nans_in_columns) + INFO ("Non-finite values detected in element-wise design matrix columns; individual rows will be removed from voxel-wise design matrices accordingly"); + } + + if (contrast.cols() != design.cols() + ssize_t(extra_columns.size())) + throw Exception ("the number of columns per contrast (" + str(contrast.cols()) + ")" + + " does not equal the number of columns in the design matrix (" + str(design.cols()) + ")" + + (extra_columns.size() ? " (taking into account the " + str(extra_columns.size()) + " uses of -column)" : "")); const std::string output_prefix = argument[3]; @@ -144,16 +170,122 @@ void run() for (size_t subject = 0; subject != num_subjects; subject++) (*importer[subject]) (data.col(subject)); + const bool nans_in_data = !data.allFinite(); + if (nans_in_data) { + INFO ("Non-finite values present in data; rows will be removed from element-wise design matrices accordingly"); + if (!extra_columns.size()) { + INFO ("(Note that this will result in slower execution than if such values were not present)"); + } + } + + // Construct the class for performing the initial statistical tests + std::shared_ptr glm_test; + if (extra_columns.size() || nans_in_data) { + glm_test.reset (new GLMTTestVariable (extra_columns, data, design, contrast, nans_in_data, nans_in_columns)); + } else { + glm_test.reset (new GLMTTestFixed (data, design, contrast)); + } + + + // Only add contrast row number to image outputs if there's more than one contrast + auto postfix = [&] (const size_t i) { return (num_contrasts > 1) ? 
("_" + str(i)) : ""; }; + { - matrix_type betas, abs_effects, std_effects, stdevs; - Math::Stats::GLM::all_stats (data, design, contrast, betas, abs_effects, std_effects, stdevs); - save_matrix (betas, output_prefix + "betas.csv"); - save_matrix (abs_effects, output_prefix + "abs_effect.csv"); - save_matrix (std_effects, output_prefix + "std_effect.csv"); - save_matrix (stdevs, output_prefix + "std_dev.csv"); + matrix_type betas (contrast.cols(), num_elements); + matrix_type abs_effect_size (num_contrasts, num_elements), std_effect_size (num_contrasts, num_elements), stdev (num_contrasts, num_elements); + + if (extra_columns.size()) { + + // For each variable of interest (e.g. beta coefficients, effect size etc.) need to: + // Construct the output data vector, with size = num_voxels + // For each voxel: + // Use glm_test to obtain the design matrix for the default permutation for that voxel + // Use the relevant Math::Stats::GLM function to get the value of interest for just that voxel + // (will still however need to come out as a matrix_type) + // Write that value to data vector + // Finally, use write_output() function to write to an image file + class Source + { NOMEMALIGN + public: + Source (const size_t num_elements) : + num_elements (num_elements), + counter (0), + progress (new ProgressBar ("calculating basic properties of default permutation", num_elements)) { } + + bool operator() (size_t& index) + { + index = counter++; + if (counter >= num_elements) { + progress.reset(); + return false; + } + assert (progress); + ++(*progress); + return true; + } + + private: + const size_t num_elements; + size_t counter; + std::unique_ptr progress; + }; + + class Functor + { MEMALIGN(Functor) + public: + Functor (const matrix_type& data, std::shared_ptr glm_test, const matrix_type& contrasts, + matrix_type& betas, matrix_type& abs_effect_size, matrix_type& std_effect_size, matrix_type& stdev) : + data (data), + glm_test (glm_test), + contrasts (contrasts), + global_betas (betas), + global_abs_effect_size (abs_effect_size), + global_std_effect_size (std_effect_size), + global_stdev (stdev) { } + + bool operator() (const size_t& index) + { + const matrix_type data_element = data.row (index); + const matrix_type design_element = dynamic_cast(glm_test.get())->default_design (index); + Math::Stats::GLM::all_stats (data_element, design_element, contrasts, + local_betas, local_abs_effect_size, local_std_effect_size, local_stdev); + global_betas.col (index) = local_betas; + global_abs_effect_size.col(index) = local_abs_effect_size.col(0); + global_std_effect_size.col(index) = local_std_effect_size.col(0); + global_stdev.col(index) = local_stdev.col(0); + return true; + } + + private: + const matrix_type& data; + const std::shared_ptr glm_test; + const matrix_type& contrasts; + matrix_type& global_betas; + matrix_type& global_abs_effect_size; + matrix_type& global_std_effect_size; + matrix_type& global_stdev; + matrix_type local_betas, local_abs_effect_size, local_std_effect_size, local_stdev; + }; + + Source source (num_elements); + Functor functor (data, glm_test, contrast, + betas, abs_effect_size, std_effect_size, stdev); + Thread::run_queue (source, Thread::batch (size_t()), Thread::multi (functor)); + + } else { + + ProgressBar progress ("calculating basic properties of default permutation"); + Math::Stats::GLM::all_stats (data, design, contrast, + betas, abs_effect_size, std_effect_size, stdev); + } + + ProgressBar progress ("outputting beta coefficients, effect size and standard deviation", 4); 
+ save_matrix (betas, output_prefix + "betas.mif"); ++progress; + save_matrix (abs_effect_size, output_prefix + "abs_effect.csv"); ++progress; + save_matrix (std_effect_size, output_prefix + "std_effect.csv"); ++progress; + save_matrix (stdev, output_prefix + "std_dev.csv"); } - std::shared_ptr glm_ttest (new Math::Stats::GLMTTestFixed (data, design, contrast)); // Precompute default statistic // Don't use convenience function: No enhancer! @@ -162,7 +294,7 @@ void run() for (size_t i = 0; i != num_subjects; ++i) default_permutation[i] = i; matrix_type default_tvalues; - (*glm_ttest) (default_permutation, default_tvalues); + (*glm_test) (default_permutation, default_tvalues); save_matrix (default_tvalues, output_prefix + "tvalue.csv"); // Perform permutation testing @@ -174,10 +306,10 @@ void run() matrix_type empirical_distribution; if (permutations.size()) { - Stats::PermTest::run_permutations (permutations, glm_ttest, enhancer, empirical_distribution, + Stats::PermTest::run_permutations (permutations, glm_test, enhancer, empirical_distribution, default_tvalues, null_distribution, uncorrected_pvalues); } else { - Stats::PermTest::run_permutations (num_perms, glm_ttest, enhancer, empirical_distribution, + Stats::PermTest::run_permutations (num_perms, glm_test, enhancer, empirical_distribution, default_tvalues, null_distribution, uncorrected_pvalues); } From 2bdf59e0f35f3e7e92cbd4fa2c31ed631dfa72a3 Mon Sep 17 00:00:00 2001 From: J-Donald Tournier Date: Fri, 8 Sep 2017 16:43:48 +0100 Subject: [PATCH 0060/1471] dwi2fod: initial changes to output modelled data --- cmd/dwi2fod.cpp | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/cmd/dwi2fod.cpp b/cmd/dwi2fod.cpp index e6e1b3b4b2..e97237005a 100644 --- a/cmd/dwi2fod.cpp +++ b/cmd/dwi2fod.cpp @@ -125,7 +125,7 @@ class CSD_Processor { MEMALIGN(CSD_Processor) INFO ("voxel [ " + str (dwi.index(0)) + " " + str (dwi.index(1)) + " " + str (dwi.index(2)) + " ] did not reach full convergence"); - write_back (fod); + fod.row(3) = sdeconv.FOD(); } @@ -155,11 +155,6 @@ class CSD_Processor { MEMALIGN(CSD_Processor) } - void write_back (Image& fod) { - for (auto l = Loop (3) (fod); l; ++l) - fod.value() = sdeconv.FOD() [fod.index(3)]; - } - }; @@ -183,8 +178,7 @@ class MSMT_Processor { MEMALIGN (MSMT_Processor) return; } - for (auto l = Loop (3) (dwi_image); l; ++l) - dwi_data[dwi_image.index(3)] = dwi_image.value(); + dwi_data = dwi_image.row(3); sdeconv (dwi_data, output_data); if (sdeconv.niter >= sdeconv.shared.problem.max_niter) { @@ -198,12 +192,18 @@ class MSMT_Processor { MEMALIGN (MSMT_Processor) for (auto l = Loop(3)(odf_images[i]); l; ++l) odf_images[i].value() = output_data[j++]; } + + if (modelled_image.valid()) { + assign_pos_of (dwi_image, 0, 3).to (modelled_image); + dwi_data = sdeconv.shared.problem.H * output_data; + modelled_image.row(3) = dwi_data; + } } private: DWI::SDeconv::MSMT_CSD sdeconv; - Image mask_image; + Image mask_image, modelled_image; vector< Image > odf_images; Eigen::VectorXd dwi_data; Eigen::VectorXd output_data; From 327f38468af35e8a2b5d46a58d12066ca304c82c Mon Sep 17 00:00:00 2001 From: Max Pietsch Date: Tue, 12 Sep 2017 13:45:56 +0100 Subject: [PATCH 0061/1471] dwi2fod: msmt csd output modelled data. 
added options for msmtm ICLS regulariser --- cmd/dwi2fod.cpp | 18 +++++++++----- src/dwi/sdeconv/msmt_csd.cpp | 46 ++++++++++++++++++++++++++++++++++++ src/dwi/sdeconv/msmt_csd.h | 19 +++++++++++---- 3 files changed, 73 insertions(+), 10 deletions(-) create mode 100644 src/dwi/sdeconv/msmt_csd.cpp diff --git a/cmd/dwi2fod.cpp b/cmd/dwi2fod.cpp index e97237005a..7c1fa375b1 100644 --- a/cmd/dwi2fod.cpp +++ b/cmd/dwi2fod.cpp @@ -54,9 +54,6 @@ OptionGroup CommonOptions = OptionGroup ("Options common to more than one algori "only perform computation within the specified binary brain mask image.") + Argument ("image").type_image_in(); - - - void usage () { AUTHOR = "J-Donald Tournier (jdtournier@gmail.com) and Ben Jeurissen (ben.jeurissen@uantwerpen.be)"; @@ -94,6 +91,7 @@ void usage () + DWI::ShellOption + CommonOptions + DWI::SDeconv::CSD_options + + DWI::SDeconv::MSMT_CSD_options + Stride::Options; } @@ -162,10 +160,12 @@ class CSD_Processor { MEMALIGN(CSD_Processor) class MSMT_Processor { MEMALIGN (MSMT_Processor) public: - MSMT_Processor (const DWI::SDeconv::MSMT_CSD::Shared& shared, Image& mask_image, vector< Image > odf_images) : + MSMT_Processor (const DWI::SDeconv::MSMT_CSD::Shared& shared, Image& mask_image, + vector< Image > odf_images, Image dwi_modelled = Image()) : sdeconv (shared), mask_image (mask_image), odf_images (odf_images), + modelled_image (dwi_modelled), dwi_data (shared.grad.rows()), output_data (shared.problem.H.cols()) { } @@ -203,8 +203,9 @@ class MSMT_Processor { MEMALIGN (MSMT_Processor) private: DWI::SDeconv::MSMT_CSD sdeconv; - Image mask_image, modelled_image; + Image mask_image; vector< Image > odf_images; + Image modelled_image; Eigen::VectorXd dwi_data; Eigen::VectorXd output_data; }; @@ -290,7 +291,12 @@ void run () odfs.push_back (Image (Image::create (odf_paths[i], header_out))); } - MSMT_Processor processor (shared, mask, odfs); + Image dwi_modelled; + auto opt = get_options ("modelled"); + if (opt.size()) + dwi_modelled = Image::create (opt[0][0], header_in); + + MSMT_Processor processor (shared, mask, odfs, dwi_modelled); auto dwi = header_in.get_image().with_direct_io (3); ThreadedLoop ("performing multi-shell, multi-tissue CSD", dwi, 0, 3) .run (processor, dwi); diff --git a/src/dwi/sdeconv/msmt_csd.cpp b/src/dwi/sdeconv/msmt_csd.cpp new file mode 100644 index 0000000000..2b03973719 --- /dev/null +++ b/src/dwi/sdeconv/msmt_csd.cpp @@ -0,0 +1,46 @@ +/* Copyright (c) 2008-2017 the MRtrix3 contributors. + * + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, you can obtain one at http://mozilla.org/MPL/2.0/. + * + * MRtrix is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * For more details, see http://www.mrtrix.org/. 
+ */ + + +#include "dwi/sdeconv/msmt_csd.h" + +namespace MR +{ + namespace DWI + { + namespace SDeconv + { + + using namespace App; + + const OptionGroup MSMT_CSD_options = + OptionGroup ("Options for the Multi-Shell, Multi-Tissue Constrained Spherical Deconvolution algorithm") + + Option ("norm_lambda", + "the regularisation parameter lambda that controls the strength of the " + "constraint on the norm of the solution (default = " + str(DEFAULT_MSMTCSD_NORM_LAMBDA, 2) + ").") + + Argument ("value").type_float (0.0) + + + Option ("neg_lambda", + "the regularisation parameter lambda that controls the strength of the " + "non-negativity constraint (default = " + str(DEFAULT_MSMTCSD_NEG_LAMBDA, 2) + ").") + + Argument ("value").type_float (0.0) + + + Option ("modelled", + "return the modelled dwi image.") + + Argument ("image").type_image_out(); + + + } + } +} + diff --git a/src/dwi/sdeconv/msmt_csd.h b/src/dwi/sdeconv/msmt_csd.h index ed3dadb55c..623b1509d9 100644 --- a/src/dwi/sdeconv/msmt_csd.h +++ b/src/dwi/sdeconv/msmt_csd.h @@ -27,6 +27,8 @@ #include "dwi/gradient.h" #include "dwi/shells.h" +#define DEFAULT_MSMTCSD_NORM_LAMBDA 1.0e-10 +#define DEFAULT_MSMTCSD_NEG_LAMBDA 1.0e-10 namespace MR { @@ -35,7 +37,7 @@ namespace MR namespace SDeconv { - + extern const App::OptionGroup MSMT_CSD_options; class MSMT_CSD { MEMALIGN(MSMT_CSD) public: @@ -45,7 +47,9 @@ namespace MR Shared (const Header& dwi_header) : grad (DWI::get_valid_DW_scheme (dwi_header)), shells (grad), - HR_dirs (DWI::Directions::electrostatic_repulsion_300()) { shells.select_shells(false,false,false); } + HR_dirs (DWI::Directions::electrostatic_repulsion_300()), + solution_min_norm_regularisation (DEFAULT_MSMTCSD_NORM_LAMBDA), + constraint_min_norm_regularisation (DEFAULT_MSMTCSD_NEG_LAMBDA) { shells.select_shells(false,false,false); } void parse_cmdline_options() @@ -57,6 +61,12 @@ namespace MR opt = get_options ("directions"); if (opt.size()) HR_dirs = load_matrix (opt[0][0]); + opt = get_options ("norm_lambda"); + if (opt.size()) + solution_min_norm_regularisation = opt[0][0]; + opt = get_options ("neg_lambda"); + if (opt.size()) + constraint_min_norm_regularisation = opt[0][0]; } @@ -197,8 +207,8 @@ namespace MR b_m += m[i]; b_n += n[i]; } - - problem = Math::ICLS::Problem (C, A, 1.0e-10, 1.0e-10); + problem = Math::ICLS::Problem (C, A, + solution_min_norm_regularisation, constraint_min_norm_regularisation); INFO ("Multi-shell, multi-tissue CSD initialised successfully"); } @@ -214,6 +224,7 @@ namespace MR vector lmax, lmax_response; vector responses; Math::ICLS::Problem problem; + double solution_min_norm_regularisation, constraint_min_norm_regularisation; private: From d7b216278274e423e11114b8c01cdb0e4949e64b Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Thu, 26 Oct 2017 10:29:13 +1100 Subject: [PATCH 0062/1471] tcksift2: New option -linear The code for this feature was added a long time ago, but command-line access to the functionality was never provided. When using the -linear option, the command estimates a weighting factor for each streamline in a single step, with each factor determined independently. This is a considerably different approach to the primary SIFT2 optimisation algorithm, but nevertheless operates on a similar principle, and with the same fundamental goal. Its main use is in the conceptual demonstration of why the SIFT2 algorithm mis designed the way it is. 
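As a rough illustration of how this mode would typically be invoked (the argument names below are nominal, and the exact interface should be taken from the command's help page rather than from this sketch):

    tcksift2 -linear tracks.tck wmfod.mif weights_linear.txt

Because each factor is determined independently in a single pass rather than through the iterative optimisation, this should complete considerably faster, though (as the option description notes) the resulting fit to the FOD data is typically not as accurate.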
--- cmd/tcksift2.cpp | 81 ++++++++++++++---------- docs/reference/commands/tcksift.rst | 2 +- docs/reference/commands/tcksift2.rst | 4 +- docs/reference/commands_list.rst | 4 +- docs/reference/scripts_list.rst | 2 + src/dwi/tractography/SIFT/sift.cpp | 2 +- src/dwi/tractography/SIFT2/tckfactor.cpp | 51 +++++++++++++-- 7 files changed, 99 insertions(+), 47 deletions(-) diff --git a/cmd/tcksift2.cpp b/cmd/tcksift2.cpp index 935a9ce800..dd26ade165 100644 --- a/cmd/tcksift2.cpp +++ b/cmd/tcksift2.cpp @@ -97,7 +97,10 @@ const OptionGroup SIFT2AlgorithmOption = OptionGroup ("Options for controlling t + Option ("min_cf_decrease", "minimum decrease in the cost function (as a fraction of the initial value) that must occur each iteration for the algorithm to continue " "(default: " + str(SIFT2_MIN_CF_DECREASE_DEFAULT, 2) + ")") - + Argument ("frac").type_float (0.0, 1.0); + + Argument ("frac").type_float (0.0, 1.0) + + + Option ("linear", "perform a linear estimation of streamline weights, rather than the standard non-linear optimisation " + "(typically does not provide as accurate a model fit; but only requires a single pass)"); @@ -172,44 +175,52 @@ void run () if (output_debug) tckfactor.output_all_debug_images ("before"); - auto opt = get_options ("csv"); - if (opt.size()) - tckfactor.set_csv_path (opt[0][0]); - - const float reg_tikhonov = get_option_value ("reg_tikhonov", SIFT2_REGULARISATION_TIKHONOV_DEFAULT); - const float reg_tv = get_option_value ("reg_tv", SIFT2_REGULARISATION_TV_DEFAULT); - tckfactor.set_reg_lambdas (reg_tikhonov, reg_tv); + if (get_options ("linear").size()) { + + tckfactor.calc_afcsa(); + + } else { + + auto opt = get_options ("csv"); + if (opt.size()) + tckfactor.set_csv_path (opt[0][0]); + + const float reg_tikhonov = get_option_value ("reg_tikhonov", SIFT2_REGULARISATION_TIKHONOV_DEFAULT); + const float reg_tv = get_option_value ("reg_tv", SIFT2_REGULARISATION_TV_DEFAULT); + tckfactor.set_reg_lambdas (reg_tikhonov, reg_tv); + + opt = get_options ("min_iters"); + if (opt.size()) + tckfactor.set_min_iters (int(opt[0][0])); + opt = get_options ("max_iters"); + if (opt.size()) + tckfactor.set_max_iters (int(opt[0][0])); + opt = get_options ("min_factor"); + if (opt.size()) + tckfactor.set_min_factor (float(opt[0][0])); + opt = get_options ("min_coeff"); + if (opt.size()) + tckfactor.set_min_coeff (float(opt[0][0])); + opt = get_options ("max_factor"); + if (opt.size()) + tckfactor.set_max_factor (float(opt[0][0])); + opt = get_options ("max_coeff"); + if (opt.size()) + tckfactor.set_max_coeff (float(opt[0][0])); + opt = get_options ("max_coeff_step"); + if (opt.size()) + tckfactor.set_max_coeff_step (float(opt[0][0])); + opt = get_options ("min_cf_decrease"); + if (opt.size()) + tckfactor.set_min_cf_decrease (float(opt[0][0])); + + tckfactor.estimate_factors(); - opt = get_options ("min_iters"); - if (opt.size()) - tckfactor.set_min_iters (int(opt[0][0])); - opt = get_options ("max_iters"); - if (opt.size()) - tckfactor.set_max_iters (int(opt[0][0])); - opt = get_options ("min_factor"); - if (opt.size()) - tckfactor.set_min_factor (float(opt[0][0])); - opt = get_options ("min_coeff"); - if (opt.size()) - tckfactor.set_min_coeff (float(opt[0][0])); - opt = get_options ("max_factor"); - if (opt.size()) - tckfactor.set_max_factor (float(opt[0][0])); - opt = get_options ("max_coeff"); - if (opt.size()) - tckfactor.set_max_coeff (float(opt[0][0])); - opt = get_options ("max_coeff_step"); - if (opt.size()) - tckfactor.set_max_coeff_step (float(opt[0][0])); - opt = get_options 
("min_cf_decrease"); - if (opt.size()) - tckfactor.set_min_cf_decrease (float(opt[0][0])); - - tckfactor.estimate_factors(); + } tckfactor.output_factors (argument[2]); - opt = get_options ("out_coeffs"); + auto opt = get_options ("out_coeffs"); if (opt.size()) tckfactor.output_coefficients (opt[0][0]); diff --git a/docs/reference/commands/tcksift.rst b/docs/reference/commands/tcksift.rst index f14358d734..049c003304 100644 --- a/docs/reference/commands/tcksift.rst +++ b/docs/reference/commands/tcksift.rst @@ -53,7 +53,7 @@ Options to make SIFT provide additional output files - **-out_mu file** output the final value of SIFT proportionality coefficient mu to a text file -- **-output_debug** provide various output images for assessing & debugging performace etc. +- **-output_debug** provide various output images for assessing & debugging performance etc. - **-out_selection path** output a text file containing the binary selection of streamlines diff --git a/docs/reference/commands/tcksift2.rst b/docs/reference/commands/tcksift2.rst index 0b64d9aef4..d19416d34e 100644 --- a/docs/reference/commands/tcksift2.rst +++ b/docs/reference/commands/tcksift2.rst @@ -49,7 +49,7 @@ Options to make SIFT provide additional output files - **-out_mu file** output the final value of SIFT proportionality coefficient mu to a text file -- **-output_debug** provide various output images for assessing & debugging performace etc. +- **-output_debug** provide various output images for assessing & debugging performance etc. - **-out_coeffs path** output text file containing the weighting coefficient for each streamline @@ -81,6 +81,8 @@ Options for controlling the SIFT2 optimisation algorithm - **-min_cf_decrease frac** minimum decrease in the cost function (as a fraction of the initial value) that must occur each iteration for the algorithm to continue (default: 2.5e-05) +- **-linear** perform a linear estimation of streamline weights, rather than the standard non-linear optimisation (typically does not provide as accurate a model fit; but only requires a single pass) + Standard options ^^^^^^^^^^^^^^^^ diff --git a/docs/reference/commands_list.rst b/docs/reference/commands_list.rst index 72c668e989..2525cd621c 100644 --- a/docs/reference/commands_list.rst +++ b/docs/reference/commands_list.rst @@ -100,8 +100,8 @@ List of MRtrix3 commands commands/tcknormalise commands/tckresample commands/tcksample - commands/tcksift commands/tcksift2 + commands/tcksift commands/tckstats commands/tensor2metric commands/transformcalc @@ -213,8 +213,8 @@ List of MRtrix3 commands :ref:`tcknormalise`, "Apply a normalisation map to a tracks file" :ref:`tckresample`, "Resample each streamline in a track file to a new set of vertices" :ref:`tcksample`, "Sample values of an associated image along tracks" - :ref:`tcksift`, "Filter a whole-brain fibre-tracking data set such that the streamline densities match the FOD lobe integrals" :ref:`tcksift2`, "Successor to the SIFT method; instead of removing streamlines, use an EM framework to find an appropriate cross-section multiplier for each streamline" + :ref:`tcksift`, "Filter a whole-brain fibre-tracking data set such that the streamline densities match the FOD lobe integrals" :ref:`tckstats`, "Calculate statistics on streamlines length" :ref:`tensor2metric`, "Generate maps of tensor-derived parameters" :ref:`transformcalc`, "Perform calculations on linear transformation matrices" diff --git a/docs/reference/scripts_list.rst b/docs/reference/scripts_list.rst index 56153bd922..371ef3ee27 
100644 --- a/docs/reference/scripts_list.rst +++ b/docs/reference/scripts_list.rst @@ -18,6 +18,7 @@ List of MRtrix3 scripts scripts/dwiintensitynorm scripts/dwipreproc scripts/labelsgmfix + scripts/MRtrix3_connectome scripts/population_template @@ -31,4 +32,5 @@ List of MRtrix3 scripts :ref:`dwiintensitynorm`, "Performs a global DWI intensity normalisation on a group of subjects using the median b=0 white matter value as the reference" :ref:`dwipreproc`, "Perform diffusion image pre-processing using FSL's eddy tool; including inhomogeneity distortion correction using FSL's topup tool if possible" :ref:`labelsgmfix`, "In a FreeSurfer parcellation image, replace the sub-cortical grey matter structure delineations using FSL FIRST" + :ref:`MRtrix3_connectome`, "" :ref:`population_template`, "Generates an unbiased group-average template from a series of images" diff --git a/src/dwi/tractography/SIFT/sift.cpp b/src/dwi/tractography/SIFT/sift.cpp index 9b76c65f9e..4418f5d5a2 100644 --- a/src/dwi/tractography/SIFT/sift.cpp +++ b/src/dwi/tractography/SIFT/sift.cpp @@ -54,7 +54,7 @@ const OptionGroup SIFTOutputOption = OptionGroup ("Options to make SIFT provide + Option ("out_mu", "output the final value of SIFT proportionality coefficient mu to a text file") + Argument ("file").type_file_out() - + Option ("output_debug", "provide various output images for assessing & debugging performace etc."); + + Option ("output_debug", "provide various output images for assessing & debugging performance etc."); diff --git a/src/dwi/tractography/SIFT2/tckfactor.cpp b/src/dwi/tractography/SIFT2/tckfactor.cpp index bf037edcbb..3b99b82895 100644 --- a/src/dwi/tractography/SIFT2/tckfactor.cpp +++ b/src/dwi/tractography/SIFT2/tckfactor.cpp @@ -142,13 +142,49 @@ namespace MR { void TckFactor::calc_afcsa() { - VAR (calc_cost_function()); + CONSOLE ("Cost function before linear optimisation is " + str(calc_cost_function()) + ")"); - coefficients.resize (num_tracks(), 0.0); + try { + coefficients = decltype(coefficients)::Zero (num_tracks()); + } catch (...) 
{ + throw Exception ("Error assigning memory for streamline weights vector"); + } - const double fixed_mu = mu(); + class Functor + { NOMEMALIGN + public: + Functor (TckFactor& master) : + master (master), + fixed_mu (master.mu()) { } + Functor (const Functor&) = default; + bool operator() (const SIFT::TrackIndexRange& range) const { + for (SIFT::track_t track_index = range.first; track_index != range.second; ++track_index) { + const SIFT::TrackContribution& tckcont = *master.contributions[track_index]; + double sum_afd = 0.0; + for (size_t f = 0; f != tckcont.dim(); ++f) { + const size_t fixel_index = tckcont[f].get_fixel_index(); + const Fixel& fixel = master.fixels[fixel_index]; + const float length = tckcont[f].get_length(); + sum_afd += fixel.get_weight() * fixel.get_FOD() * (length / fixel.get_orig_TD()); + } + const double afcsa = sum_afd / tckcont.get_total_contribution(); + master.coefficients[track_index] = std::log (afcsa / fixed_mu); + } + return true; + } + private: + TckFactor& master; + const double fixed_mu; + }; + { + SIFT::TrackIndexRangeWriter writer (SIFT_TRACK_INDEX_BUFFER_SIZE, num_tracks()); + Functor functor (*this); + Thread::run_queue (writer, SIFT::TrackIndexRange(), Thread::multi (functor)); + } - // Just do single-threaded for now + // Single-threaded version +/* + const double fixed_mu = mu(); for (SIFT::track_t i = 0; i != num_tracks(); ++i) { const SIFT::TrackContribution& tckcont = *contributions[i]; double sum_afd = 0.0; @@ -161,6 +197,7 @@ namespace MR { const double afcsa = sum_afd / tckcont.get_total_contribution(); coefficients[i] = std::log (afcsa / fixed_mu); } +*/ for (vector::iterator i = fixels.begin(); i != fixels.end(); ++i) { i->clear_TD(); @@ -172,7 +209,7 @@ namespace MR { Thread::run_queue (writer, SIFT::TrackIndexRange(), Thread::multi (worker)); } - VAR (calc_cost_function()); + CONSOLE ("Cost function after linear optimisation is " + str(calc_cost_function()) + ")"); } @@ -202,7 +239,7 @@ namespace MR { } unsigned int iter = 0; - + auto display_func = [&](){ return printf(" %5u %3.3f%% %2.3f%% %u", iter, 100.0 * cf_data / init_cf, 100.0 * cf_reg / init_cf, nonzero_streamlines); }; CONSOLE (" Iteration CF (data) CF (reg) Streamlines"); ProgressBar progress (""); @@ -307,7 +344,7 @@ namespace MR { } progress.update (display_func); - + // Leaving out testing the fixel exclusion mask criterion; doesn't converge, and results in CF increase } while (((new_cf - prev_cf < required_cf_change) || (iter < min_iters) /* || !fixels_to_exclude.empty() */ ) && (iter < max_iters)); From 6f7d7f7449a1af9a7bb45c29f33c2cab136546e1 Mon Sep 17 00:00:00 2001 From: J-Donald Tournier Date: Tue, 31 Oct 2017 12:44:45 +0000 Subject: [PATCH 0063/1471] mrview: add transform tool --- src/gui/mrview/tool/list.h | 2 ++ src/gui/mrview/tool/transform.h | 56 +++++++++++++++++++++++++++++++++ 2 files changed, 58 insertions(+) create mode 100644 src/gui/mrview/tool/transform.h diff --git a/src/gui/mrview/tool/list.h b/src/gui/mrview/tool/list.h index 2f3936dce7..c47e5cea44 100644 --- a/src/gui/mrview/tool/list.h +++ b/src/gui/mrview/tool/list.h @@ -23,6 +23,7 @@ #include "gui/mrview/tool/screen_capture.h" #include "gui/mrview/tool/tractography/tractography.h" #include "gui/mrview/tool/connectome/connectome.h" +#include "gui/mrview/tool/transform.h" #else @@ -39,6 +40,7 @@ TOOL(ODF, ODF display, Display orientation density functions) TOOL(Fixel, Fixel plot, Plot fixel images) TOOL(Connectome, Connectome, Plot connectome properties) TOOL(Capture, Screen capture, Capture the 
screen as a png file) +TOOL(Transform, Transform, Manipulate the rigid-body transform of the image) #endif diff --git a/src/gui/mrview/tool/transform.h b/src/gui/mrview/tool/transform.h new file mode 100644 index 0000000000..253a00f67f --- /dev/null +++ b/src/gui/mrview/tool/transform.h @@ -0,0 +1,56 @@ +/* Copyright (c) 2008-2017 the MRtrix3 contributors. + * + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, you can obtain one at http://mozilla.org/MPL/2.0/. + * + * MRtrix is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * For more details, see http://www.mrtrix.org/. + */ + + +#ifndef __gui_mrview_tool_transform_h__ +#define __gui_mrview_tool_transform_h__ + +#include "gui/mrview/tool/base.h" +#include "gui/mrview/mode/base.h" + +namespace MR +{ + namespace GUI + { + namespace MRView + { + + namespace Tool + { + + + class Transform : public Base, public Mode::ModeGuiVisitor + { MEMALIGN(Transform) + Q_OBJECT + public: + Transform (Dock* parent); + + private slots: + void onImageChanged (); + void onImageVisibilityChanged (bool); + + private: + QPushButton *activated; + }; + + } + } + } +} + +#endif + + + + + From b0527ea2e4521e68c442a30b20f270d37b0246f8 Mon Sep 17 00:00:00 2001 From: J-Donald Tournier Date: Tue, 31 Oct 2017 16:38:40 +0000 Subject: [PATCH 0064/1471] mrview: change handling of camera interactions This adds a dedicated Tool::CameraInteractor class that Tools can derive from to handle the camera move operations (pan, tilt, rotate, ...). The View tool now uses this to handle clip plane operations. --- src/gui/mrview/mode/base.cpp | 30 +++++++-- src/gui/mrview/mode/base.h | 12 +--- src/gui/mrview/mode/lightbox.cpp | 8 +-- src/gui/mrview/mode/ortho.cpp | 14 +++- src/gui/mrview/mode/volume.cpp | 101 ----------------------------- src/gui/mrview/mode/volume.h | 8 --- src/gui/mrview/tool/base.cpp | 6 ++ src/gui/mrview/tool/base.h | 16 +++++ src/gui/mrview/tool/transform.h | 7 +- src/gui/mrview/tool/view.cpp | 106 ++++++++++++++++++++++++++++++- src/gui/mrview/tool/view.h | 11 +++- src/gui/mrview/window.cpp | 7 ++ src/gui/mrview/window.h | 6 ++ 13 files changed, 198 insertions(+), 134 deletions(-) diff --git a/src/gui/mrview/mode/base.cpp b/src/gui/mrview/mode/base.cpp index 2709e2d26a..28f58d7adb 100644 --- a/src/gui/mrview/mode/base.cpp +++ b/src/gui/mrview/mode/base.cpp @@ -31,7 +31,6 @@ namespace MR update_overlays (false), visible (true) { } - Base::~Base () { glarea()->setCursor (Cursor::crosshair); @@ -152,13 +151,18 @@ namespace MR void Base::slice_move_event (float x) { + if (window().active_camera_interactor() && window().active_camera_interactor()->slice_move_event (x)) + return; + const Projection* proj = get_current_projection(); if (!proj) return; const auto &header = image()->header(); float increment = snap_to_image() ? 
x * header.spacing (plane()) : x * std::pow (header.spacing(0) * header.spacing(1) * header.spacing(2), 1/3.f); - move_in_out (increment, *proj); + auto move = get_through_plane_translation (increment, *proj); + + set_focus (focus() + move); move_target_to_focus_plane (*proj); updateGL(); } @@ -188,9 +192,14 @@ namespace MR void Base::pan_event () { + if (window().active_camera_interactor() && window().active_camera_interactor()->pan_event()) + return; + const Projection* proj = get_current_projection(); if (!proj) return; - set_target (target() - proj->screen_to_model_direction (window().mouse_displacement(), target())); + + auto move = -proj->screen_to_model_direction (window().mouse_displacement(), target()); + set_target (target() + move); updateGL(); } @@ -198,9 +207,14 @@ namespace MR void Base::panthrough_event () { + if (window().active_camera_interactor() && window().active_camera_interactor()->panthrough_event()) + return; + const Projection* proj = get_current_projection(); if (!proj) return; - move_in_out_FOV (window().mouse_displacement().y(), *proj); + auto move = get_through_plane_translation_FOV (window().mouse_displacement().y(), *proj); + + set_focus (focus() + move); move_target_to_focus_plane (*proj); updateGL(); } @@ -296,12 +310,16 @@ namespace MR void Base::tilt_event () { + if (window().active_camera_interactor() && window().active_camera_interactor()->tilt_event()) + return; + if (snap_to_image()) window().set_snap_to_image (false); const Math::Versorf rot = get_tilt_rotation(); if (!rot) return; + Math::Versorf orient = rot * orientation(); set_orientation (orient); updateGL(); @@ -313,12 +331,16 @@ namespace MR void Base::rotate_event () { + if (window().active_camera_interactor() && window().active_camera_interactor()->rotate_event()) + return; + if (snap_to_image()) window().set_snap_to_image (false); const Math::Versorf rot = get_rotate_rotation(); if (!rot) return; + Math::Versorf orient = rot * orientation(); set_orientation (orient); updateGL(); diff --git a/src/gui/mrview/mode/base.h b/src/gui/mrview/mode/base.h index e40c3e1d6b..ef2c77fac6 100644 --- a/src/gui/mrview/mode/base.h +++ b/src/gui/mrview/mode/base.h @@ -141,21 +141,15 @@ namespace MR return reinterpret_cast (window().glarea); } - Eigen::Vector3f move_in_out_displacement (float distance, const Projection& projection) const { + Eigen::Vector3f get_through_plane_translation (float distance, const Projection& projection) const { Eigen::Vector3f move (projection.screen_normal()); move.normalize(); move *= distance; return move; } - void move_in_out (float distance, const Projection& projection) { - if (!image()) return; - Eigen::Vector3f move = move_in_out_displacement (distance, projection); - set_focus (focus() + move); - } - - void move_in_out_FOV (int increment, const Projection& projection) { - move_in_out (MOVE_IN_OUT_FOV_MULTIPLIER * increment * FOV(), projection); + Eigen::Vector3f get_through_plane_translation_FOV (int increment, const Projection& projection) { + return get_through_plane_translation (MOVE_IN_OUT_FOV_MULTIPLIER * increment * FOV(), projection); } void render_tools (const Projection& projection, bool is_3D = false, int axis = 0, int slice = 0) { diff --git a/src/gui/mrview/mode/lightbox.cpp b/src/gui/mrview/mode/lightbox.cpp index 3f4fafdd0f..4b15839c02 100644 --- a/src/gui/mrview/mode/lightbox.cpp +++ b/src/gui/mrview/mode/lightbox.cpp @@ -128,7 +128,7 @@ namespace MR const Projection& slice_proj = slices_proj_focusdelta[current_slice_index].first; float focus_delta = 
slices_proj_focusdelta[current_slice_index].second; - const Eigen::Vector3f slice_focus = move_in_out_displacement(focus_delta, slice_proj); + const Eigen::Vector3f slice_focus = get_through_plane_translation (focus_delta, slice_proj); set_focus(focus() + slice_focus); } else if (volume_indices[slice_index] == -1) current_slice_index = prev_index; @@ -216,7 +216,7 @@ namespace MR slice_proj.set_viewport(window(), x + dw * col, y + h - (dh * (row+1)), dw, dh); // We need to setup the modelview/proj matrices before we set the new focus - // because move_in_out_displacement is reliant on MVP + // because get_through_plane_translation is reliant on MVP setup_projection (plane(), slice_proj); if (rend_vols) { @@ -228,8 +228,8 @@ namespace MR else { float focus_delta = slices_proj_focusdelta[slice_idx].second; - Eigen::Vector3f slice_focus = move_in_out_displacement(focus_delta, slice_proj); - set_focus(orig_focus + slice_focus); + auto move = get_through_plane_translation (focus_delta, slice_proj); + set_focus (orig_focus + move); } if (render_plane) diff --git a/src/gui/mrview/mode/ortho.cpp b/src/gui/mrview/mode/ortho.cpp index 25c1464620..9532858c2d 100644 --- a/src/gui/mrview/mode/ortho.cpp +++ b/src/gui/mrview/mode/ortho.cpp @@ -141,22 +141,32 @@ namespace MR void Ortho::slice_move_event (float x) { + if (window().active_camera_interactor() && window().active_camera_interactor()->slice_move_event(x)) + return; + const Projection* proj = get_current_projection(); if (!proj) return; const auto &header = image()->header(); float increment = snap_to_image() ? x * header.spacing (current_plane) : x * std::pow (header.spacing(0) * header.spacing(1) * header.spacing(2), 1/3.f); - move_in_out (increment, *proj); + auto move = get_through_plane_translation (increment, *proj); + + set_focus (focus() + move); updateGL(); } void Ortho::panthrough_event () { + if (window().active_camera_interactor() && window().active_camera_interactor()->panthrough_event()) + return; + const Projection* proj = get_current_projection(); if (!proj) return; - move_in_out_FOV (window().mouse_displacement().y(), *proj); + auto move = get_through_plane_translation_FOV (window().mouse_displacement().y(), *proj); + + set_focus (focus() + move); updateGL(); } diff --git a/src/gui/mrview/mode/volume.cpp b/src/gui/mrview/mode/volume.cpp index a9189e3e11..700cf2b3d0 100644 --- a/src/gui/mrview/mode/volume.cpp +++ b/src/gui/mrview/mode/volume.cpp @@ -554,107 +554,6 @@ namespace MR return view ? 
view->get_clipintersectionmodestate() : false; } - inline void Volume::move_clip_planes_in_out (vector& clip, float distance) - { - Eigen::Vector3f d = get_current_projection()->screen_normal(); - for (size_t n = 0; n < clip.size(); ++n) { - GL::vec4& p (*clip[n]); - p[3] += distance * (p[0]*d[0] + p[1]*d[1] + p[2]*d[2]); - } - updateGL(); - } - - - inline void Volume::rotate_clip_planes (vector& clip, const Math::Versorf& rot) - { - for (size_t n = 0; n < clip.size(); ++n) { - GL::vec4& p (*clip[n]); - float distance_to_focus = p[0]*focus()[0] + p[1]*focus()[1] + p[2]*focus()[2] - p[3]; - const Math::Versorf norm (0.0f, p[0], p[1], p[2]); - const Math::Versorf rotated = norm * rot; - p[0] = rotated.x(); - p[1] = rotated.y(); - p[2] = rotated.z(); - p[3] = p[0]*focus()[0] + p[1]*focus()[1] + p[2]*focus()[2] - distance_to_focus; - } - updateGL(); - } - - - - - - void Volume::slice_move_event (float x) - { - - vector clip = get_clip_planes_to_be_edited(); - if (clip.size()) { - const auto &header = image()->header(); - float increment = snap_to_image() ? - x * header.spacing (plane()) : - x * std::pow (header.spacing (0) * header.spacing (1) * header.spacing (2), 1/3.f); - move_clip_planes_in_out (clip, increment); - } else - Base::slice_move_event (x); - } - - - - void Volume::pan_event () - { - vector clip = get_clip_planes_to_be_edited(); - if (clip.size()) { - Eigen::Vector3f move = get_current_projection()->screen_to_model_direction (window().mouse_displacement(), target()); - for (size_t n = 0; n < clip.size(); ++n) { - GL::vec4& p (*clip[n]); - p[3] += (p[0]*move[0] + p[1]*move[1] + p[2]*move[2]); - } - updateGL(); - } - else - Base::pan_event(); - } - - - void Volume::panthrough_event () - { - vector clip = get_clip_planes_to_be_edited(); - if (clip.size()) - move_clip_planes_in_out (clip, MOVE_IN_OUT_FOV_MULTIPLIER * window().mouse_displacement().y() * FOV()); - else - Base::panthrough_event(); - } - - - - void Volume::tilt_event () - { - vector clip = get_clip_planes_to_be_edited(); - if (clip.size()) { - const Math::Versorf rot = get_tilt_rotation(); - if (!rot) - return; - rotate_clip_planes (clip, rot); - } - else - Base::tilt_event(); - } - - - - void Volume::rotate_event () - { - vector clip = get_clip_planes_to_be_edited(); - if (clip.size()) { - const Math::Versorf rot = get_rotate_rotation(); - if (!rot) - return; - rotate_clip_planes (clip, rot); - } - else - Base::rotate_event(); - } - } } diff --git a/src/gui/mrview/mode/volume.h b/src/gui/mrview/mode/volume.h index 8c0b2e88cd..8d16a17589 100644 --- a/src/gui/mrview/mode/volume.h +++ b/src/gui/mrview/mode/volume.h @@ -42,11 +42,6 @@ namespace MR } virtual void paint (Projection& projection); - virtual void slice_move_event (float x); - virtual void pan_event (); - virtual void panthrough_event (); - virtual void tilt_event (); - virtual void rotate_event (); protected: GL::VertexBuffer volume_VB, volume_VI; @@ -72,9 +67,6 @@ namespace MR vector get_clip_planes_to_be_edited () const; bool get_cliphighlightstate () const; bool get_clipintersectionmodestate () const; - - void move_clip_planes_in_out (vector& clip, float distance); - void rotate_clip_planes (vector& clip, const Math::Versorf& rot); }; } diff --git a/src/gui/mrview/tool/base.cpp b/src/gui/mrview/tool/base.cpp index 9d967edeaf..3205f08ce5 100644 --- a/src/gui/mrview/tool/base.cpp +++ b/src/gui/mrview/tool/base.cpp @@ -62,6 +62,12 @@ namespace MR bool Base::process_commandline_option (const MR::App::ParsedOption&) { return false; } void 
Base::add_commandline_options (MR::App::OptionList&) { } + void CameraInteractor::deactivate () { } + bool CameraInteractor::slice_move_event (float) { return false; } + bool CameraInteractor::pan_event () { return false; } + bool CameraInteractor::panthrough_event () { return false; } + bool CameraInteractor::tilt_event () { return false; } + bool CameraInteractor::rotate_event () { return false; } } } } diff --git a/src/gui/mrview/tool/base.h b/src/gui/mrview/tool/base.h index 7868291155..2eaa053d9e 100644 --- a/src/gui/mrview/tool/base.h +++ b/src/gui/mrview/tool/base.h @@ -41,6 +41,22 @@ namespace MR class Base; + class CameraInteractor + { NOMEMALIGN + public: + CameraInteractor () : _active (false) { } + bool active () const { return _active; } + virtual void deactivate (); + virtual bool slice_move_event (float inc); + virtual bool pan_event (); + virtual bool panthrough_event (); + virtual bool tilt_event (); + virtual bool rotate_event (); + protected: + bool _active; + void set_active (bool onoff) { _active = onoff; } + }; + class Dock : public QDockWidget diff --git a/src/gui/mrview/tool/transform.h b/src/gui/mrview/tool/transform.h index 253a00f67f..384500813a 100644 --- a/src/gui/mrview/tool/transform.h +++ b/src/gui/mrview/tool/transform.h @@ -29,18 +29,17 @@ namespace MR { - class Transform : public Base, public Mode::ModeGuiVisitor + class Transform : public Base { MEMALIGN(Transform) Q_OBJECT public: Transform (Dock* parent); private slots: - void onImageChanged (); - void onImageVisibilityChanged (bool); + void onActivate (bool); private: - QPushButton *activated; + QPushButton *activate_button; }; } diff --git a/src/gui/mrview/tool/view.cpp b/src/gui/mrview/tool/view.cpp index d244ef93ed..31e4ce83ff 100644 --- a/src/gui/mrview/tool/view.cpp +++ b/src/gui/mrview/tool/view.cpp @@ -670,6 +670,10 @@ namespace MR transparency_box->setVisible (mode->features & Mode::ShaderTransparency); threshold_box->setVisible (mode->features & Mode::ShaderTransparency); clip_box->setVisible (mode->features & Mode::ShaderClipping); + if (mode->features & Mode::ShaderClipping) + clip_planes_selection_changed_slot(); + else + window().register_camera_interactor(); lightbox_box->setVisible (false); mode->request_update_mode_gui(*this); } @@ -949,6 +953,7 @@ namespace MR clip_planes_invert_action->setEnabled (selected); clip_planes_remove_action->setEnabled (selected); clip_planes_clear_action->setEnabled (clip_planes_model->rowCount()); + window().register_camera_interactor (selected ? 
this : nullptr); window().updateGL(); } @@ -1048,7 +1053,7 @@ namespace MR - // Called in respose to a request_update_mode_gui(ModeGuiVisitor& visitor) call + // Called in response to a request_update_mode_gui(ModeGuiVisitor& visitor) call void View::update_lightbox_mode_gui(const Mode::LightBox &mode) { lightbox_box->setVisible(true); @@ -1067,6 +1072,105 @@ namespace MR reset_light_box_gui_controls(); } + void View::move_clip_planes_in_out (vector& clip, float distance) + { + Eigen::Vector3f d = window().get_current_mode()->get_current_projection()->screen_normal(); + for (size_t n = 0; n < clip.size(); ++n) { + GL::vec4& p (*clip[n]); + p[3] += distance * (p[0]*d[0] + p[1]*d[1] + p[2]*d[2]); + } + window().updateGL(); + } + + + void View::rotate_clip_planes (vector& clip, const Math::Versorf& rot) + { + const auto& focus (window().focus()); + for (size_t n = 0; n < clip.size(); ++n) { + GL::vec4& p (*clip[n]); + float distance_to_focus = p[0]*focus[0] + p[1]*focus[1] + p[2]*focus[2] - p[3]; + const Math::Versorf norm (0.0f, p[0], p[1], p[2]); + const Math::Versorf rotated = norm * rot; + p[0] = rotated.x(); + p[1] = rotated.y(); + p[2] = rotated.z(); + p[3] = p[0]*focus[0] + p[1]*focus[1] + p[2]*focus[2] - distance_to_focus; + } + window().updateGL(); + } + + + void View::deactivate () + { + clip_planes_list_view->selectionModel()->clear(); + } + + + bool View::slice_move_event (float x) + { + + vector clip = get_clip_planes_to_be_edited(); + if (clip.size()) { + const auto &header = window().image()->header(); + float increment = x * std::pow (header.spacing (0) * header.spacing (1) * header.spacing (2), 1.0f/3.0f); + move_clip_planes_in_out (clip, increment); + } + return true; + } + + + + bool View::pan_event () + { + vector clip = get_clip_planes_to_be_edited(); + if (clip.size()) { + Eigen::Vector3f move = window().get_current_mode()->get_current_projection()->screen_to_model_direction (window().mouse_displacement(), window().target()); + for (size_t n = 0; n < clip.size(); ++n) { + GL::vec4& p (*clip[n]); + p[3] += (p[0]*move[0] + p[1]*move[1] + p[2]*move[2]); + } + window().updateGL(); + } + return true; + } + + + bool View::panthrough_event () + { + vector clip = get_clip_planes_to_be_edited(); + if (clip.size()) + move_clip_planes_in_out (clip, MOVE_IN_OUT_FOV_MULTIPLIER * window().mouse_displacement().y() * window().FOV()); + return true; + } + + + + bool View::tilt_event () + { + vector clip = get_clip_planes_to_be_edited(); + if (clip.size()) { + const Math::Versorf rot = window().get_current_mode()->get_tilt_rotation(); + if (!rot) + return true; + rotate_clip_planes (clip, rot); + } + return true; + } + + + + bool View::rotate_event () + { + vector clip = get_clip_planes_to_be_edited(); + if (clip.size()) { + const Math::Versorf rot = window().get_current_mode()->get_rotate_rotation(); + if (!rot) + return true; + rotate_clip_planes (clip, rot); + } + return true; + } + } } } diff --git a/src/gui/mrview/tool/view.h b/src/gui/mrview/tool/view.h index f781d5e657..25b9d001ac 100644 --- a/src/gui/mrview/tool/view.h +++ b/src/gui/mrview/tool/view.h @@ -41,7 +41,7 @@ namespace MR std::string name; }; - class View : public Base, public Mode::ModeGuiVisitor + class View : public Base, public Mode::ModeGuiVisitor, public Tool::CameraInteractor { MEMALIGN(View) Q_OBJECT public: @@ -55,6 +55,13 @@ namespace MR bool get_clipintersectionmodestate () const; void update_lightbox_mode_gui(const Mode::LightBox &mode) override; + void deactivate () override; + bool slice_move_event (float 
inc) override; + bool pan_event () override; + bool panthrough_event () override; + bool tilt_event () override; + bool rotate_event () override; + protected: virtual void showEvent (QShowEvent* event) override; @@ -136,6 +143,8 @@ namespace MR void reset_light_box_gui_controls (); void set_transparency_from_image (); + void move_clip_planes_in_out (vector& clip, float distance); + void rotate_clip_planes (vector& clip, const Math::Versorf& rot); }; } diff --git a/src/gui/mrview/window.cpp b/src/gui/mrview/window.cpp index 6566640c61..afcb55b379 100644 --- a/src/gui/mrview/window.cpp +++ b/src/gui/mrview/window.cpp @@ -234,6 +234,7 @@ namespace MR colourbar_position (ColourMap::Position::BottomRight), tools_colourbar_position (ColourMap::Position::TopRight), snap_to_image_axes_and_voxel (true), + camera_interactor (nullptr), tool_has_focus (nullptr), best_FPS (NAN), show_FPS (false), @@ -1758,6 +1759,12 @@ namespace MR + void Window::register_camera_interactor (Tool::CameraInteractor* agent) + { + if (camera_interactor) + camera_interactor->deactivate(); + camera_interactor = agent; + } void Window::process_commandline_option () diff --git a/src/gui/mrview/window.h b/src/gui/mrview/window.h index 85bfa796e9..e9ba0acc9b 100644 --- a/src/gui/mrview/window.h +++ b/src/gui/mrview/window.h @@ -46,6 +46,7 @@ namespace MR { class Base; class ODF; + class CameraInteractor; } @@ -155,6 +156,9 @@ namespace MR GL::Lighting& lighting () { return *lighting_; } ColourMap::Renderer colourbar_renderer; + void register_camera_interactor (Tool::CameraInteractor* agent = nullptr); + Tool::CameraInteractor* active_camera_interactor () { return camera_interactor; } + static void add_commandline_options (MR::App::OptionList& options); static Window* main; @@ -251,6 +255,8 @@ namespace MR float background_colour[3]; + Tool::CameraInteractor* camera_interactor; + QMenu *image_menu; ColourMapButton *colourmap_button; From 1bdf4265a6a0714d439558a8a5f85dc2e5e7d16c Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Wed, 1 Nov 2017 14:19:37 +1100 Subject: [PATCH 0065/1471] tcksift2 -linear: Fix for non-contributing streamlines --- src/dwi/tractography/SIFT2/tckfactor.cpp | 29 ++++++++---------------- 1 file changed, 9 insertions(+), 20 deletions(-) diff --git a/src/dwi/tractography/SIFT2/tckfactor.cpp b/src/dwi/tractography/SIFT2/tckfactor.cpp index 3b99b82895..25f3bbf67b 100644 --- a/src/dwi/tractography/SIFT2/tckfactor.cpp +++ b/src/dwi/tractography/SIFT2/tckfactor.cpp @@ -167,8 +167,12 @@ namespace MR { const float length = tckcont[f].get_length(); sum_afd += fixel.get_weight() * fixel.get_FOD() * (length / fixel.get_orig_TD()); } - const double afcsa = sum_afd / tckcont.get_total_contribution(); - master.coefficients[track_index] = std::log (afcsa / fixed_mu); + if (sum_afd && tckcont.get_total_contribution()) { + const double afcsa = sum_afd / tckcont.get_total_contribution(); + master.coefficients[track_index] = std::max (master.min_coeff, std::log (afcsa / fixed_mu)); + } else { + master.coefficients[track_index] = master.min_coeff; + } } return true; } @@ -182,23 +186,6 @@ namespace MR { Thread::run_queue (writer, SIFT::TrackIndexRange(), Thread::multi (functor)); } - // Single-threaded version -/* - const double fixed_mu = mu(); - for (SIFT::track_t i = 0; i != num_tracks(); ++i) { - const SIFT::TrackContribution& tckcont = *contributions[i]; - double sum_afd = 0.0; - for (size_t f = 0; f != tckcont.dim(); ++f) { - const size_t fixel_index = tckcont[f].get_fixel_index(); - const Fixel& fixel = 
fixels[fixel_index]; - const float length = tckcont[f].get_length(); - sum_afd += fixel.get_weight() * fixel.get_FOD() * (length / fixel.get_orig_TD()); - } - const double afcsa = sum_afd / tckcont.get_total_contribution(); - coefficients[i] = std::log (afcsa / fixed_mu); - } -*/ - for (vector::iterator i = fixels.begin(); i != fixels.end(); ++i) { i->clear_TD(); i->clear_mean_coeff(); @@ -361,7 +348,9 @@ namespace MR { try { decltype(coefficients) weights (coefficients.size()); for (SIFT::track_t i = 0; i != num_tracks(); ++i) - weights[i] = std::exp (coefficients[i]); + weights[i] = (coefficients[i] == min_coeff || !std::isfinite(coefficients[i])) ? + 0.0 : + std::exp (coefficients[i]); save_vector (weights, path); } catch (...) { WARN ("Unable to assign memory for output factor file: \"" + Path::basename(path) + "\" not created"); From f2062fb8ac4caf7d720c57debce9f4e2de7291ca Mon Sep 17 00:00:00 2001 From: J-Donald Tournier Date: Wed, 1 Nov 2017 09:49:55 +0000 Subject: [PATCH 0066/1471] mrview: early work on transform tool --- src/gui/mrview/gui_image.h | 9 ++ src/gui/mrview/tool/transform.cpp | 161 ++++++++++++++++++++++++++++++ src/gui/mrview/tool/transform.h | 13 ++- 3 files changed, 182 insertions(+), 1 deletion(-) create mode 100644 src/gui/mrview/tool/transform.cpp diff --git a/src/gui/mrview/gui_image.h b/src/gui/mrview/gui_image.h index 6cba1f1552..7bdfede1ce 100644 --- a/src/gui/mrview/gui_image.h +++ b/src/gui/mrview/gui_image.h @@ -41,6 +41,10 @@ namespace MR class ODF; } + + + + class ImageBase : public Volume { MEMALIGN(ImageBase) public: @@ -61,6 +65,11 @@ namespace MR }; + + + + + class Image : public ImageBase { MEMALIGN(Image) public: diff --git a/src/gui/mrview/tool/transform.cpp b/src/gui/mrview/tool/transform.cpp new file mode 100644 index 0000000000..229807f8fc --- /dev/null +++ b/src/gui/mrview/tool/transform.cpp @@ -0,0 +1,161 @@ +/* Copyright (c) 2008-2017 the MRtrix3 contributors. + * + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, you can obtain one at http://mozilla.org/MPL/2.0/. + * + * MRtrix is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * For more details, see http://www.mrtrix.org/. + */ + + +#include "gui/mrview/tool/transform.h" + +namespace MR +{ + namespace GUI + { + namespace MRView + { + namespace Tool + { + + Transform::Transform (Dock* parent) : + Base (parent) + { + VBoxLayout* main_box = new VBoxLayout (this); + + activate_button = new QPushButton ("Activate",this); + activate_button->setToolTip (tr ("Activate transform manipulation mode\nAll camera move operations will now apply to the main image")); + activate_button->setIcon (QIcon (":/rotate.svg")); + activate_button->setCheckable (true); + activate_button->setChecked (!window().get_image_visibility()); + connect (activate_button, SIGNAL (clicked(bool)), this, SLOT (onActivate (bool))); + main_box->addWidget (activate_button, 0); + + main_box->addStretch (); + } + + + void Transform::showEvent (QShowEvent*) + { + activate_button->setChecked (false); + } + + + void Transform::closeEvent (QCloseEvent*) + { + if (window().active_camera_interactor() == this) + window().register_camera_interactor(); + } + + + void Transform::onActivate (bool onoff) + { + window().register_camera_interactor (onoff ? 
this : nullptr); + } + + + void Transform::deactivate () + { + activate_button->setChecked (false); + } + + bool Transform::slice_move_event (float x) + { + const Projection* proj = window().get_current_mode()->get_current_projection(); + if (!proj) + return true; + const auto &header = window().image()->header(); + float increment = window().snap_to_image() ? + x * header.spacing (window().plane()) : + x * std::pow (header.spacing(0) * header.spacing(1) * header.spacing(2), 1.0f/3.0f); + auto move = window().get_current_mode()->get_through_plane_translation (increment, *proj); + + transform_type M = header.transform(); + VAR (M.matrix()); + M.translate (move.cast()); + VAR(M.matrix()); + window().updateGL(); + return true; + } + + + + + bool Transform::pan_event () + { + /*const Projection* proj = get_current_projection(); + if (!proj) return; + + auto move = -proj->screen_to_model_direction (window().mouse_displacement(), target()); + set_target (target() + move); + updateGL();*/ + return false; + } + + + + bool Transform::panthrough_event () + { + /*const Projection* proj = get_current_projection(); + if (!proj) return; + auto move = get_through_plane_translation_FOV (window().mouse_displacement().y(), *proj); + + set_focus (focus() + move); + move_target_to_focus_plane (*proj); + updateGL();*/ + return false; + } + + + bool Transform::tilt_event () + { + /*if (snap_to_image()) + window().set_snap_to_image (false); + + const Math::Versorf rot = get_tilt_rotation(); + if (!rot) + return; + + Math::Versorf orient = rot * orientation(); + set_orientation (orient); + updateGL();*/ + return false; + } + + + + + + bool Transform::rotate_event () + { + /*if (snap_to_image()) + window().set_snap_to_image (false); + + const Math::Versorf rot = get_rotate_rotation(); + if (!rot) + return; + + Math::Versorf orient = rot * orientation(); + set_orientation (orient); + updateGL();*/ + return false; + } + + + + + } + } + } +} + + + + + + diff --git a/src/gui/mrview/tool/transform.h b/src/gui/mrview/tool/transform.h index 384500813a..9404b69bb1 100644 --- a/src/gui/mrview/tool/transform.h +++ b/src/gui/mrview/tool/transform.h @@ -29,12 +29,23 @@ namespace MR { - class Transform : public Base + class Transform : public Base, public Tool::CameraInteractor { MEMALIGN(Transform) Q_OBJECT public: Transform (Dock* parent); + void deactivate () override; + bool slice_move_event (float inc) override; + bool pan_event () override; + bool panthrough_event () override; + bool tilt_event () override; + bool rotate_event () override; + + protected: + virtual void showEvent (QShowEvent* event) override; + virtual void closeEvent (QCloseEvent* event) override; + private slots: void onActivate (bool); From 4a847cf6ad511774e9767bbed7b57656b11518ed Mon Sep 17 00:00:00 2001 From: J-Donald Tournier Date: Wed, 8 Nov 2017 11:32:39 +0000 Subject: [PATCH 0067/1471] mrview: further changes to support transform tool --- src/gui/mrview/gui_image.cpp | 12 ++++++++++++ src/gui/mrview/gui_image.h | 2 ++ src/gui/mrview/volume.cpp | 10 ++++++++++ src/gui/mrview/volume.h | 1 + 4 files changed, 25 insertions(+) diff --git a/src/gui/mrview/gui_image.cpp b/src/gui/mrview/gui_image.cpp index 4889cdbcb0..8aa1507f4f 100644 --- a/src/gui/mrview/gui_image.cpp +++ b/src/gui/mrview/gui_image.cpp @@ -640,6 +640,18 @@ namespace MR } + + + + void Image::set_tranform (const transform_type& new_transform) + { + Volume::set_tranform (new_transform); + + linear_interp = decltype (linear_interp) (image); + nearest_interp = decltype (nearest_interp) 
(image); + } + + } } } diff --git a/src/gui/mrview/gui_image.h b/src/gui/mrview/gui_image.h index 7bdfede1ce..2b932a60b3 100644 --- a/src/gui/mrview/gui_image.h +++ b/src/gui/mrview/gui_image.h @@ -87,6 +87,8 @@ namespace MR cfloat nearest_neighbour_value (const Eigen::Vector3f&) const; const MR::Transform& transform() const { return linear_interp; } + void set_tranform (const transform_type& transform); + const vector& comments() const { return _comments; } void reset_windowing (const int, const bool); diff --git a/src/gui/mrview/volume.cpp b/src/gui/mrview/volume.cpp index 138220f8c4..38804ddfd6 100644 --- a/src/gui/mrview/volume.cpp +++ b/src/gui/mrview/volume.cpp @@ -55,6 +55,16 @@ namespace MR } + + + void Volume::set_tranform (const transform_type& new_transform) + { + _header.transform() = new_transform; + _transform = new MR::Transform (_header); + } + + + } } } diff --git a/src/gui/mrview/volume.h b/src/gui/mrview/volume.h index f31f090b18..90af2b68f0 100644 --- a/src/gui/mrview/volume.h +++ b/src/gui/mrview/volume.h @@ -93,6 +93,7 @@ namespace MR const MR::Header& header () const { return _header; } MR::Header& header () { return _header; } const MR::Transform& transform () const { return _transform; } + void set_tranform (const transform_type& new_transform); void min_max_set() { update_levels(); From 1039319fc1bdc638c8720c1f8780057599c3fd19 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Fri, 17 Nov 2017 21:14:44 +1100 Subject: [PATCH 0068/1471] Stats: First compilable version with Freedman-Lane - Modify how multiple contrasts are handled. Initial support for this was previously implemented for vectorstats only, based on the contrast being a matrix rather than a vector. However, this framework does not naturally extend to F-tests. This new approach stores each within a GLM::Contrast class, and these are explicitly looped over whenever required. GLM t-test code has been replaced with the F-test (i.e. without variance groups) as presented in Winkler et al., 2014. This has removed many optimisations, but was necessary in order to sufficiently generalise the framework for upcoming enhancement. Changed stdev calculations in statistical inference commands to a vector, since this does not change betwee contrasts. - Removed apparent redundant creation of output images related to the default permutation in mrclusterstats. - Model is currently partitioned based on null columns of the contrast matrix only. Note that this is merely the first COMPILING version of this code. 
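For reference, in the absence of variance groups the statistic being computed reduces to the classical GLM F-test. A sketch in the notation of Winkler et al. (2014), assuming the model Y = M psi + epsilon for a single element, with the null hypothesis C' psi = 0 for a contrast matrix C of rank s:

\[
F \;=\; \frac{\hat{\psi}^{T} C \left[ C^{T} (M^{T} M)^{-1} C \right]^{-1} C^{T} \hat{\psi} \;/\; s}
             {\hat{\epsilon}^{T} \hat{\epsilon} \;/\; \left( N - \operatorname{rank}(M) \right)}
\]

where \hat{\psi} is the ordinary least-squares estimate, \hat{\epsilon} = Y - M \hat{\psi} are the residuals, and N is the number of rows (subjects) in M; for a rank-1 contrast this is simply the square of the corresponding t-statistic.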
--- cmd/connectomestats.cpp | 72 +++-- cmd/fixelcfestats.cpp | 55 ++-- cmd/mrclusterstats.cpp | 101 +++----- cmd/vectorstats.cpp | 76 ++++-- core/math/stats/glm.cpp | 562 ++++++++++++++++++++-------------------- core/math/stats/glm.h | 387 +++++++++++++-------------- src/stats/permtest.cpp | 18 +- src/stats/permtest.h | 18 +- 8 files changed, 651 insertions(+), 638 deletions(-) diff --git a/cmd/connectomestats.cpp b/cmd/connectomestats.cpp index 705fcd16c9..ada68be796 100644 --- a/cmd/connectomestats.cpp +++ b/cmd/connectomestats.cpp @@ -32,6 +32,7 @@ using namespace MR; using namespace App; using namespace MR::Math::Stats; +using namespace MR::Math::Stats::GLM; using Math::Stats::matrix_type; using Math::Stats::vector_type; @@ -55,7 +56,7 @@ void usage () SYNOPSIS = "Connectome group-wise statistics at the edge level using non-parametric permutation testing"; DESCRIPTION - + Math::Stats::glm_column_ones_description; + + Math::Stats::GLM::column_ones_description; ARGUMENTS @@ -109,8 +110,8 @@ void usage () void load_tfce_parameters (Stats::TFCE::Wrapper& enhancer) { const default_type dH = get_option_value ("tfce_dh", TFCE_DH_DEFAULT); - const default_type E = get_option_value ("tfce_e", TFCE_E_DEFAULT); - const default_type H = get_option_value ("tfce_h", TFCE_H_DEFAULT); + const default_type E = get_option_value ("tfce_e", TFCE_E_DEFAULT); + const default_type H = get_option_value ("tfce_h", TFCE_H_DEFAULT); enhancer.set_tfce_parameters (dH, E, H); } @@ -212,8 +213,15 @@ void run() throw Exception ("number of subjects (" + str(importer.size()) + ") does not match number of rows in design matrix (" + str(design.rows()) + ")"); // Load contrast matrix - const matrix_type contrast = load_matrix (argument[3]); - const size_t num_contrasts = contrast.rows(); + // TODO Eventually this should be functionalised, and include F-tests + // TODO Eventually will want ability to disable t-test output, and output F-tests only + vector contrasts; + { + const matrix_type contrast_matrix = load_matrix (argument[3]); + for (ssize_t row = 0; row != contrast_matrix.rows(); ++row) + contrasts.emplace_back (Contrast (contrast_matrix.row (row))); + } + const size_t num_contrasts = contrasts.size(); // Before validating the contrast matrix, we first need to see if there are any // additional design matrix columns coming from fixel-wise subject data @@ -233,20 +241,22 @@ void run() } // Now we can check the contrast matrix - if (contrast.cols() != design.cols() + ssize_t(extra_columns.size())) - throw Exception ("the number of columns per contrast (" + str(contrast.cols()) + ")" + const ssize_t num_factors = design.cols() + extra_columns.size(); + if (contrasts[0].cols() != num_factors) + // TODO Re-word this error message + throw Exception ("the number of columns per contrast (" + str(contrasts[0].cols()) + ")" + " does not equal the number of columns in the design matrix (" + str(design.cols()) + ")" + (extra_columns.size() ? 
" (taking into account the " + str(extra_columns.size()) + " uses of -column)" : "")); // Load permutations file if supplied opt = get_options ("permutations"); - vector > permutations; + vector< vector > permutations; if (opt.size()) { permutations = Permutation::load_permutations_file (opt[0][0]); num_perms = permutations.size(); if (permutations[0].size() != (size_t)design.rows()) - throw Exception ("number of rows in the permutations file (" + str(opt[0][0]) + ") does not match number of rows in design matrix"); + throw Exception ("number of rows in the permutations file (" + str(opt[0][0]) + ") does not match number of rows in design matrix (" + str(design.rows()) + ")"); } // Load non-stationary correction permutations file if supplied @@ -256,7 +266,7 @@ void run() permutations_nonstationary = Permutation::load_permutations_file (opt[0][0]); nperms_nonstationary = permutations.size(); if (permutations_nonstationary[0].size() != (size_t)design.rows()) - throw Exception ("number of rows in the nonstationary permutations file (" + str(opt[0][0]) + ") does not match number of rows in design matrix"); + throw Exception ("number of rows in the nonstationary permutations file (" + str(opt[0][0]) + ") does not match number of rows in design matrix (" + str(design.rows()) + ")"); } const std::string output_prefix = argument[4]; @@ -276,19 +286,21 @@ void run() const bool nans_in_data = data.allFinite(); // Construct the class for performing the initial statistical tests - std::shared_ptr glm_test; + std::shared_ptr glm_test; if (extra_columns.size() || nans_in_data) { - glm_test.reset (new GLMTTestVariable (extra_columns, data, design, contrast, nans_in_data, nans_in_columns)); + glm_test.reset (new GLM::TestVariable (extra_columns, data, design, contrasts, nans_in_data, nans_in_columns)); } else { - glm_test.reset (new GLMTTestFixed (data, design, contrast)); + glm_test.reset (new GLM::TestFixed (data, design, contrasts)); } // Only add contrast row number to image outputs if there's more than one contrast auto postfix = [&] (const size_t i) { return (num_contrasts > 1) ? 
("_" + str(i)) : ""; }; { - matrix_type betas (contrast.cols(), num_edges); - matrix_type abs_effect_size (num_contrasts, num_edges), std_effect_size (num_contrasts, num_edges), stdev (num_contrasts, num_edges); + matrix_type betas (num_factors, num_edges); + // TODO Pretty sure these are transposed with respect to what I'd prefer them to be + matrix_type abs_effect_size (num_contrasts, num_edges), std_effect_size (num_contrasts, num_edges); + vector_type stdev (num_edges); if (extra_columns.size()) { @@ -329,8 +341,8 @@ void run() class Functor { MEMALIGN(Functor) public: - Functor (const matrix_type& data, std::shared_ptr glm_test, const matrix_type& contrasts, - matrix_type& betas, matrix_type& abs_effect_size, matrix_type& std_effect_size, matrix_type& stdev) : + Functor (const matrix_type& data, std::shared_ptr glm_test, const vector& contrasts, + matrix_type& betas, matrix_type& abs_effect_size, matrix_type& std_effect_size, vector_type& stdev) : data (data), glm_test (glm_test), contrasts (contrasts), @@ -342,29 +354,30 @@ void run() bool operator() (const size_t& edge_index) { const matrix_type data_edge = data.row (edge_index); - const matrix_type design_edge = dynamic_cast(glm_test.get())->default_design (edge_index); + const matrix_type design_edge = dynamic_cast(glm_test.get())->default_design (edge_index); Math::Stats::GLM::all_stats (data_edge, design_edge, contrasts, local_betas, local_abs_effect_size, local_std_effect_size, local_stdev); global_betas.col (edge_index) = local_betas; global_abs_effect_size.col(edge_index) = local_abs_effect_size.col(0); global_std_effect_size.col(edge_index) = local_std_effect_size.col(0); - global_stdev.col(edge_index) = local_stdev.col(0); + global_stdev[edge_index] = local_stdev[0]; return true; } private: const matrix_type& data; - const std::shared_ptr glm_test; - const matrix_type& contrasts; + const std::shared_ptr glm_test; + const vector& contrasts; matrix_type& global_betas; matrix_type& global_abs_effect_size; matrix_type& global_std_effect_size; - matrix_type& global_stdev; - matrix_type local_betas, local_abs_effect_size, local_std_effect_size, local_stdev; + vector_type& global_stdev; + matrix_type local_betas, local_abs_effect_size, local_std_effect_size; + vector_type local_stdev; }; Source source (num_edges); - Functor functor (data, glm_test, contrast, + Functor functor (data, glm_test, contrasts, betas, abs_effect_size, std_effect_size, stdev); Thread::run_queue (source, Thread::batch (size_t()), Thread::multi (functor)); @@ -372,20 +385,23 @@ void run() } else { ProgressBar progress ("calculating basic properties of default permutation"); - Math::Stats::GLM::all_stats (data, design, contrast, + Math::Stats::GLM::all_stats (data, design, contrasts, betas, abs_effect_size, std_effect_size, stdev); } - ProgressBar progress ("outputting beta coefficients, effect size and standard deviation", contrast.cols() + (3 * num_contrasts)); - for (ssize_t i = 0; i != contrast.cols(); ++i) { + // TODO Contrasts should be somehow named, in order to differentiate between t-tests and F-tests + + ProgressBar progress ("outputting beta coefficients, effect size and standard deviation", num_factors + (2 * num_contrasts) + 1); + for (ssize_t i = 0; i != num_factors; ++i) { save_matrix (mat2vec.V2M (betas.row(i)), "beta" + str(i) + ".csv"); ++progress; } for (size_t i = 0; i != num_contrasts; ++i) { save_matrix (mat2vec.V2M (abs_effect_size.row(i)), "abs_effect" + postfix(i) + ".csv"); ++progress; save_matrix (mat2vec.V2M 
(std_effect_size.row(i)), "std_effect" + postfix(i) + ".csv"); ++progress; - save_matrix (mat2vec.V2M (stdev.row(i)), "std_dev" + postfix(i) + ".csv"); ++progress; } + save_matrix (mat2vec.V2M (stdev), "std_dev.csv"); + } diff --git a/cmd/fixelcfestats.cpp b/cmd/fixelcfestats.cpp index 4b525c812a..ef54791d59 100644 --- a/cmd/fixelcfestats.cpp +++ b/cmd/fixelcfestats.cpp @@ -38,6 +38,7 @@ using namespace MR; using namespace App; using namespace MR::DWI::Tractography::Mapping; using namespace MR::Math::Stats; +using namespace MR::Math::Stats::GLM; using Stats::CFE::direction_type; using Stats::CFE::connectivity_value_type; @@ -56,7 +57,7 @@ void usage () SYNOPSIS = "Fixel-based analysis using connectivity-based fixel enhancement and non-parametric permutation testing"; DESCRIPTION - + Math::Stats::glm_column_ones_description; + + Math::Stats::GLM::column_ones_description; REFERENCES + "Raffelt, D.; Smith, RE.; Ridgway, GR.; Tournier, JD.; Vaughan, DN.; Rose, S.; Henderson, R.; Connelly, A." // Internal @@ -262,8 +263,13 @@ void run() } // Load contrast matrix - const matrix_type contrast = load_matrix (argument[3]); - const size_t num_contrasts = contrast.rows(); + vector contrasts; + { + const matrix_type contrast_matrix = load_matrix (argument[3]); + for (ssize_t row = 0; row != contrast_matrix.rows(); ++row) + contrasts.emplace_back (Contrast (contrast_matrix.row (row))); + } + const size_t num_contrasts = contrasts.size(); // Before validating the contrast matrix, we first need to see if there are any // additional design matrix columns coming from fixel-wise subject data @@ -282,8 +288,9 @@ void run() INFO ("Non-finite values detected in element-wise design matrix columns; individual rows will be removed from fixel-wise design matrices accordingly"); } - if (contrast.cols() != design.cols() + ssize_t(extra_columns.size())) - throw Exception ("the number of columns per contrast (" + str(contrast.cols()) + ")" + const ssize_t num_factors = design.cols() + extra_columns.size(); + if (contrasts[0].cols() != num_factors) + throw Exception ("the number of columns per contrast (" + str(contrasts[0].cols()) + ")" + (extra_columns.size() ? " (in addition to the " + str(extra_columns.size()) + " uses of -column)" : "") + " does not equal the number of columns in the design matrix (" + str(design.cols()) + ")"); @@ -419,19 +426,20 @@ void run() // Construct the class for performing the initial statistical tests - std::shared_ptr glm_test; + std::shared_ptr glm_test; if (extra_columns.size() || nans_in_data) { - glm_test.reset (new GLMTTestVariable (extra_columns, data, design, contrast, nans_in_data, nans_in_columns)); + glm_test.reset (new GLM::TestVariable (extra_columns, data, design, contrasts, nans_in_data, nans_in_columns)); } else { - glm_test.reset (new GLMTTestFixed (data, design, contrast)); + glm_test.reset (new GLM::TestFixed (data, design, contrasts)); } // Only add contrast row number to image outputs if there's more than one contrast auto postfix = [&] (const size_t i) { return (num_contrasts > 1) ? 
("_" + str(i)) : ""; }; { - matrix_type betas (contrast.cols(), num_fixels); - matrix_type abs_effect_size (num_contrasts, num_fixels), std_effect_size (num_contrasts, num_fixels), stdev (num_contrasts, num_fixels); + matrix_type betas (num_factors, num_fixels); + matrix_type abs_effect_size (num_contrasts, num_fixels), std_effect_size (num_contrasts, num_fixels); + vector_type stdev (num_fixels); if (extra_columns.size()) { @@ -472,8 +480,8 @@ void run() class Functor { MEMALIGN(Functor) public: - Functor (const matrix_type& data, std::shared_ptr glm_test, const matrix_type& contrasts, - matrix_type& betas, matrix_type& abs_effect_size, matrix_type& std_effect_size, matrix_type& stdev) : + Functor (const matrix_type& data, std::shared_ptr glm_test, const vector& contrasts, + matrix_type& betas, matrix_type& abs_effect_size, matrix_type& std_effect_size, vector_type& stdev) : data (data), glm_test (glm_test), contrasts (contrasts), @@ -485,49 +493,50 @@ void run() bool operator() (const size_t& fixel_index) { const matrix_type data_fixel = data.row (fixel_index); - const matrix_type design_fixel = dynamic_cast(glm_test.get())->default_design (fixel_index); + const matrix_type design_fixel = dynamic_cast(glm_test.get())->default_design (fixel_index); Math::Stats::GLM::all_stats (data_fixel, design_fixel, contrasts, local_betas, local_abs_effect_size, local_std_effect_size, local_stdev); global_betas.col(fixel_index) = local_betas; global_abs_effect_size.col(fixel_index) = local_abs_effect_size.col(0); global_std_effect_size.col(fixel_index) = local_std_effect_size.col(0); - global_stdev.col(fixel_index) = local_stdev.col(0); + global_stdev[fixel_index] = local_stdev[0]; return true; } private: const matrix_type& data; - const std::shared_ptr glm_test; - const matrix_type& contrasts; + const std::shared_ptr glm_test; + const vector& contrasts; matrix_type& global_betas; matrix_type& global_abs_effect_size; matrix_type& global_std_effect_size; - matrix_type& global_stdev; - matrix_type local_betas, local_abs_effect_size, local_std_effect_size, local_stdev; + vector_type& global_stdev; + matrix_type local_betas, local_abs_effect_size, local_std_effect_size; + vector_type local_stdev; }; Source source (num_fixels); - Functor functor (data, glm_test, contrast, + Functor functor (data, glm_test, contrasts, betas, abs_effect_size, std_effect_size, stdev); Thread::run_queue (source, Thread::batch (size_t()), Thread::multi (functor)); } else { ProgressBar progress ("calculating basic properties of default permutation"); - Math::Stats::GLM::all_stats (data, design, contrast, + Math::Stats::GLM::all_stats (data, design, contrasts, betas, abs_effect_size, std_effect_size, stdev); } - ProgressBar progress ("outputting beta coefficients, effect size and standard deviation", contrast.cols() + (3 * num_contrasts)); - for (ssize_t i = 0; i != contrast.cols(); ++i) { + ProgressBar progress ("outputting beta coefficients, effect size and standard deviation", num_factors + (2 * num_contrasts) + 1); + for (ssize_t i = 0; i != num_factors; ++i) { write_fixel_output (Path::join (output_fixel_directory, "beta" + str(i) + ".mif"), betas.row(i), output_header); ++progress; } for (size_t i = 0; i != num_contrasts; ++i) { write_fixel_output (Path::join (output_fixel_directory, "abs_effect" + postfix(i) + ".mif"), abs_effect_size.row(i), output_header); ++progress; write_fixel_output (Path::join (output_fixel_directory, "std_effect" + postfix(i) + ".mif"), std_effect_size.row(i), output_header); ++progress; - 
write_fixel_output (Path::join (output_fixel_directory, "std_dev" + postfix(i) + ".mif"), stdev.row(i), output_header); } + write_fixel_output (Path::join (output_fixel_directory, "std_dev.mif"), stdev, output_header); } diff --git a/cmd/mrclusterstats.cpp b/cmd/mrclusterstats.cpp index de19406452..577fe8ba06 100644 --- a/cmd/mrclusterstats.cpp +++ b/cmd/mrclusterstats.cpp @@ -32,6 +32,7 @@ using namespace MR; using namespace App; using namespace MR::Math::Stats; +using namespace MR::Math::Stats::GLM; #define DEFAULT_TFCE_DH 0.1 @@ -46,7 +47,7 @@ void usage () SYNOPSIS = "Voxel-based analysis using permutation testing and threshold-free cluster enhancement"; DESCRIPTION - + Math::Stats::glm_column_ones_description; + + Math::Stats::GLM::column_ones_description; REFERENCES + "* If not using the -threshold command-line option:\n" @@ -110,7 +111,7 @@ void write_output (const VectorType& data, -// Define data importer class that willl obtain voxel data for a +// Define data importer class that will obtain voxel data for a // specific subject based on the string path to the image file for // that subject // @@ -209,8 +210,13 @@ void run() { throw Exception ("number of input files does not match number of rows in design matrix"); // Load contrast matrix - const matrix_type contrast = load_matrix (argument[2]); - const size_t num_contrasts = contrast.rows(); + vector contrasts; + { + const matrix_type contrast_matrix = load_matrix (argument[2]); + for (ssize_t row = 0; row != contrast_matrix.rows(); ++row) + contrasts.emplace_back (Contrast (contrast_matrix.row (row))); + } + const size_t num_contrasts = contrasts.size(); // Before validating the contrast matrix, we first need to see if there are any // additional design matrix columns coming from voxel-wise subject data @@ -230,8 +236,9 @@ void run() { INFO ("Non-finite values detected in element-wise design matrix columns; individual rows will be removed from voxel-wise design matrices accordingly"); } - if (contrast.cols() != design.cols() + ssize_t(extra_columns.size())) - throw Exception ("the number of columns per contrast (" + str(contrast.cols()) + ")" + const ssize_t num_factors = design.cols() + extra_columns.size(); + if (contrasts[0].cols() != num_factors) + throw Exception ("the number of columns per contrast (" + str(contrasts[0].cols()) + ")" + " does not equal the number of columns in the design matrix (" + str(design.cols()) + ")" + (extra_columns.size() ? " (taking into account the " + str(extra_columns.size()) + " uses of -column)" : "")); @@ -294,19 +301,20 @@ void run() { matrix_type empirical_enhanced_statistic; // Construct the class for performing the initial statistical tests - std::shared_ptr glm_test; + std::shared_ptr glm_test; if (extra_columns.size() || nans_in_data) { - glm_test.reset (new GLMTTestVariable (extra_columns, data, design, contrast, nans_in_data, nans_in_columns)); + glm_test.reset (new GLM::TestVariable (extra_columns, data, design, contrasts, nans_in_data, nans_in_columns)); } else { - glm_test.reset (new GLMTTestFixed (data, design, contrast)); + glm_test.reset (new GLM::TestFixed (data, design, contrasts)); } // Only add contrast row number to image outputs if there's more than one contrast auto postfix = [&] (const size_t i) { return (num_contrasts > 1) ? 
("_" + str(i)) : ""; }; { - matrix_type betas (contrast.cols(), num_voxels); - matrix_type abs_effect_size (num_contrasts, num_voxels), std_effect_size (num_contrasts, num_voxels), stdev (num_contrasts, num_voxels); + matrix_type betas (num_contrasts, num_voxels); + matrix_type abs_effect_size (num_contrasts, num_voxels), std_effect_size (num_contrasts, num_voxels); + vector_type stdev (num_voxels); if (extra_columns.size()) { @@ -347,8 +355,8 @@ void run() { class Functor { MEMALIGN(Functor) public: - Functor (const matrix_type& data, std::shared_ptr glm_test, const matrix_type& contrasts, - matrix_type& betas, matrix_type& abs_effect_size, matrix_type& std_effect_size, matrix_type& stdev) : + Functor (const matrix_type& data, std::shared_ptr glm_test, const vector& contrasts, + matrix_type& betas, matrix_type& abs_effect_size, matrix_type& std_effect_size, vector_type& stdev) : data (data), glm_test (glm_test), contrasts (contrasts), @@ -360,49 +368,50 @@ void run() { bool operator() (const size_t& voxel_index) { const matrix_type data_voxel = data.row (voxel_index); - const matrix_type design_voxel = dynamic_cast(glm_test.get())->default_design (voxel_index); + const matrix_type design_voxel = dynamic_cast(glm_test.get())->default_design (voxel_index); Math::Stats::GLM::all_stats (data_voxel, design_voxel, contrasts, local_betas, local_abs_effect_size, local_std_effect_size, local_stdev); global_betas.col (voxel_index) = local_betas; global_abs_effect_size.col(voxel_index) = local_abs_effect_size.col(0); global_std_effect_size.col(voxel_index) = local_std_effect_size.col(0); - global_stdev.col(voxel_index) = local_stdev.col(0); + global_stdev[voxel_index] = local_stdev[0]; return true; } private: const matrix_type& data; - const std::shared_ptr glm_test; - const matrix_type& contrasts; + const std::shared_ptr glm_test; + const vector& contrasts; matrix_type& global_betas; matrix_type& global_abs_effect_size; matrix_type& global_std_effect_size; - matrix_type& global_stdev; - matrix_type local_betas, local_abs_effect_size, local_std_effect_size, local_stdev; + vector_type& global_stdev; + matrix_type local_betas, local_abs_effect_size, local_std_effect_size; + vector_type local_stdev; }; Source source (num_voxels); - Functor functor (data, glm_test, contrast, + Functor functor (data, glm_test, contrasts, betas, abs_effect_size, std_effect_size, stdev); Thread::run_queue (source, Thread::batch (size_t()), Thread::multi (functor)); } else { ProgressBar progress ("calculating basic properties of default permutation"); - Math::Stats::GLM::all_stats (data, design, contrast, + Math::Stats::GLM::all_stats (data, design, contrasts, betas, abs_effect_size, std_effect_size, stdev); } - ProgressBar progress ("outputting beta coefficients, effect size and standard deviation", contrast.cols() + (3 * num_contrasts)); - for (ssize_t i = 0; i != contrast.cols(); ++i) { + ProgressBar progress ("outputting beta coefficients, effect size and standard deviation", num_factors + (2 * num_contrasts) + 1); + for (ssize_t i = 0; i != num_factors; ++i) { write_output (betas.row(i), v2v, prefix + "beta" + str(i) + ".mif", output_header); ++progress; } for (size_t i = 0; i != num_contrasts; ++i) { write_output (abs_effect_size.row(i), v2v, prefix + "abs_effect" + postfix(i) + ".mif", output_header); ++progress; write_output (std_effect_size.row(i), v2v, prefix + "std_effect" + postfix(i) + ".mif", output_header); ++progress; - write_output (stdev.row(i), v2v, prefix + "std_dev" + postfix(i) + ".mif", 
output_header); ++progress; } + write_output (stdev, v2v, prefix + "std_dev.mif", output_header); } std::shared_ptr enhancer; @@ -428,50 +437,6 @@ void run() { save_vector (empirical_enhanced_statistic.row(i), prefix + "empirical" + postfix(i) + ".txt"); } - Stats::PermTest::precompute_default_permutation (glm_test, enhancer, empirical_enhanced_statistic, - default_cluster_output, tvalue_output); - - { - ProgressBar progress ("generating pre-permutation output", contrast.cols() + (5 * num_contrasts)); - for (size_t i = 0; i != num_contrasts; ++i) { - write_output (tvalue_output.row(i), v2v, prefix + "tvalue" + postfix(i) + ".mif", output_header); - ++progress; - } - for (size_t i = 0; i != num_contrasts; ++i) { - write_output (default_cluster_output.row(i), v2v, prefix + (use_tfce ? "tfce" : "cluster_sizes") + postfix(i) + ".mif", output_header); - ++progress; - } - { - const auto betas = Math::Stats::GLM::solve_betas (data, design); - for (size_t i = 0; i != size_t(contrast.cols()); ++i) { - write_output (betas.row(i), v2v, prefix + "beta" + str(i) + ".mif", output_header); - ++progress; - } - } - { - const auto temp = Math::Stats::GLM::abs_effect_size (data, design, contrast); - for (size_t i = 0; i != num_contrasts; ++i) { - write_output (temp.row(i), v2v, prefix + "abs_effect" + postfix(i) + ".mif", output_header); - ++progress; - } - } - { - const auto temp = Math::Stats::GLM::std_effect_size (data, design, contrast); - for (size_t i = 0; i != num_contrasts; ++i) { - write_output (temp.row(i), v2v, prefix + "std_effect" + postfix(i) + ".mif", output_header); - ++progress; - } - } - { - const auto temp = Math::Stats::GLM::stdev (data, design); - for (size_t i = 0; i != num_contrasts; ++i) { - write_output (temp.row(i), v2v, prefix + "std_dev" + postfix(i) + ".mif", output_header); - ++progress; - } - } - - } - if (!get_options ("notest").size()) { matrix_type perm_distribution (num_contrasts, num_perms); diff --git a/cmd/vectorstats.cpp b/cmd/vectorstats.cpp index 9cdfc932bd..8781dc06e6 100644 --- a/cmd/vectorstats.cpp +++ b/cmd/vectorstats.cpp @@ -29,6 +29,7 @@ using namespace MR; using namespace App; using namespace MR::Math::Stats; +using namespace MR::Math::Stats::GLM; @@ -39,7 +40,13 @@ void usage () SYNOPSIS = "Statistical testing of vector data using non-parametric permutation testing"; DESCRIPTION - + Math::Stats::glm_column_ones_description; + + "This command can be used to perform permutation testing of any form of data. " + "The data for each input subject must be stored in a text file, with one value per row. " + "The data for each row across subjects will be tested independently, i.e. there is no " + "statistical enhancement that occurs between the data; however family-wise error control " + "will be used." 
+ + + Math::Stats::GLM::column_ones_description; ARGUMENTS @@ -138,8 +145,13 @@ void run() } // Load contrast matrix - const matrix_type contrast = load_matrix (argument[2]); - const size_t num_contrasts = contrast.rows(); + vector contrasts; + { + const matrix_type contrast_matrix = load_matrix (argument[2]); + for (ssize_t row = 0; row != contrast_matrix.rows(); ++row) + contrasts.emplace_back (Contrast (contrast_matrix.row (row))); + } + const size_t num_contrasts = contrasts.size(); // Before validating the contrast matrix, we first need to see if there are any // additional design matrix columns coming from voxel-wise subject data @@ -158,8 +170,9 @@ void run() INFO ("Non-finite values detected in element-wise design matrix columns; individual rows will be removed from voxel-wise design matrices accordingly"); } - if (contrast.cols() != design.cols() + ssize_t(extra_columns.size())) - throw Exception ("the number of columns per contrast (" + str(contrast.cols()) + ")" + const ssize_t num_factors = design.cols() + extra_columns.size(); + if (contrasts[0].cols() != num_factors) + throw Exception ("the number of columns per contrast (" + str(contrasts[0].cols()) + ")" + " does not equal the number of columns in the design matrix (" + str(design.cols()) + ")" + (extra_columns.size() ? " (taking into account the " + str(extra_columns.size()) + " uses of -column)" : "")); @@ -179,11 +192,11 @@ void run() } // Construct the class for performing the initial statistical tests - std::shared_ptr glm_test; + std::shared_ptr glm_test; if (extra_columns.size() || nans_in_data) { - glm_test.reset (new GLMTTestVariable (extra_columns, data, design, contrast, nans_in_data, nans_in_columns)); + glm_test.reset (new GLM::TestVariable (extra_columns, data, design, contrasts, nans_in_data, nans_in_columns)); } else { - glm_test.reset (new GLMTTestFixed (data, design, contrast)); + glm_test.reset (new GLM::TestFixed (data, design, contrasts)); } @@ -191,8 +204,9 @@ void run() auto postfix = [&] (const size_t i) { return (num_contrasts > 1) ? 
("_" + str(i)) : ""; }; { - matrix_type betas (contrast.cols(), num_elements); - matrix_type abs_effect_size (num_contrasts, num_elements), std_effect_size (num_contrasts, num_elements), stdev (num_contrasts, num_elements); + matrix_type betas (num_factors, num_elements); + matrix_type abs_effect_size (num_contrasts, num_elements), std_effect_size (num_contrasts, num_elements); + vector_type stdev (num_elements); if (extra_columns.size()) { @@ -233,8 +247,8 @@ void run() class Functor { MEMALIGN(Functor) public: - Functor (const matrix_type& data, std::shared_ptr glm_test, const matrix_type& contrasts, - matrix_type& betas, matrix_type& abs_effect_size, matrix_type& std_effect_size, matrix_type& stdev) : + Functor (const matrix_type& data, std::shared_ptr glm_test, const vector& contrasts, + matrix_type& betas, matrix_type& abs_effect_size, matrix_type& std_effect_size, vector_type& stdev) : data (data), glm_test (glm_test), contrasts (contrasts), @@ -246,44 +260,47 @@ void run() bool operator() (const size_t& index) { const matrix_type data_element = data.row (index); - const matrix_type design_element = dynamic_cast(glm_test.get())->default_design (index); + const matrix_type design_element = dynamic_cast(glm_test.get())->default_design (index); Math::Stats::GLM::all_stats (data_element, design_element, contrasts, local_betas, local_abs_effect_size, local_std_effect_size, local_stdev); global_betas.col (index) = local_betas; global_abs_effect_size.col(index) = local_abs_effect_size.col(0); global_std_effect_size.col(index) = local_std_effect_size.col(0); - global_stdev.col(index) = local_stdev.col(0); + global_stdev[index] = local_stdev[0]; return true; } private: const matrix_type& data; - const std::shared_ptr glm_test; - const matrix_type& contrasts; + const std::shared_ptr glm_test; + const vector& contrasts; matrix_type& global_betas; matrix_type& global_abs_effect_size; matrix_type& global_std_effect_size; - matrix_type& global_stdev; - matrix_type local_betas, local_abs_effect_size, local_std_effect_size, local_stdev; + vector_type& global_stdev; + matrix_type local_betas, local_abs_effect_size, local_std_effect_size; + vector_type local_stdev; }; Source source (num_elements); - Functor functor (data, glm_test, contrast, + Functor functor (data, glm_test, contrasts, betas, abs_effect_size, std_effect_size, stdev); Thread::run_queue (source, Thread::batch (size_t()), Thread::multi (functor)); } else { ProgressBar progress ("calculating basic properties of default permutation"); - Math::Stats::GLM::all_stats (data, design, contrast, + Math::Stats::GLM::all_stats (data, design, contrasts, betas, abs_effect_size, std_effect_size, stdev); } - ProgressBar progress ("outputting beta coefficients, effect size and standard deviation", 4); - save_matrix (betas, output_prefix + "betas.mif"); ++progress; - save_matrix (abs_effect_size, output_prefix + "abs_effect.csv"); ++progress; - save_matrix (std_effect_size, output_prefix + "std_effect.csv"); ++progress; - save_matrix (stdev, output_prefix + "std_dev.csv"); + ProgressBar progress ("outputting beta coefficients, effect size and standard deviation", 2 + (2 * num_contrasts)); + save_matrix (betas, output_prefix + "betas.csv"); ++progress; + for (size_t i = 0; i != num_contrasts; ++i) { + save_vector (abs_effect_size.row(i), output_prefix + "abs_effect" + postfix(i) + ".csv"); ++progress; + save_vector (std_effect_size.row(i), output_prefix + "std_effect" + postfix(i) + ".csv"); ++progress; + } + save_vector (stdev, output_prefix + 
"std_dev.csv"); } @@ -295,7 +312,8 @@ void run() default_permutation[i] = i; matrix_type default_tvalues; (*glm_test) (default_permutation, default_tvalues); - save_matrix (default_tvalues, output_prefix + "tvalue.csv"); + for (size_t i = 0; i != num_contrasts; ++i) + save_matrix (default_tvalues.row(i), output_prefix + "tvalue" + postfix(i) + ".csv"); // Perform permutation testing if (!get_options ("notest").size()) { @@ -315,8 +333,10 @@ void run() matrix_type default_pvalues (num_elements, num_contrasts); Math::Stats::Permutation::statistic2pvalue (null_distribution, default_tvalues, default_pvalues); - save_matrix (default_pvalues, output_prefix + "fwe_pvalue.csv"); - save_matrix (uncorrected_pvalues, output_prefix + "uncorrected_pvalue.csv"); + for (size_t i = 0; i != num_contrasts; ++i) { + save_vector (default_pvalues.row(i), output_prefix + "fwe_pvalue.csv"); + save_vector (uncorrected_pvalues.row(i), output_prefix + "uncorrected_pvalue.csv"); + } } } diff --git a/core/math/stats/glm.cpp b/core/math/stats/glm.cpp index 24096a8630..a00fd7e1b0 100644 --- a/core/math/stats/glm.cpp +++ b/core/math/stats/glm.cpp @@ -25,24 +25,24 @@ namespace MR { namespace Stats { + namespace GLM + { - const char* const glm_column_ones_description = - "In some software packages, a column of ones is automatically added to the " - "GLM design matrix; the purpose of this column is to estimate the \"global " - "intercept\", which is the predicted value of the observed variable if all " - "explanatory variables were to be zero. However there are rare situations " - "where including such a column would not be appropriate for a particular " - "experiment al design; hence, in MRtrix3 statistical inference commands, " - "it is up to the user to determine whether or not this column of ones should " - "be included in their design matrix, and add it explicitly if necessary. " - "The contrast matrix must also reflect the presence of this additional column."; + const char* const column_ones_description = + "In some software packages, a column of ones is automatically added to the " + "GLM design matrix; the purpose of this column is to estimate the \"global " + "intercept\", which is the predicted value of the observed variable if all " + "explanatory variables were to be zero. However there are rare situations " + "where including such a column would not be appropriate for a particular " + "experimental design. Hence, in MRtrix3 statistical inference commands, " + "it is up to the user to determine whether or not this column of ones should " + "be included in their design matrix, and add it explicitly if necessary. 
" + "The contrast matrix must also reflect the presence of this additional column."; - namespace GLM - { matrix_type solve_betas (const matrix_type& measurements, const matrix_type& design) { @@ -50,42 +50,66 @@ namespace MR } - matrix_type abs_effect_size (const matrix_type& measurements, const matrix_type& design, const matrix_type& contrasts) + + vector_type abs_effect_size (const matrix_type& measurements, const matrix_type& design, const Contrast& contrast) + { + return matrix_type(contrast) * solve_betas (measurements, design); + } + + matrix_type abs_effect_size (const matrix_type& measurements, const matrix_type& design, const vector& contrasts) { - return contrasts * solve_betas (measurements, design); + matrix_type result (measurements.rows(), contrasts.size()); + for (size_t ic = 0; ic != contrasts.size(); ++ic) + result.col (ic) = abs_effect_size (measurements, design, contrasts[ic]); + return result; } - matrix_type stdev (const matrix_type& measurements, const matrix_type& design) + + vector_type stdev (const matrix_type& measurements, const matrix_type& design) { - matrix_type residuals = measurements.transpose() - design * solve_betas (measurements, design); //TODO - residuals = residuals.array().pow(2.0); - matrix_type one_over_dof (1, measurements.cols()); //TODO supply transposed measurements + matrix_type residuals = measurements.transpose() - design * solve_betas (measurements, design); + residuals = residuals.array().pow (2.0); + matrix_type one_over_dof (1, measurements.cols()); one_over_dof.fill (1.0 / value_type(design.rows()-Math::rank (design))); return (one_over_dof * residuals).array().sqrt(); } - matrix_type std_effect_size (const matrix_type& measurements, const matrix_type& design, const matrix_type& contrasts) + + vector_type std_effect_size (const matrix_type& measurements, const matrix_type& design, const Contrast& contrast) + { + return abs_effect_size (measurements, design, contrast).array() / stdev (measurements, design).array(); + } + + matrix_type std_effect_size (const matrix_type& measurements, const matrix_type& design, const vector& contrasts) { - return abs_effect_size (measurements, design, contrasts).array() / stdev (measurements, design).array(); + const auto stdev_reciprocal = vector_type::Ones (measurements.rows()).array() / stdev (measurements, design).array(); + matrix_type result (measurements.rows(), contrasts.size()); + for (size_t ic = 0; ic != contrasts.size(); ++ic) + result.col (ic) = abs_effect_size (measurements, design, contrasts[ic]) * stdev_reciprocal; + return result; } + void all_stats (const matrix_type& measurements, const matrix_type& design, - const matrix_type& contrasts, + const vector& contrasts, matrix_type& betas, matrix_type& abs_effect_size, matrix_type& std_effect_size, - matrix_type& stdev) + vector_type& stdev) { betas = solve_betas (measurements, design); //std::cerr << "Betas: " << betas.rows() << " x " << betas.cols() << ", max " << betas.array().maxCoeff() << "\n"; - abs_effect_size = contrasts * betas; + abs_effect_size.resize (measurements.rows(), contrasts.size()); + // TESTME Surely this doesn't make sense for an F-test? 
+ for (size_t ic = 0; ic != contrasts.size(); ++ic) + abs_effect_size.col (ic) = matrix_type (contrasts[ic]) * betas; //std::cerr << "abs_effect_size: " << abs_effect_size.rows() << " x " << abs_effect_size.cols() << ", max " << abs_effect_size.array().maxCoeff() << "\n"; matrix_type residuals = measurements.transpose() - design * betas; - residuals = residuals.array().pow(2.0); + residuals = residuals.array().pow (2.0); //std::cerr << "residuals: " << residuals.rows() << " x " << residuals.cols() << ", max " << residuals.array().maxCoeff() << "\n"; matrix_type one_over_dof (1, measurements.cols()); one_over_dof.fill (1.0 / value_type(design.rows()-Math::rank (design))); @@ -96,282 +120,108 @@ namespace MR //std::cerr << "stdev: " << stdev.rows() << " x " << stdev.cols() << ", max " << stdev.array().maxCoeff() << "\n"; // TODO Should be a cleaner way of doing this (broadcasting?) matrix_type stdev_fill (abs_effect_size.rows(), abs_effect_size.cols()); - for (size_t i = 0; i != stdev_fill.rows(); ++i) + for (ssize_t i = 0; i != stdev_fill.rows(); ++i) stdev_fill.row(i) = stdev; std_effect_size = abs_effect_size.array() / stdev_fill.array(); //std::cerr << "std_effect_size: " << std_effect_size.rows() << " x " << std_effect_size.cols() << ", max " << std_effect_size.array().maxCoeff() << "\n"; - //TRACE; } - } - - - - GLMTTestFixed::GLMTTestFixed (const matrix_type& measurements, const matrix_type& design, const matrix_type& contrasts) : - GLMTestBase (measurements, design, contrasts), - pinvX (Math::pinv (X)), - scaled_contrasts_t (calc_scaled_contrasts().transpose()) - { - assert (contrasts.cols() == design.cols()); - } - - - - void GLMTTestFixed::operator() (const vector& perm_labelling, matrix_type& output) const - { - output = matrix_type::Zero (num_elements(), num_outputs()); - matrix_type tvalues, betas, residuals_t, SX_t, pinvSX_t; - - // TODO Currently the entire design matrix is permuted; - // we may instead prefer Freedman-Lane - // This however would be different for each row in the contrasts matrix, - // since the columns that correspond to nuisance variables - // varies between rows - - SX_t.resize (X.rows(), X.cols()); - pinvSX_t.resize (pinvX.rows(), pinvX.cols()); - for (ssize_t i = 0; i < X.rows(); ++i) { - SX_t.row(i) = X.row (perm_labelling[i]); - pinvSX_t.col(i) = pinvX.col (perm_labelling[i]); - } - - SX_t.transposeInPlace(); - pinvSX_t.transposeInPlace(); - for (ssize_t i = 0; i < y.rows(); i += GLM_BATCH_SIZE) { - const auto tmp = y.block (i, 0, std::min (GLM_BATCH_SIZE, (int)(y.rows()-i)), y.cols()); - ttest (tvalues, SX_t, pinvSX_t, tmp, betas, residuals_t); - for (size_t col = 0; col != num_outputs(); ++col) { - for (size_t n = 0; n != size_t(tvalues.rows()); ++n) { - value_type val = tvalues(n, col); - if (!std::isfinite (val)) - val = value_type(0); - output(i+n, col) = val; - } + Contrast::Partition Contrast::operator() (const matrix_type& design) const + { + // For now, let's do the most basic partitioning possible: + // Split design matrix column-wise depending on whether entries in the contrast matrix are all zero + // TODO Later, may include config variables / compiler flags to change model partitioning technique + matrix_type X, Z; + const size_t nonzero_column_count = c.colwise().any().count(); + X.resize (design.rows(), nonzero_column_count); + Z.resize (design.rows(), design.cols() - nonzero_column_count); + ssize_t ix = 0, iz = 0; + for (ssize_t ic = 0; ic != c.cols(); ++ic) { + if (c.col (ic).any()) + X.col (ix++) = design.col (ic); + else + Z.col 
(iz++) = design.col (ic); } + return Partition (X, Z); } - } - void GLMTTestFixed::ttest (matrix_type& tvalues, - const matrix_type& design_t, - const matrix_type& pinv_design_t, - Eigen::Block measurements, - matrix_type& betas, - matrix_type& residuals_t) const - { - betas.noalias() = measurements * pinv_design_t; - residuals_t.noalias() = measurements - betas * design_t; - tvalues.noalias() = betas * scaled_contrasts_t; - for (size_t n = 0; n < size_t(tvalues.rows()); ++n) - tvalues.row(n).array() /= residuals_t.row(n).norm(); - } - - // scale contrasts for use in ttest() member function - /* This function pre-scales the contrast matrix in order to make conversion from GLM betas - * to t-values more computationally efficient. - * - * For design matrix X, contrast matrix c, beta vector b and variance o^2, the t-value is calculated as: - * c^T.b - * t = -------------------------- - * sqrt(o^2.c^T.(X^T.X)^-1.c) - * - * Definition of variance (for vector of residuals e): - * e^T.e - * o^2 = ------ - * DOF(X) - * - * (Note that the above equations are used directly in GLMTTestVariable) - * - * This function will generate scaled contrasts c' from c, such that: - * DOF(X) - * c' = c.sqrt(------------------) - * c^T.(X^T.X)^-1.c - * - * c'^T.b - * t = ----------- - * sqrt(e^T.e) - * - * Note each row of the contrast matrix will still be treated as an independent contrast. The number - * of elements in each contrast vector must equal the number of columns in the design matrix. - */ - matrix_type GLMTTestFixed::calc_scaled_contrasts() const - { - const size_t dof = X.rows() - Math::rank(X); - const matrix_type XtX = X.transpose() * X; - const matrix_type pinv_XtX = (XtX.transpose() * XtX).fullPivLu().solve (XtX.transpose()); - matrix_type result = c; - for (size_t n = 0; n < size_t(c.rows()); ++n) { - auto pinv_XtX_c = pinv_XtX * c.row(n).transpose(); - result.row(n) *= std::sqrt (value_type(dof) / c.row(n).dot (pinv_XtX_c)); - } - return result; - } + TestFixed::TestFixed (const matrix_type& measurements, const matrix_type& design, const vector& contrasts) : + TestBase (measurements, design, contrasts), + pinvM (Math::pinv (M)), + Rm (matrix_type::Identity (num_subjects(), num_subjects()) - (M*pinvM)) + { + assert (contrasts[0].cols() == design.cols()); + // When the design matrix is fixed, we can pre-calculate the model partitioning for each contrast + for (const auto c : contrasts) + partitions.emplace_back (c (design)); + } - - - - - GLMTTestVariable::GLMTTestVariable (const vector& importers, const matrix_type& measurements, const matrix_type& design, const matrix_type& contrasts, const bool nans_in_data, const bool nans_in_columns) : - GLMTestBase (measurements, design, contrasts), - importers (importers), - nans_in_data (nans_in_data), - nans_in_columns (nans_in_columns) - { - // Make sure that the specified contrasts reflect the full design matrix (with additional - // data loaded) - assert (contrasts.cols() == X.cols() + ssize_t(importers.size())); - } - - - - void GLMTTestVariable::operator() (const vector& perm_labelling, matrix_type& output) const - { - output = matrix_type::Zero (num_elements(), num_outputs()); - matrix_type tvalues, betas, residuals; - - // Set the size of the permuted design matrix to include the additional columns - // that will be imported from external files - matrix_type SX (X.rows(), X.cols() + importers.size()); - - // Pre-permute the fixed contents of the design matrix - for (ssize_t row = 0; row != X.rows(); ++row) - SX.block(row, 0, 1, X.cols()) = X.row 
(perm_labelling[row]); - - // Loop over all elements in the input image - for (ssize_t element = 0; element != y.rows(); ++element) { - - // For each element (row in y), need to load the additional data for that element - // for all subjects in order to construct the design matrix - // Would it be preferable to pre-calculate and store these per-element design matrices, - // rather than re-generating them each time? (More RAM, less CPU) - // No, most of the time that subject data will be memory-mapped, so pre-loading (in - // addition to the duplication of the fixed design matrix contents) would hurt bad - matrix_type extra_data (X.rows(), importers.size()); - for (ssize_t col = 0; col != ssize_t(importers.size()); ++col) - extra_data.col(col) = importers[col] (element); - - // If there are non-finite values present either in the input - // data or the element-wise design matrix columns (or both), - // need to track which rows are being kept / discarded - BitSet element_mask (X.rows(), true); - if (nans_in_data) { - for (ssize_t row = 0; row != y.rows(); ++row) { - if (!std::isfinite (y (row, element))) - element_mask[row] = false; - } - } - if (nans_in_columns) { - // Bear in mind that we need to test for finite values in the - // row in which this data is going to be written to based on - // the permutation labelling - for (ssize_t row = 0; row != extra_data.rows(); ++row) { - if (!extra_data.row (perm_labelling[row]).allFinite()) - element_mask[row] = false; - } - } - - // Do we need to reduce the size of our matrices / vectors - // based on the presence of non-finite values? - if (element_mask.full()) { - - // Make sure the data from the additional columns is appropriately permuted - // (i.e. in the same way as what the fixed portion of the design matrix experienced) - for (ssize_t row = 0; row != X.rows(); ++row) - SX.block(row, X.cols(), 1, importers.size()) = extra_data.row (perm_labelling[row]); - - ttest (tvalues, SX, y.row(element), betas, residuals); - - } else { - - const ssize_t new_num_elements = element_mask.count(); - matrix_type y_masked (1, new_num_elements); - matrix_type SX_masked (new_num_elements, X.cols() + importers.size()); - ssize_t new_index = 0; - for (ssize_t old_index = 0; old_index != X.rows(); ++old_index) { - if (element_mask[old_index]) { - y_masked(0, new_index) = y(old_index, element); - SX_masked.block (new_index, 0, 1, X.cols()) = SX.block (old_index, 0, 1, X.cols()); - SX_masked.block (new_index, X.cols(), 1, importers.size()) = extra_data.row (perm_labelling[old_index]); - ++new_index; + void TestFixed::operator() (const vector& perm_labelling, matrix_type& output) const + { + assert (perm_labelling.size() == num_subjects()); + if (!(size_t(output.rows()) == num_elements() && size_t(output.cols()) == num_outputs())) + output.resize (num_elements(), num_outputs()); + + + // TODO Re-express the permutation labelling as a permutation matrix + // (we'll deal with altering how these permutations are provided + // to the GLM code later) + matrix_type perm_matrix (matrix_type::Zero (num_subjects(), num_subjects())); + for (size_t i = 0; i != num_subjects(); ++i) + perm_matrix (i, perm_labelling[i]) = value_type(1); // TESTME + + matrix_type beta, betahat; + vector_type F; + + // Implement Freedman-Lane for fixed design matrix case + // Each contrast needs to be handled explicitly on its own + for (size_t ic = 0; ic != c.size(); ++ic) { + + // First, we perform permutation of the input data + // In Freedman-Lane, the initial 'effective' regression against the 
nuisance + // variables, and permutation of the data, are done in a single step + const matrix_type Sy = perm_matrix * partitions[ic].Rz * y; + + // Now, we regress this shuffled data against the full model + //ttest (tvalues, X.transpose(), pinvX.transpose(), Sy, betas, residuals_t); + beta.noalias() = Sy * pinvM; + betahat.noalias() = beta * matrix_type(c[ic]); + F = (betahat.transpose() * (partitions[ic].X.inverse()*partitions[ic].X) * betahat / c[ic].rank()) / + ((Rm*Sy).squaredNorm() / (num_subjects() - partitions[ic].rank_x - partitions[ic].rank_z)); + + // Put the results into the output matrix, replacing NaNs with zeroes + // TODO Check again to see if this new statistic produces NaNs when input data are all zeroes + // We also need to convert F to t if necessary + for (ssize_t iF = 0; iF != F.size(); ++iF) { + if (!std::isfinite (F[iF])) { + output (iF, ic) = value_type(0); + } else if (c[ic].is_F()) { + output (iF, ic) = F[iF]; + } else { + assert (betahats.cols() == 1); + output (iF, ic) = std::sqrt (F[iF]) * (betahat (iF, 0) > 0 ? 1.0 : -1.0); } } - assert (new_index == new_num_elements); - - // const_cast required as Eigen does not know how to convert from RowXpr to ConstRowXpr - ttest (tvalues, SX_masked, const_cast(&y_masked)->row(0), betas, residuals); - - } - for (size_t col = 0; col != num_outputs(); ++col) { - value_type val = tvalues (element, col); - if (!std::isfinite (val)) - val = value_type(0); - output(element, col) = val; } - } - } - void GLMTTestVariable::ttest (matrix_type& tvalues, - const matrix_type& design, - matrix_type::ConstRowXpr measurements, - matrix_type& betas, - matrix_type& residuals) const - { - //std::cerr << "Design: " << design.rows() << " x " << design.cols() << ", max " << design.array().maxCoeff() << "\n"; - //std::cerr << "Measurements: " << measurements.rows() << " x " << measurements.cols() << ", max " << measurements.array().maxCoeff() << "\n"; - matrix_type pinv_design = Math::pinv (design); - //std::cerr << "PINV Design: " << pinv_design.rows() << " x " << pinv_design.cols() << ", max " << pinv_design.array().maxCoeff() << "\n"; - const matrix_type XtX = design.transpose() * design; - //std::cerr << "XtX: " << XtX.rows() << " x " << XtX.cols() << ", max " << XtX.array().maxCoeff() << "\n"; - const matrix_type pinv_XtX = (XtX.transpose() * XtX).fullPivLu().solve (XtX.transpose()); - //std::cerr << "PINV XtX: " << pinv_XtX.rows() << " x " << pinv_XtX.cols() << ", max " << pinv_XtX.array().maxCoeff() << "\n"; - betas = pinv_design * measurements.matrix(); - //std::cerr << "Betas: " << betas.rows() << " x " << betas.cols() << ", max " << betas.array().maxCoeff() << "\n"; - residuals = measurements - (design * betas); - //std::cerr << "Residuals: " << residuals.rows() << " x " << residuals.cols() << ", max " << residuals.array().maxCoeff() << "\n"; - tvalues = c * betas; - //std::cerr << "T-values: " << tvalues.rows() << " x " << tvalues.cols() << ", max " << tvalues.array().maxCoeff() << "\n"; - //VAR (Math::rank (design)); - const default_type variance = residuals.matrix().squaredNorm() / default_type(design.rows() - Math::rank(design)); - //VAR (variance); - // The fact that we're only able to test one element at a time here should be - // placing a restriction on the dimensionality of tvalues - // Previously, could be (number of elements) * (number of contrasts); - // now can only reflect the number of contrasts - for (size_t n = 0; n != num_outputs(); ++n) { - const default_type ct_pinv_XtX_c = c.row(n).dot (pinv_XtX * 
c.row(n).transpose()); - //VAR (ct_pinv_XtX_c); - tvalues.row(n) /= std::sqrt (variance * ct_pinv_XtX_c); - } - //std::cerr << "T-values: " << tvalues.rows() << " x " << tvalues.cols() << ", max " << tvalues.array().maxCoeff() << "\n"; - } - - - - matrix_type GLMTTestVariable::default_design (const size_t index) const - { - matrix_type output (X.rows(), X.cols() + importers.size()); - output.block (0, 0, X.rows(), X.cols()) = X; - for (size_t i = 0; i != importers.size(); ++i) - output.col (X.cols() + i) = importers[i] (index); - return output; - } @@ -380,33 +230,179 @@ namespace MR + TestVariable::TestVariable (const vector& importers, + const matrix_type& measurements, + const matrix_type& design, + const vector& contrasts, + const bool nans_in_data, + const bool nans_in_columns) : + TestBase (measurements, design, contrasts), + importers (importers), + nans_in_data (nans_in_data), + nans_in_columns (nans_in_columns) + { + // Make sure that the specified contrasts reflect the full design matrix (with additional + // data loaded) + assert (contrasts.cols() == M.cols() + ssize_t(importers.size())); + } + void TestVariable::operator() (const vector& perm_labelling, matrix_type& output) const + { + if (!(size_t(output.rows()) == num_elements() && size_t(output.cols()) == num_outputs())) + output.resize (num_elements(), num_outputs()); + + // Convert permutation labelling to a matrix, as for the fixed design matrix case + matrix_type perm_matrix (matrix_type::Zero (num_subjects(), num_subjects())); + for (size_t i = 0; i != num_subjects(); ++i) + perm_matrix (i, perm_labelling[i]) = value_type(1); + + // Let's loop over elements first, then contrasts in the inner loop + for (ssize_t element = 0; element != y.rows(); ++element) { + + // For each element (row in y), need to load the additional data for that element + // for all subjects in order to construct the design matrix + // Would it be preferable to pre-calculate and store these per-element design matrices, + // rather than re-generating them each time? (More RAM, less CPU) + // No, most of the time that subject data will be memory-mapped, so pre-loading (in + // addition to the duplication of the fixed design matrix contents) would hurt bad + matrix_type extra_data (num_subjects(), importers.size()); + for (ssize_t col = 0; col != ssize_t(importers.size()); ++col) + extra_data.col(col) = importers[col] (element); + + // What can we do here that's common across all contrasts? + // - Import the element-wise data + // - Identify rows to be excluded based on NaNs in the design matrix + // - Identify rows to be excluded based on NaNs in the input data + // + // Note that this is going to have to operate slightly differently to + // how it used to be done, i.e. via the permutation labelling vector, + // if we are to support taking the shuffling matrix as input to this functor + // I think the approach will have to be: + // - Both NaNs in design matrix and NaNs in input data need to be removed + // in order to perform the initial regression against nuisance variables + // - Can then remove the corresponding _columns_ of the permutation matrix? 
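For reference, the shuffling-matrix convention used in both test classes (perm_matrix (i, perm_labelling[i]) = 1, so that row i of the shuffled data is drawn from row perm_labelling[i] of the original) can be checked with a small standalone sketch; the labelling and values below are invented for illustration and are not part of the patch.

    // Sketch: build a shuffling matrix P from a permutation labelling and verify
    // that (P * v)[i] == v[labelling[i]].
    #include <Eigen/Dense>
    #include <iostream>
    #include <vector>

    int main()
    {
      const std::vector<size_t> labelling { 2, 0, 1 };   // example labelling only
      const size_t n = labelling.size();

      Eigen::MatrixXd P = Eigen::MatrixXd::Zero (n, n);
      for (size_t i = 0; i != n; ++i)
        P (i, labelling[i]) = 1.0;                       // same convention as perm_matrix above

      Eigen::VectorXd v (n);
      v << 10.0, 20.0, 30.0;
      std::cout << (P * v).transpose() << "\n";          // prints "30 10 20"
      return 0;
    }

The masking logic that follows then removes any row of this matrix that would draw its data from a subject excluded for non-finite values.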
+ // No, don't think it's removal of columns; think it's removal of any rows + // that contain non-zero values in those columns + // + BitSet element_mask (M.rows(), true); + if (nans_in_data) { + for (ssize_t row = 0; row != y.rows(); ++row) { + if (!std::isfinite (y (row, element))) + element_mask[row] = false; + } + } + if (nans_in_columns) { + for (ssize_t row = 0; row != extra_data.rows(); ++row) { + if (!extra_data.row (row).allFinite()) + element_mask[row] = false; + } + } + const size_t finite_count = element_mask.count(); + + // Do we need to reduce the size of our matrices / vectors + // based on the presence of non-finite values? + matrix_type Mfull_masked; + matrix_type perm_matrix_masked; + vector_type y_masked; + if (finite_count == num_subjects()) { + + Mfull_masked.resize (num_subjects(), num_factors()); + Mfull_masked.block (0, 0, num_subjects(), M.cols()) = M; + Mfull_masked.block (0, M.cols(), num_subjects(), extra_data.cols()) = extra_data; + perm_matrix_masked = perm_matrix; + y_masked = y.row (element); + + } else { + + Mfull_masked.resize (finite_count, num_factors()); + y_masked.resize (finite_count); + BitSet perm_matrix_mask (num_subjects(), true); + ssize_t out_index = 0; + for (size_t in_index = 0; in_index != num_subjects(); ++in_index) { + if (element_mask[in_index]) { + Mfull_masked.block (out_index, 0, 1, M.cols()) = M.row (in_index); + Mfull_masked.block (out_index, M.cols(), 1, extra_data.cols()) = extra_data.row (in_index); + y_masked[out_index] = y (element, in_index); + ++out_index; + } else { + // Any row in the permutation matrix that contains a non-zero entry + // in the column corresponding to in_row needs to be removed + // from the permutation matrix + for (ssize_t perm_row = 0; perm_row != perm_matrix.rows(); ++perm_row) { + if (perm_matrix (perm_row, in_index)) + perm_matrix_mask[perm_row] = false; + } + } + } + assert (out_index == finite_count); + assert (perm_matrix_mask.count() == finite_count); + // Only after we've reduced the design matrix do we now reduce the permutation matrix + perm_matrix_masked.resize (finite_count, num_subjects()); + out_index = 0; + for (size_t in_index = 0; in_index != num_subjects(); ++in_index) { + if (perm_matrix_mask[in_index]) + perm_matrix_masked.row (out_index++) = perm_matrix.row (in_index); + } + assert (out_index == finite_count); + } + assert (Mfull_masked.allFinite()); + + const matrix_type pinvMfull_masked = Math::pinv (Mfull_masked); + + const matrix_type Rm = matrix_type::Identity (finite_count, finite_count) - (Mfull_masked*pinvMfull_masked); + + matrix_type beta, betahat; + vector_type F; + + // We now have our permutation (shuffling) matrix and design matrix prepared, + // and can commence regressing the partitioned model of each contrast + for (size_t ic = 0; ic != c.size(); ++ic) { + + const auto partition = c[ic] (Mfull_masked); + + // Now that we have the individual contrast model partition for these data, + // the rest of this function should proceed similarly to the fixed + // design matrix case + // TODO Consider functionalising the below; should be consistent between fixed and variable + const matrix_type Sy = perm_matrix_masked * partition.Rz * y_masked.matrix(); + beta.noalias() = Sy * pinvMfull_masked; + betahat.noalias() = beta * matrix_type(c[ic]); + F = (betahat.transpose() * (partition.X.inverse()*partition.X) * betahat / c[ic].rank()) / + ((Rm*Sy).squaredNorm() / (finite_count - partition.rank_x - partition.rank_z)); + + for (ssize_t iF = 0; iF != F.size(); ++iF) { + if 
(!std::isfinite (F[iF])) { + output (iF, ic) = value_type(0); + } else if (c[ic].is_F()) { + output (iF, ic) = F[iF]; + } else { + assert (betahats.cols() == 1); + output (iF, ic) = std::sqrt (F[iF]) * (betahat (iF, 0) > 0 ? 1.0 : -1.0); + } + } + } // End looping over contrasts + } // End looping over elements + } - GLMFTestFixed::GLMFTestFixed (const matrix_type& measurements, const matrix_type& design, const matrix_type& contrasts, const matrix_type& ftests) : - GLMTestBase (measurements, design, contrasts), - ftests (ftests) { } + matrix_type TestVariable::default_design (const size_t index) const + { + matrix_type output (M.rows(), M.cols() + importers.size()); + output.block (0, 0, M.rows(), M.cols()) = M; + for (size_t i = 0; i != importers.size(); ++i) + output.col (M.cols() + i) = importers[i] (index); + return output; + } - void GLMFTestFixed::operator() (const vector& perm_labelling, matrix_type& output) const - { } - - - - - - - - - } } } diff --git a/core/math/stats/glm.h b/core/math/stats/glm.h index 1216a6a459..014d5449b6 100644 --- a/core/math/stats/glm.h +++ b/core/math/stats/glm.h @@ -25,23 +25,83 @@ namespace MR { namespace Stats { + namespace GLM + { - - extern const char* const glm_column_ones_description; - + // TODO Define a base class to contain information regarding an individual contrast, and + // pre-compute as much as possible with regards to Freedman-Lane + // Note: This can be constructed for both t-tests and F-tests + // (This is why the constructor is a template: Could be created either from a row() + // call on the contrast matrix, or from a matrix explicitly constructed from a set of + // rows from the contrast matrix, which is how an F-test is constructed. + // TODO In the case of a single-row F-test, still need to be able to differentiate between + // a t-test and an F-test for the sake of signedness (and maybe taking the square root) + // + // TODO Exactly how this may be utilised depends on whether a fixed or variable design + // matrix will be used; ideally the interface to the Contrast class should deal with this + class Contrast + { MEMALIGN(Contrast) + public: + + class Partition + { MEMALIGN (Partition) + public: + Partition (const matrix_type& x, const matrix_type& z) : + X (x), + Z (z), + Hz (Z * Math::pinv (Z)), + Rz (matrix_type::Identity (Z.rows(), Z.rows()) - Hz), + rank_x (Math::rank (X)), + rank_z (Math::rank (Z)) { } + // X = Component of design matrix related to effect of interest + // Z = Component of design matrix related to nuisance regressors + const matrix_type X, Z; + // We would also like to automatically calculate, on creation of a partition: + // Hz: Projection matrix of nuisance regressors only + // Rz: Residual-forming matrix due to nuisance regressors only + // rank_x: Rank of X + // rank_z: Rank of Z + const matrix_type Hz, Rz; + const size_t rank_x, rank_z; + }; + + Contrast (matrix_type::ConstRowXpr& in) : + c (in), + r (Math::rank (c)), + F (false) { } + + Contrast (const matrix_type& in) : + c (in), + r (Math::rank (c)), + F (true) { } + + Partition operator() (const matrix_type&) const; + + operator const matrix_type& () const { return c; } + ssize_t cols() const { return c.cols(); } + size_t rank() const { return r; } + bool is_F() const { return F; } + + private: + const matrix_type c; + const size_t r; + const bool F; + }; + + + + extern const char* const column_ones_description; - namespace GLM - { /** \addtogroup Statistics - @{ */ + @{ */ /*! 
Compute a matrix of the beta coefficients - * @param measurements a matrix storing the measured data for each subject in a column - * @param design the design matrix - * @return the matrix containing the output GLM betas - */ + * @param measurements a matrix storing the measured data for each subject in a column + * @param design the design matrix + * @return the matrix containing the output GLM betas + */ matrix_type solve_betas (const matrix_type& measurements, const matrix_type& design); @@ -52,7 +112,8 @@ namespace MR * @param contrast a matrix defining the group difference * @return the matrix containing the output effect */ - matrix_type abs_effect_size (const matrix_type& measurements, const matrix_type& design, const matrix_type& contrast); + vector_type abs_effect_size (const matrix_type& measurements, const matrix_type& design, const Contrast& contrast); + matrix_type abs_effect_size (const matrix_type& measurements, const matrix_type& design, const vector& contrasts); @@ -61,7 +122,7 @@ namespace MR * @param design the design matrix * @return the matrix containing the output standard deviation */ - matrix_type stdev (const matrix_type& measurements, const matrix_type& design); + vector_type stdev (const matrix_type& measurements, const matrix_type& design); @@ -71,7 +132,8 @@ namespace MR * @param contrast a matrix defining the group difference * @return the matrix containing the output standardised effect size */ - matrix_type std_effect_size (const matrix_type& measurements, const matrix_type& design, const matrix_type& contrast); + vector_type std_effect_size (const matrix_type& measurements, const matrix_type& design, const Contrast& contrast); + matrix_type std_effect_size (const matrix_type& measurements, const matrix_type& design, const vector& contrasts); @@ -84,193 +146,138 @@ namespace MR * @param std_effect_size the matrix containing the output standardised effect size * @param stdev the matrix containing the output standard deviation */ - void all_stats (const matrix_type& measurements, const matrix_type& design, const matrix_type& contrasts, - matrix_type& betas, matrix_type& abs_effect_size, matrix_type& std_effect_size, matrix_type& stdev); + void all_stats (const matrix_type& measurements, const matrix_type& design, const vector& contrasts, + matrix_type& betas, matrix_type& abs_effect_size, matrix_type& std_effect_size, vector_type& stdev); //! @} - } // End GLM namespace - - - - // Define a base class for GLM tests - // Should support both T-tests and F-tests - // The latter will always produce 1 column only, whereas the former will produce the same number of columns as there are contrasts - class GLMTestBase - { MEMALIGN(GLMTestBase) - public: - GLMTestBase (const matrix_type& measurements, const matrix_type& design, const matrix_type& contrasts) : - y (measurements), - X (design), - c (contrasts), - outputs (c.rows()) - { - assert (y.cols() == X.rows()); - // Can no longer apply this assertion here; GLMTTestVariable later - // expands the number of columns in X - //assert (c.cols() == X.cols()); - } - - /*! 
Compute the statistics - * @param perm_labelling a vector to shuffle the rows in the design matrix (for permutation testing) - * @param output the matrix containing the output statistics (one vector per contrast) - */ - virtual void operator() (const vector& perm_labelling, matrix_type& output) const = 0; - - size_t num_elements () const { return y.rows(); } - size_t num_factors () const { return X.cols(); } - size_t num_outputs () const { return outputs; } - size_t num_subjects () const { return X.rows(); } - - protected: - const matrix_type& y, X, c; - size_t outputs; - - }; - - - - - /** \addtogroup Statistics - @{ */ - /*! A class to compute t-statistics using a fixed General Linear Model. - * This class produces a t-statistic per contrast of interest. It should be used in - * cases where the same design matrix is to be applied for all image elements being - * tested; able to pre-compute a number of matrices before testing, improving - * execution speed. - */ - class GLMTTestFixed : public GLMTestBase - { MEMALIGN(GLMTTestFixed) - public: - /*! - * @param measurements a matrix storing the measured data for each subject in a column - * @param design the design matrix - * @param contrast a matrix containing the contrast of interest. - */ - GLMTTestFixed (const matrix_type& measurements, const matrix_type& design, const matrix_type& contrast); - - /*! Compute the t-statistics - * @param perm_labelling a vector to shuffle the rows in the design matrix (for permutation testing) - * @param output the vector containing the output t-statistics (one vector per contrast) - */ - void operator() (const vector& perm_labelling, matrix_type& output) const override; - - protected: - const matrix_type pinvX, scaled_contrasts_t; - - private: - - //! GLM t-test incorporating various optimisations - /*! note that the data, effects, and residual matrices are transposed. - * This is to take advantage of Eigen's convention of storing - * matrices in column-major format by default. - * - * This function makes use of member variable scaled_contrasts_t, - * set up by the GLMTTestFixed constructor, which is also transposed. */ - void ttest (matrix_type& tvalues, - const matrix_type& design_t, - const matrix_type& pinv_design_t, - Eigen::Block measurements, - matrix_type& betas, - matrix_type& residuals_t) const; - - //! Pre-scaling of contrast matrix - /*! This modulates the contents of the contrast matrix for compatibility - * with member function ttest(). - * - * Scaling is performed in a member function such that member scaled_contrasts_t - * can be defined as const. */ - matrix_type calc_scaled_contrasts() const; - - }; - //! @} - - - - /** \addtogroup Statistics - @{ */ - /*! A class to compute t-statistics using a 'variable' General Linear Model. - * This class produces a t-statistic per contrast of interest. It should be used in - * cases where additional subject data must be imported into the design matrix before - * computing t-values; the design matrix therefore does not remain fixed for all - * elements being tested, but varies depending on the particular element being tested. - * - * How additional data is imported into the design matrix will depend on the - * particular type of data being tested. Therefore an Importer class must be - * defined that is responsible for acquiring and vectorising these data. 
- */ - class GLMTTestVariable : public GLMTestBase - { MEMALIGN(GLMTTestVariable) - public: - GLMTTestVariable (const vector& importers, const matrix_type& measurements, const matrix_type& design, const matrix_type& contrasts, const bool nans_in_data, const bool nans_in_columns); - - /*! Compute the t-statistics - * @param perm_labelling a vector to shuffle the rows in the design matrix (for permutation testing) - * @param output the vector containing the output t-statistics - * - * In GLMTTestVariable, this function additionally needs to import the - * extra external data individually for each element tested. - */ - void operator() (const vector& perm_labelling, matrix_type& output) const override; - /*! Acquire the design matrix for the default permutation - * (note that this needs to be re-run for each element being tested) - * @param index the index of the element for which the design matrix is requested - * @return the design matrix for that element, including imported data for extra columns - */ - matrix_type default_design (const size_t index) const; - - protected: - const vector& importers; - const bool nans_in_data, nans_in_columns; - - //! generic GLM t-test - /*! This version of the t-test function does not incorporate the - * optimisations that are used in the GLMTTestFixed class, since - * many are not applicable when the design matrix changes between - * different elements. - * - * Since the design matrix varies between the different elements - * being tested, this function only accepts testing of a single - * vector of measurements at a time. */ - void ttest (matrix_type& tvalues, - const matrix_type& design, - matrix_type::ConstRowXpr measurements, - matrix_type& betas, - matrix_type& residuals) const; - }; - - - /** \addtogroup Statistics - @{ */ - /*! A class to compute F-statistics using a fixed General Linear Model. - * This class produces a single F-statistic across all contrasts of interest. - * NOT YET IMPLEMENTED - */ - class GLMFTestFixed : public GLMTestBase { MEMALIGN(GLMFTestFixed) - public: - /*! - * @param measurements a matrix storing the measured data for each subject in a column - * @param design the design matrix - * @param contrast a matrix containing the contrast of interest. - */ - GLMFTestFixed (const matrix_type& measurements, const matrix_type& design, const matrix_type& contrasts, const matrix_type& ftests); - /*! Compute the F-statistics - * @param perm_labelling a vector to shuffle the rows in the design matrix (for permutation testing) - * @param output the vector containing the output f-statistics - */ - void operator() (const vector& perm_labelling, matrix_type& output) const override; - protected: - // TODO How to deal with f-tests that apply to specific contrasts only? - const matrix_type ftests; - }; - //! @} + // Define a base class for GLM tests + // Should support both T-tests and F-tests + // The latter will always produce 1 column only, whereas the former will produce the same number of columns as there are contrasts + class TestBase + { MEMALIGN(TestBase) + public: + TestBase (const matrix_type& measurements, const matrix_type& design, const vector& contrasts) : + y (measurements), + M (design), + c (contrasts) + { + assert (y.cols() == M.rows()); + // Can no longer apply this assertion here; GLMTTestVariable later + // expands the number of columns in M + //assert (c.cols() == M.cols()); + } + + /*! 
Compute the statistics + * @param perm_labelling a vector to shuffle the rows in the design matrix (for permutation testing) + * @param output the matrix containing the output statistics (one vector per contrast) + */ + virtual void operator() (const vector& perm_labelling, matrix_type& output) const = 0; + + size_t num_elements () const { return y.rows(); } + size_t num_outputs () const { return c.size(); } + size_t num_subjects () const { return M.rows(); } + virtual size_t num_factors() const { return M.cols(); } + + protected: + const matrix_type& y, M; + const vector& c; + + }; + + + + + /** \addtogroup Statistics + @{ */ + /*! A class to compute statistics using a fixed General Linear Model. + * This class produces a statistic per contrast of interest: t-statistic for + * t-tests, F-statistic for F-tests. It should be used in + * cases where the same design matrix is to be applied for all image elements being + * tested; able to pre-compute a number of matrices before testing, improving + * execution speed. + */ + class TestFixed : public TestBase + { MEMALIGN(TestFixed) + public: + /*! + * @param measurements a matrix storing the measured data for each subject in a column + * @param design the design matrix + * @param contrast a matrix containing the contrast of interest. + */ + TestFixed (const matrix_type& measurements, const matrix_type& design, const vector& contrasts); + + /*! Compute the statistics + * @param perm_labelling a vector to shuffle the rows in the design matrix (for permutation testing) + * @param output the vector containing the output t-statistics (one column per contrast) + */ + void operator() (const vector& perm_labelling, matrix_type& output) const override; + + protected: + // New classes to store information relevant to Freedman-Lane implementation + vector partitions; + const matrix_type pinvM; + const matrix_type Rm; + + }; + //! @} + + + + /** \addtogroup Statistics + @{ */ + /*! A class to compute statistics using a 'variable' General Linear Model. + * This class produces a statistic per contrast of interest. It should be used in + * cases where additional subject data must be imported into the design matrix before + * computing t-values; the design matrix therefore does not remain fixed for all + * elements being tested, but varies depending on the particular element being tested. + * + * How additional data is imported into the design matrix will depend on the + * particular type of data being tested. Therefore an Importer class must be + * defined that is responsible for acquiring and vectorising these data. + */ + class TestVariable : public TestBase + { MEMALIGN(TestVariable) + public: + TestVariable (const vector& importers, + const matrix_type& measurements, + const matrix_type& design, + const vector& contrasts, + const bool nans_in_data, + const bool nans_in_columns); + + /*! Compute the statistics + * @param perm_labelling a vector to shuffle the rows in the design matrix (for permutation testing) + * @param output the vector containing the output statistics + * + * In TestVariable, this function additionally needs to import the + * extra external data individually for each element tested. + */ + void operator() (const vector& perm_labelling, matrix_type& output) const override; + + /*! 
Acquire the design matrix for the default permutation + * (note that this needs to be re-run for each element being tested) + * @param index the index of the element for which the design matrix is requested + * @return the design matrix for that element, including imported data for extra columns + */ + matrix_type default_design (const size_t index) const; + + size_t num_factors() const override { return M.cols() + importers.size(); } + + protected: + const vector& importers; + const bool nans_in_data, nans_in_columns; + + }; + + } } } } diff --git a/src/stats/permtest.cpp b/src/stats/permtest.cpp index 4b6a3fe8ec..91767584c3 100644 --- a/src/stats/permtest.cpp +++ b/src/stats/permtest.cpp @@ -56,7 +56,7 @@ namespace MR - PreProcessor::PreProcessor (const std::shared_ptr stats_calculator, + PreProcessor::PreProcessor (const std::shared_ptr stats_calculator, const std::shared_ptr enhancer, matrix_type& global_enhanced_sum, vector>& global_enhanced_count) : @@ -111,7 +111,7 @@ namespace MR - Processor::Processor (const std::shared_ptr stats_calculator, + Processor::Processor (const std::shared_ptr stats_calculator, const std::shared_ptr enhancer, const matrix_type& empirical_enhanced_statistics, const matrix_type& default_enhanced_statistics, @@ -123,8 +123,8 @@ namespace MR default_enhanced_statistics (default_enhanced_statistics), statistics (stats_calculator->num_elements(), stats_calculator->num_outputs()), enhanced_statistics (stats_calculator->num_elements(), stats_calculator->num_outputs()), - // NOTE: uncorrected_pvalue_counter currently transposed with respect to matrices - // TODO Consider changing to Eigen::Array + // NOTE: uncorrected_pvalue_counter currently transposed with respect to matrices + // TODO Consider changing to Eigen::Array uncorrected_pvalue_counter (stats_calculator->num_outputs(), vector (stats_calculator->num_elements(), 0)), perm_dist (perm_dist), global_uncorrected_pvalue_counter (global_uncorrected_pvalue_counter), @@ -175,7 +175,7 @@ namespace MR - void precompute_empirical_stat (const std::shared_ptr stats_calculator, + void precompute_empirical_stat (const std::shared_ptr stats_calculator, const std::shared_ptr enhancer, PermutationStack& perm_stack, matrix_type& empirical_statistic) { @@ -195,7 +195,7 @@ namespace MR - void precompute_default_permutation (const std::shared_ptr stats_calculator, + void precompute_default_permutation (const std::shared_ptr stats_calculator, const std::shared_ptr enhancer, const matrix_type& empirical_enhanced_statistic, matrix_type& default_enhanced_statistics, @@ -220,7 +220,7 @@ namespace MR void run_permutations (PermutationStack& perm_stack, - const std::shared_ptr stats_calculator, + const std::shared_ptr stats_calculator, const std::shared_ptr enhancer, const matrix_type& empirical_enhanced_statistic, const matrix_type& default_enhanced_statistics, @@ -247,7 +247,7 @@ namespace MR void run_permutations (const vector>& permutations, - const std::shared_ptr stats_calculator, + const std::shared_ptr stats_calculator, const std::shared_ptr enhancer, const matrix_type& empirical_enhanced_statistic, const matrix_type& default_enhanced_statistics, @@ -264,7 +264,7 @@ namespace MR void run_permutations (const size_t num_permutations, - const std::shared_ptr stats_calculator, + const std::shared_ptr stats_calculator, const std::shared_ptr enhancer, const matrix_type& empirical_enhanced_statistic, const matrix_type& default_enhanced_statistics, diff --git a/src/stats/permtest.h b/src/stats/permtest.h index 
9ce61b84f2..80a3344a3f 100644 --- a/src/stats/permtest.h +++ b/src/stats/permtest.h @@ -56,7 +56,7 @@ namespace MR /*! A class to pre-compute the empirical enhanced statistic image for non-stationarity correction */ class PreProcessor { MEMALIGN (PreProcessor) public: - PreProcessor (const std::shared_ptr stats_calculator, + PreProcessor (const std::shared_ptr stats_calculator, const std::shared_ptr enhancer, matrix_type& global_enhanced_sum, vector>& global_enhanced_count); @@ -66,7 +66,7 @@ namespace MR bool operator() (const Permutation&); protected: - std::shared_ptr stats_calculator; + std::shared_ptr stats_calculator; std::shared_ptr enhancer; matrix_type& global_enhanced_sum; vector>& global_enhanced_count; @@ -83,7 +83,7 @@ namespace MR /*! A class to perform the permutation testing */ class Processor { MEMALIGN (Processor) public: - Processor (const std::shared_ptr stats_calculator, + Processor (const std::shared_ptr stats_calculator, const std::shared_ptr enhancer, const matrix_type& empirical_enhanced_statistics, const matrix_type& default_enhanced_statistics, @@ -95,7 +95,7 @@ namespace MR bool operator() (const Permutation&); protected: - std::shared_ptr stats_calculator; + std::shared_ptr stats_calculator; std::shared_ptr enhancer; const matrix_type& empirical_enhanced_statistics; const matrix_type& default_enhanced_statistics; @@ -111,7 +111,7 @@ namespace MR // Precompute the empircal test statistic for non-stationarity adjustment - void precompute_empirical_stat (const std::shared_ptr stats_calculator, + void precompute_empirical_stat (const std::shared_ptr stats_calculator, const std::shared_ptr enhancer, PermutationStack& perm_stack, matrix_type& empirical_statistic); @@ -119,7 +119,7 @@ namespace MR // Precompute the default statistic image and enhanced statistic. We need to precompute this for calculating the uncorrected p-values. 
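  // Aside, not part of this patch: the uncorrected p-value of an element is
  // estimated as the fraction of shuffles whose statistic at that element is at
  // least as large as the statistic obtained under the default, unshuffled
  // labelling; hence the need to precompute that default statistic. As a rough
  // sketch with hypothetical Eigen names (null_stats being num_shuffles x
  // num_elements, default_stats / counts / uncorrected_pvalues per-element arrays):
  //   for (size_t shuffle = 0; shuffle != num_shuffles; ++shuffle)
  //     counts += (null_stats.row (shuffle).transpose() >= default_stats).cast<double>();
  //   uncorrected_pvalues = counts / double (num_shuffles);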
- void precompute_default_permutation (const std::shared_ptr stats_calculator, + void precompute_default_permutation (const std::shared_ptr stats_calculator, const std::shared_ptr enhancer, const matrix_type& empirical_enhanced_statistic, matrix_type& default_enhanced_statistics, @@ -133,7 +133,7 @@ namespace MR // - Pre-defined permutations (likely provided via a command-line option) // - A requested number of permutations void run_permutations (PermutationStack& perm_stack, - const std::shared_ptr stats_calculator, + const std::shared_ptr stats_calculator, const std::shared_ptr enhancer, const matrix_type& empirical_enhanced_statistic, const matrix_type& default_enhanced_statistics, @@ -142,7 +142,7 @@ namespace MR void run_permutations (const vector>& permutations, - const std::shared_ptr stats_calculator, + const std::shared_ptr stats_calculator, const std::shared_ptr enhancer, const matrix_type& empirical_enhanced_statistic, const matrix_type& default_enhanced_statistics, @@ -151,7 +151,7 @@ namespace MR void run_permutations (const size_t num_permutations, - const std::shared_ptr stats_calculator, + const std::shared_ptr stats_calculator, const std::shared_ptr enhancer, const matrix_type& empirical_enhanced_statistic, const matrix_type& default_enhanced_statistics, From 3d56d8d005d1519a557f254fa732f819713ad057 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Fri, 17 Nov 2017 21:53:16 +1100 Subject: [PATCH 0069/1471] Stats GLM: Fixes for compilation in debug mode --- core/math/stats/glm.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/core/math/stats/glm.cpp b/core/math/stats/glm.cpp index a00fd7e1b0..67f412907c 100644 --- a/core/math/stats/glm.cpp +++ b/core/math/stats/glm.cpp @@ -212,7 +212,7 @@ namespace MR } else if (c[ic].is_F()) { output (iF, ic) = F[iF]; } else { - assert (betahats.cols() == 1); + assert (betahat.cols() == 1); output (iF, ic) = std::sqrt (F[iF]) * (betahat (iF, 0) > 0 ? 1.0 : -1.0); } } @@ -243,7 +243,7 @@ namespace MR { // Make sure that the specified contrasts reflect the full design matrix (with additional // data loaded) - assert (contrasts.cols() == M.cols() + ssize_t(importers.size())); + assert (contrasts[0].cols() == M.cols() + ssize_t(importers.size())); } @@ -379,7 +379,7 @@ namespace MR } else if (c[ic].is_F()) { output (iF, ic) = F[iF]; } else { - assert (betahats.cols() == 1); + assert (betahat.cols() == 1); output (iF, ic) = std::sqrt (F[iF]) * (betahat (iF, 0) > 0 ? 
1.0 : -1.0); } } From 3fa095f93d31a42285d161c694253a962fe4a753 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Sat, 25 Nov 2017 21:46:42 +1100 Subject: [PATCH 0070/1471] vectorstats: First seemingly working version with Freedman-Lane --- cmd/vectorstats.cpp | 12 ++--- core/math/stats/glm.cpp | 92 ++++++++++++++++++++++++++++++--------- testing/tests/vectorstats | 4 +- 3 files changed, 80 insertions(+), 28 deletions(-) diff --git a/cmd/vectorstats.cpp b/cmd/vectorstats.cpp index 8781dc06e6..946238ce9f 100644 --- a/cmd/vectorstats.cpp +++ b/cmd/vectorstats.cpp @@ -152,6 +152,7 @@ void run() contrasts.emplace_back (Contrast (contrast_matrix.row (row))); } const size_t num_contrasts = contrasts.size(); + CONSOLE ("Number of contrasts: " + str(num_contrasts)); // Before validating the contrast matrix, we first need to see if there are any // additional design matrix columns coming from voxel-wise subject data @@ -175,6 +176,7 @@ void run() throw Exception ("the number of columns per contrast (" + str(contrasts[0].cols()) + ")" + " does not equal the number of columns in the design matrix (" + str(design.cols()) + ")" + (extra_columns.size() ? " (taking into account the " + str(extra_columns.size()) + " uses of -column)" : "")); + CONSOLE ("Number of factors: " + str(num_factors)); const std::string output_prefix = argument[3]; @@ -297,8 +299,8 @@ void run() ProgressBar progress ("outputting beta coefficients, effect size and standard deviation", 2 + (2 * num_contrasts)); save_matrix (betas, output_prefix + "betas.csv"); ++progress; for (size_t i = 0; i != num_contrasts; ++i) { - save_vector (abs_effect_size.row(i), output_prefix + "abs_effect" + postfix(i) + ".csv"); ++progress; - save_vector (std_effect_size.row(i), output_prefix + "std_effect" + postfix(i) + ".csv"); ++progress; + save_vector (abs_effect_size.col(i), output_prefix + "abs_effect" + postfix(i) + ".csv"); ++progress; + save_vector (std_effect_size.col(i), output_prefix + "std_effect" + postfix(i) + ".csv"); ++progress; } save_vector (stdev, output_prefix + "std_dev.csv"); } @@ -313,7 +315,7 @@ void run() matrix_type default_tvalues; (*glm_test) (default_permutation, default_tvalues); for (size_t i = 0; i != num_contrasts; ++i) - save_matrix (default_tvalues.row(i), output_prefix + "tvalue" + postfix(i) + ".csv"); + save_matrix (default_tvalues.col(i), output_prefix + "tvalue" + postfix(i) + ".csv"); // Perform permutation testing if (!get_options ("notest").size()) { @@ -334,8 +336,8 @@ void run() matrix_type default_pvalues (num_elements, num_contrasts); Math::Stats::Permutation::statistic2pvalue (null_distribution, default_tvalues, default_pvalues); for (size_t i = 0; i != num_contrasts; ++i) { - save_vector (default_pvalues.row(i), output_prefix + "fwe_pvalue.csv"); - save_vector (uncorrected_pvalues.row(i), output_prefix + "uncorrected_pvalue.csv"); + save_vector (default_pvalues.col(i), output_prefix + "fwe_pvalue" + postfix(i) + ".csv"); + save_vector (uncorrected_pvalues.col(i), output_prefix + "uncorrected_pvalue" + postfix(i) + ".csv"); } } diff --git a/core/math/stats/glm.cpp b/core/math/stats/glm.cpp index 67f412907c..13c0ba3ab2 100644 --- a/core/math/stats/glm.cpp +++ b/core/math/stats/glm.cpp @@ -102,28 +102,33 @@ namespace MR vector_type& stdev) { betas = solve_betas (measurements, design); - //std::cerr << "Betas: " << betas.rows() << " x " << betas.cols() << ", max " << betas.array().maxCoeff() << "\n"; + std::cerr << "Betas: " << betas.rows() << " x " << betas.cols() << ", max " << betas.array().maxCoeff() << 
"\n"; abs_effect_size.resize (measurements.rows(), contrasts.size()); // TESTME Surely this doesn't make sense for an F-test? - for (size_t ic = 0; ic != contrasts.size(); ++ic) - abs_effect_size.col (ic) = matrix_type (contrasts[ic]) * betas; - //std::cerr << "abs_effect_size: " << abs_effect_size.rows() << " x " << abs_effect_size.cols() << ", max " << abs_effect_size.array().maxCoeff() << "\n"; + for (size_t ic = 0; ic != contrasts.size(); ++ic) { + if (contrasts[ic].is_F()) { + abs_effect_size.col (ic).setZero(); + } else { + abs_effect_size.col (ic) = (matrix_type (contrasts[ic]) * betas).row (0); + } + } + std::cerr << "abs_effect_size: " << abs_effect_size.rows() << " x " << abs_effect_size.cols() << ", max " << abs_effect_size.array().maxCoeff() << "\n"; matrix_type residuals = measurements.transpose() - design * betas; residuals = residuals.array().pow (2.0); - //std::cerr << "residuals: " << residuals.rows() << " x " << residuals.cols() << ", max " << residuals.array().maxCoeff() << "\n"; + std::cerr << "residuals: " << residuals.rows() << " x " << residuals.cols() << ", max " << residuals.array().maxCoeff() << "\n"; matrix_type one_over_dof (1, measurements.cols()); one_over_dof.fill (1.0 / value_type(design.rows()-Math::rank (design))); - //std::cerr << "one_over_dof: " << one_over_dof.rows() << " x " << one_over_dof.cols() << ", max " << one_over_dof.array().maxCoeff() << "\n"; - //VAR (design.rows()); - //VAR (Math::rank (design)); - stdev = (one_over_dof * residuals).array().sqrt(); - //std::cerr << "stdev: " << stdev.rows() << " x " << stdev.cols() << ", max " << stdev.array().maxCoeff() << "\n"; + std::cerr << "one_over_dof: " << one_over_dof.rows() << " x " << one_over_dof.cols() << ", max " << one_over_dof.array().maxCoeff() << "\n"; + VAR (design.rows()); + VAR (Math::rank (design)); + stdev = (one_over_dof * residuals).array().sqrt().row(0); + std::cerr << "stdev: " << stdev.size() << ", max " << stdev.array().maxCoeff() << "\n"; // TODO Should be a cleaner way of doing this (broadcasting?) 
matrix_type stdev_fill (abs_effect_size.rows(), abs_effect_size.cols()); - for (ssize_t i = 0; i != stdev_fill.rows(); ++i) - stdev_fill.row(i) = stdev; + for (ssize_t i = 0; i != stdev_fill.cols(); ++i) + stdev_fill.col(i) = stdev; std_effect_size = abs_effect_size.array() / stdev_fill.array(); - //std::cerr << "std_effect_size: " << std_effect_size.rows() << " x " << std_effect_size.cols() << ", max " << std_effect_size.array().maxCoeff() << "\n"; + std::cerr << "std_effect_size: " << std_effect_size.rows() << " x " << std_effect_size.cols() << ", max " << std_effect_size.array().maxCoeff() << "\n"; } @@ -194,14 +199,59 @@ namespace MR // First, we perform permutation of the input data // In Freedman-Lane, the initial 'effective' regression against the nuisance // variables, and permutation of the data, are done in a single step - const matrix_type Sy = perm_matrix * partitions[ic].Rz * y; + //VAR (perm_matrix.rows()); + //VAR (perm_matrix.cols()); + //VAR (partitions[ic].Rz.rows()); + //VAR (partitions[ic].Rz.cols()); + //VAR (y.rows()); + //VAR (y.cols()); + auto temp = perm_matrix * partitions[ic].Rz; + //VAR (temp.rows()); + //VAR (temp.cols()); + //const matrix_type Sy = perm_matrix * partitions[ic].Rz * y.rowwise(); + + // TODO Re-attempt performing this as a single matrix multiplication across all elements + matrix_type Sy (y.rows(), y.cols()); + for (size_t ie = 0; ie != y.rows(); ++ie) + Sy.row (ie) = temp * y.row (ie).transpose(); + //VAR (Sy.rows()); + //VAR (Sy.cols()); // Now, we regress this shuffled data against the full model - //ttest (tvalues, X.transpose(), pinvX.transpose(), Sy, betas, residuals_t); - beta.noalias() = Sy * pinvM; - betahat.noalias() = beta * matrix_type(c[ic]); - F = (betahat.transpose() * (partitions[ic].X.inverse()*partitions[ic].X) * betahat / c[ic].rank()) / - ((Rm*Sy).squaredNorm() / (num_subjects() - partitions[ic].rank_x - partitions[ic].rank_z)); + //VAR (pinvM.rows()); + //VAR (pinvM.cols()); + beta.noalias() = pinvM * Sy.transpose(); + //VAR (beta.rows()); + //VAR (beta.cols()); + //VAR (matrix_type(c[ic]).rows()); + //VAR (matrix_type(c[ic]).cols()); + betahat = matrix_type(c[ic]) * beta; + //VAR (betahat.rows()); + //VAR (betahat.cols()); + //VAR (partitions[ic].X.rows()); + //VAR (partitions[ic].X.cols()); + //VAR (Rm.rows()); + //VAR (Rm.cols()); + F.resize (y.rows()); + auto temp1 = partitions[ic].X.transpose()*partitions[ic].X; + //VAR (temp1.rows()); + //VAR (temp1.cols()); + const default_type one_over_dof = num_subjects() - partitions[ic].rank_x - partitions[ic].rank_z; + for (size_t ie = 0; ie != y.rows(); ++ie) { + vector_type this_betahat = betahat.col (ie); + //VAR (this_betahat.size()); + auto temp2 = this_betahat.matrix() * (temp1 * this_betahat.matrix()) / c[ic].rank(); + //VAR (temp2.rows()); + //VAR (temp2.cols()); + auto temp3 = Rm*Sy.transpose().col (ie); + //VAR (temp3.rows()); + //VAR (temp3.cols()); + F[ie] = temp2 (0, 0) / (temp3.squaredNorm() / (num_subjects() - partitions[ic].rank_x - partitions[ic].rank_z)); + } + // TODO Try to use broadcasting here; it doesn't like having colwise() as the RHS argument + //F = (betahat.transpose().rowwise() * ((partitions[ic].X.transpose()*partitions[ic].X) * betahat.colwise()) / c[ic].rank()) / + // ((Rm*Sy.transpose()).colwise().squaredNorm() / (num_subjects() - partitions[ic].rank_x - partitions[ic].rank_z)); + //VAR (F.size()); // Put the results into the output matrix, replacing NaNs with zeroes // TODO Check again to see if this new statistic produces NaNs when input data 
are all zeroes @@ -212,8 +262,8 @@ namespace MR } else if (c[ic].is_F()) { output (iF, ic) = F[iF]; } else { - assert (betahat.cols() == 1); - output (iF, ic) = std::sqrt (F[iF]) * (betahat (iF, 0) > 0 ? 1.0 : -1.0); + assert (betahat.rows() == 1); + output (iF, ic) = std::sqrt (F[iF]) * (betahat (0, iF) > 0 ? 1.0 : -1.0); } } diff --git a/testing/tests/vectorstats b/testing/tests/vectorstats index 0f3d8e129b..2354d4ee87 100644 --- a/testing/tests/vectorstats +++ b/testing/tests/vectorstats @@ -1,5 +1,5 @@ -# 10 subjects per group -N=10 && SNR=2 && \ +# 16 subjects per group +N=16 && SNR=5 && \ python -c """ import random; subj_files = [] From e7105888e6c28605db7690c0e4273a183b4e0526 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Tue, 28 Nov 2017 22:57:57 +1100 Subject: [PATCH 0071/1471] Stats: First compiling implementation of sign-flipping Involves a bumber of changes (some of these may change in the future): - - Class Math::Stats::Shuffler deals with providing shuffling matrices (both permutation and sign-flipping) for the sake of permutation testing. It reads command-line options during initialisation, so that options common across multiple statistical inference commands are parsed using common code. - Removed some redundancy in functions such as run_permutations() by not branching based on the source of permutations, but instead relying on the appropriate construction of the Shuffler class. - Speedups and fixes to Freedman-Lane code. --- cmd/connectomestats.cpp | 50 +---- cmd/fixelcfestats.cpp | 58 +----- cmd/mrclusterstats.cpp | 52 +---- cmd/vectorstats.cpp | 42 ++-- core/math/stats/glm.cpp | 77 ++++---- core/math/stats/glm.h | 14 +- core/math/stats/permutation.cpp | 130 ------------- core/math/stats/permutation.h | 63 ------ core/math/stats/shuffle.cpp | 333 ++++++++++++++++++++++++++++++++ core/math/stats/shuffle.h | 121 ++++++++++++ src/stats/permstack.cpp | 60 ------ src/stats/permstack.h | 69 ------- src/stats/permtest.cpp | 104 ++-------- src/stats/permtest.h | 38 +--- src/stats/tfce.h | 1 - 15 files changed, 559 insertions(+), 653 deletions(-) delete mode 100644 core/math/stats/permutation.cpp delete mode 100644 core/math/stats/permutation.h create mode 100644 core/math/stats/shuffle.cpp create mode 100644 core/math/stats/shuffle.h delete mode 100644 src/stats/permstack.cpp delete mode 100644 src/stats/permstack.h diff --git a/cmd/connectomestats.cpp b/cmd/connectomestats.cpp index ada68be796..815ef9a1e1 100644 --- a/cmd/connectomestats.cpp +++ b/cmd/connectomestats.cpp @@ -20,7 +20,7 @@ #include "file/path.h" #include "math/stats/glm.h" #include "math/stats/import.h" -#include "math/stats/permutation.h" +#include "math/stats/shuffle.h" #include "math/stats/typedefs.h" #include "connectome/enhance.h" @@ -74,7 +74,7 @@ void usage () OPTIONS - + Stats::PermTest::Options (true) + + Math::Stats::shuffle_options (true) // TODO OptionGroup these, and provide a generic loader function + Stats::TFCE::Options (TFCE_DH_DEFAULT, TFCE_E_DEFAULT, TFCE_H_DEFAULT) @@ -143,7 +143,7 @@ class SubjectConnectomeImport : public SubjectDataImportBase default_type operator[] (const size_t index) const override { - assert (index < data.size()); + assert (index < size_t(data.size())); return (data[index]); } @@ -203,9 +203,7 @@ void run() throw Exception ("Unknown enhancement algorithm"); } - size_t num_perms = get_option_value ("nperms", DEFAULT_NUMBER_PERMUTATIONS); const bool do_nonstationary_adjustment = get_options ("nonstationary").size(); - size_t nperms_nonstationary = get_option_value 
("nperms_nonstationarity", DEFAULT_NUMBER_PERMUTATIONS_NONSTATIONARITY); // Load design matrix const matrix_type design = load_matrix (argument[2]); @@ -249,26 +247,6 @@ void run() + (extra_columns.size() ? " (taking into account the " + str(extra_columns.size()) + " uses of -column)" : "")); - // Load permutations file if supplied - opt = get_options ("permutations"); - vector< vector > permutations; - if (opt.size()) { - permutations = Permutation::load_permutations_file (opt[0][0]); - num_perms = permutations.size(); - if (permutations[0].size() != (size_t)design.rows()) - throw Exception ("number of rows in the permutations file (" + str(opt[0][0]) + ") does not match number of rows in design matrix (" + str(design.rows()) + ")"); - } - - // Load non-stationary correction permutations file if supplied - opt = get_options ("permutations_nonstationary"); - vector > permutations_nonstationary; - if (opt.size()) { - permutations_nonstationary = Permutation::load_permutations_file (opt[0][0]); - nperms_nonstationary = permutations.size(); - if (permutations_nonstationary[0].size() != (size_t)design.rows()) - throw Exception ("number of rows in the nonstationary permutations file (" + str(opt[0][0]) + ") does not match number of rows in design matrix (" + str(design.rows()) + ")"); - } - const std::string output_prefix = argument[4]; // Load input data @@ -408,13 +386,7 @@ void run() // If performing non-stationarity adjustment we need to pre-compute the empirical statistic matrix_type empirical_statistic; if (do_nonstationary_adjustment) { - if (permutations_nonstationary.size()) { - Stats::PermTest::PermutationStack perm_stack (permutations_nonstationary, "precomputing empirical statistic for non-stationarity adjustment"); - Stats::PermTest::precompute_empirical_stat (glm_test, enhancer, perm_stack, empirical_statistic); - } else { - Stats::PermTest::PermutationStack perm_stack (nperms_nonstationary, design.rows(), "precomputing empirical statistic for non-stationarity adjustment", true); - Stats::PermTest::precompute_empirical_stat (glm_test, enhancer, perm_stack, empirical_statistic); - } + Stats::PermTest::precompute_empirical_stat (glm_test, enhancer, empirical_statistic); for (size_t i = 0; i != num_contrasts; ++i) save_matrix (mat2vec.V2M (empirical_statistic.row(i)), output_prefix + "_empirical" + postfix(i) + ".csv"); } @@ -433,22 +405,16 @@ void run() // Perform permutation testing if (!get_options ("notest").size()) { - matrix_type null_distribution (num_contrasts, num_perms); - matrix_type uncorrected_pvalues (num_contrasts, num_edges); + matrix_type null_distribution, uncorrected_pvalues; - if (permutations.size()) { - Stats::PermTest::run_permutations (permutations, glm_test, enhancer, empirical_statistic, - enhanced_output, null_distribution, uncorrected_pvalues); - } else { - Stats::PermTest::run_permutations (num_perms, glm_test, enhancer, empirical_statistic, - enhanced_output, null_distribution, uncorrected_pvalues); - } + Stats::PermTest::run_permutations (glm_test, enhancer, empirical_statistic, + enhanced_output, null_distribution, uncorrected_pvalues); for (size_t i = 0; i != num_contrasts; ++i) save_vector (null_distribution.row(i), output_prefix + "_null_dist" + postfix(i) + ".txt"); matrix_type pvalue_output (num_contrasts, num_edges); - Math::Stats::Permutation::statistic2pvalue (null_distribution, enhanced_output, pvalue_output); + Math::Stats::statistic2pvalue (null_distribution, enhanced_output, pvalue_output); for (size_t i = 0; i != num_contrasts; ++i) { 
save_matrix (mat2vec.V2M (pvalue_output.row(i)), output_prefix + "_fwe_pvalue" + postfix(i) + ".csv"); save_matrix (mat2vec.V2M (uncorrected_pvalues.row(i)), output_prefix + "_uncorrected_pvalue" + postfix(i) + ".csv"); diff --git a/cmd/fixelcfestats.cpp b/cmd/fixelcfestats.cpp index ef54791d59..7bc621a745 100644 --- a/cmd/fixelcfestats.cpp +++ b/cmd/fixelcfestats.cpp @@ -23,7 +23,7 @@ #include "fixel/loop.h" #include "math/stats/glm.h" #include "math/stats/import.h" -#include "math/stats/permutation.h" +#include "math/stats/shuffle.h" #include "math/stats/typedefs.h" #include "stats/cfe.h" #include "stats/enhance.h" @@ -86,7 +86,7 @@ void usage () OPTIONS - + Stats::PermTest::Options (true) + + Math::Stats::shuffle_options (true) + OptionGroup ("Parameters for the Connectivity-based Fixel Enhancement algorithm") @@ -186,11 +186,9 @@ void run() const value_type cfe_h = get_option_value ("cfe_h", DEFAULT_CFE_H); const value_type cfe_e = get_option_value ("cfe_e", DEFAULT_CFE_E); const value_type cfe_c = get_option_value ("cfe_c", DEFAULT_CFE_C); - int num_perms = get_option_value ("nperms", DEFAULT_NUMBER_PERMUTATIONS); const value_type smooth_std_dev = get_option_value ("smooth", DEFAULT_SMOOTHING_STD) / 2.3548; const value_type connectivity_threshold = get_option_value ("connectivity", DEFAULT_CONNECTIVITY_THRESHOLD); const bool do_nonstationary_adjustment = get_options ("nonstationary").size(); - int nperms_nonstationary = get_option_value ("nperms_nonstationary", DEFAULT_NUMBER_PERMUTATIONS_NONSTATIONARITY); const value_type angular_threshold = get_option_value ("angle", DEFAULT_ANGLE_THRESHOLD); @@ -238,30 +236,6 @@ void run() if (design.rows() != (ssize_t)importer.size()) throw Exception ("number of input files does not match number of rows in design matrix"); - // Load permutations file if supplied - auto opt = get_options("permutations"); - vector > permutations; - if (opt.size()) { - permutations = Math::Stats::Permutation::load_permutations_file (opt[0][0]); - num_perms = permutations.size(); - if (permutations[0].size() != (size_t)design.rows()) - throw Exception ("number of rows in the permutations file (" + str(opt[0][0]) + ") does not match number of rows in design matrix"); - } - - // Load non-stationary correction permutations file if supplied - opt = get_options("permutations_nonstationary"); - vector > permutations_nonstationary; - if (opt.size()) { - if (do_nonstationary_adjustment) { - permutations_nonstationary = Math::Stats::Permutation::load_permutations_file (opt[0][0]); - nperms_nonstationary = permutations_nonstationary.size(); - if (permutations_nonstationary[0].size() != (size_t)design.rows()) - throw Exception ("number of rows in the nonstationary permutations file (" + str(opt[0][0]) + ") does not match number of rows in design matrix"); - } else { - WARN ("-permutations_nonstationary option ignored: nonstationarity correction is not being performed (-nonstationary option)"); - } - } - // Load contrast matrix vector contrasts; { @@ -275,7 +249,7 @@ void run() // additional design matrix columns coming from fixel-wise subject data vector extra_columns; bool nans_in_columns = false; - opt = get_options ("column"); + auto opt = get_options ("column"); for (size_t i = 0; i != opt.size(); ++i) { extra_columns.push_back (CohortDataImport()); extra_columns[i].initialise (opt[i][0]); @@ -374,7 +348,7 @@ void run() } Header output_header (dynamic_cast(importer[0].get())->header()); - output_header.keyval()["num permutations"] = str(num_perms); + 
//output_header.keyval()["num permutations"] = str(num_perms); output_header.keyval()["dh"] = str(cfe_dh); output_header.keyval()["cfe_e"] = str(cfe_e); output_header.keyval()["cfe_h"] = str(cfe_h); @@ -546,14 +520,7 @@ void run() // If performing non-stationarity adjustment we need to pre-compute the empirical CFE statistic matrix_type empirical_cfe_statistic; if (do_nonstationary_adjustment) { - empirical_cfe_statistic = vector_type::Zero (num_fixels); - if (permutations_nonstationary.size()) { - Stats::PermTest::PermutationStack permutations (permutations_nonstationary, "precomputing empirical statistic for non-stationarity adjustment"); - Stats::PermTest::precompute_empirical_stat (glm_test, cfe_integrator, permutations, empirical_cfe_statistic); - } else { - Stats::PermTest::PermutationStack permutations (nperms_nonstationary, design.rows(), "precomputing empirical statistic for non-stationarity adjustment", false); - Stats::PermTest::precompute_empirical_stat (glm_test, cfe_integrator, permutations, empirical_cfe_statistic); - } + Stats::PermTest::precompute_empirical_stat (glm_test, cfe_integrator, empirical_cfe_statistic); output_header.keyval()["nonstationary adjustment"] = str(true); for (size_t i = 0; i != num_contrasts; ++i) write_fixel_output (Path::join (output_fixel_directory, "cfe_empirical" + postfix(i) + ".mif"), empirical_cfe_statistic.row(i), output_header); @@ -574,16 +541,11 @@ void run() // Perform permutation testing if (!get_options ("notest").size()) { - matrix_type perm_distribution (num_contrasts, num_perms); - matrix_type uncorrected_pvalues (num_contrasts, num_fixels); - if (permutations.size()) { - Stats::PermTest::run_permutations (permutations, glm_test, cfe_integrator, empirical_cfe_statistic, - cfe_output, perm_distribution, uncorrected_pvalues); - } else { - Stats::PermTest::run_permutations (num_perms, glm_test, cfe_integrator, empirical_cfe_statistic, - cfe_output, perm_distribution, uncorrected_pvalues); - } + matrix_type perm_distribution, uncorrected_pvalues; + + Stats::PermTest::run_permutations (glm_test, cfe_integrator, empirical_cfe_statistic, + cfe_output, perm_distribution, uncorrected_pvalues); ProgressBar progress ("outputting final results"); for (size_t i = 0; i != num_contrasts; ++i) { @@ -592,7 +554,7 @@ void run() } matrix_type pvalue_output (num_contrasts, num_fixels); - Math::Stats::Permutation::statistic2pvalue (perm_distribution, cfe_output, pvalue_output); + Math::Stats::statistic2pvalue (perm_distribution, cfe_output, pvalue_output); ++progress; for (size_t i = 0; i != num_contrasts; ++i) { write_fixel_output (Path::join (output_fixel_directory, "fwe_pvalue" + postfix(i) + ".mif"), pvalue_output.row(i), output_header); diff --git a/cmd/mrclusterstats.cpp b/cmd/mrclusterstats.cpp index 577fe8ba06..eb6f8d9498 100644 --- a/cmd/mrclusterstats.cpp +++ b/cmd/mrclusterstats.cpp @@ -20,7 +20,7 @@ #include "math/stats/glm.h" #include "math/stats/import.h" -#include "math/stats/permutation.h" +#include "math/stats/shuffle.h" #include "math/stats/typedefs.h" #include "stats/cluster.h" @@ -73,7 +73,7 @@ void usage () OPTIONS - + Stats::PermTest::Options (true) + + Math::Stats::shuffle_options (true) + Stats::TFCE::Options (DEFAULT_TFCE_DH, DEFAULT_TFCE_E, DEFAULT_TFCE_H) @@ -180,9 +180,6 @@ void run() { const value_type tfce_H = get_option_value ("tfce_h", DEFAULT_TFCE_H); const value_type tfce_E = get_option_value ("tfce_e", DEFAULT_TFCE_E); const bool use_tfce = !std::isfinite (cluster_forming_threshold); - int num_perms = get_option_value 
("nperms", DEFAULT_NUMBER_PERMUTATIONS); - int nperms_nonstationary = get_option_value ("nperms_nonstationary", DEFAULT_NUMBER_PERMUTATIONS_NONSTATIONARITY); - const bool do_26_connectivity = get_options("connectivity").size(); const bool do_nonstationary_adjustment = get_options ("nonstationary").size(); @@ -242,26 +239,6 @@ void run() { + " does not equal the number of columns in the design matrix (" + str(design.cols()) + ")" + (extra_columns.size() ? " (taking into account the " + str(extra_columns.size()) + " uses of -column)" : "")); - // Load permutations file if supplied - opt = get_options("permutations"); - vector > permutations; - if (opt.size()) { - permutations = Math::Stats::Permutation::load_permutations_file (opt[0][0]); - num_perms = permutations.size(); - if (permutations[0].size() != (size_t)design.rows()) - throw Exception ("number of rows in the permutations file (" + str(opt[0][0]) + ") does not match number of rows in design matrix"); - } - - // Load non-stationary correction permutations file if supplied - opt = get_options("permutations_nonstationary"); - vector > permutations_nonstationary; - if (opt.size()) { - permutations_nonstationary = Math::Stats::Permutation::load_permutations_file (opt[0][0]); - nperms_nonstationary = permutations.size(); - if (permutations_nonstationary[0].size() != (size_t)design.rows()) - throw Exception ("number of rows in the nonstationary permutations file (" + str(opt[0][0]) + ") does not match number of rows in design matrix"); - } - matrix_type data (num_voxels, importer.size()); bool nans_in_data = false; { @@ -283,7 +260,7 @@ void run() { Header output_header (mask_header); output_header.datatype() = DataType::Float32; - output_header.keyval()["num permutations"] = str(num_perms); + //output_header.keyval()["num permutations"] = str(num_perms); output_header.keyval()["26 connectivity"] = str(do_26_connectivity); output_header.keyval()["nonstationary adjustment"] = str(do_nonstationary_adjustment); if (use_tfce) { @@ -425,30 +402,17 @@ void run() { if (do_nonstationary_adjustment) { if (!use_tfce) throw Exception ("nonstationary adjustment is not currently implemented for threshold-based cluster analysis"); - if (permutations_nonstationary.size()) { - Stats::PermTest::PermutationStack permutations (permutations_nonstationary, "precomputing empirical statistic for non-stationarity adjustment..."); - Stats::PermTest::precompute_empirical_stat (glm_test, enhancer, permutations, empirical_enhanced_statistic); - } else { - Stats::PermTest::PermutationStack permutations (nperms_nonstationary, design.rows(), "precomputing empirical statistic for non-stationarity adjustment...", false); - Stats::PermTest::precompute_empirical_stat (glm_test, enhancer, permutations, empirical_enhanced_statistic); - } - + Stats::PermTest::precompute_empirical_stat (glm_test, enhancer, empirical_enhanced_statistic); for (size_t i = 0; i != num_contrasts; ++i) save_vector (empirical_enhanced_statistic.row(i), prefix + "empirical" + postfix(i) + ".txt"); } if (!get_options ("notest").size()) { - matrix_type perm_distribution (num_contrasts, num_perms); - matrix_type uncorrected_pvalue (num_contrasts, num_voxels); + matrix_type perm_distribution, uncorrected_pvalue; - if (permutations.size()) { - Stats::PermTest::run_permutations (permutations, glm_test, enhancer, empirical_enhanced_statistic, - default_cluster_output, perm_distribution, uncorrected_pvalue); - } else { - Stats::PermTest::run_permutations (num_perms, glm_test, enhancer, 
empirical_enhanced_statistic, - default_cluster_output, perm_distribution, uncorrected_pvalue); - } + Stats::PermTest::run_permutations (glm_test, enhancer, empirical_enhanced_statistic, + default_cluster_output, perm_distribution, uncorrected_pvalue); for (size_t i = 0; i != num_contrasts; ++i) save_vector (perm_distribution.row(i), prefix + "perm_dist" + postfix(i) + ".txt"); @@ -459,7 +423,7 @@ void run() { ++progress; } matrix_type fwe_pvalue_output (num_contrasts, num_voxels); - Math::Stats::Permutation::statistic2pvalue (perm_distribution, default_cluster_output, fwe_pvalue_output); + Math::Stats::statistic2pvalue (perm_distribution, default_cluster_output, fwe_pvalue_output); ++progress; for (size_t i = 0; i != num_contrasts; ++i) { write_output (fwe_pvalue_output.row(i), v2v, prefix + "fwe_pvalue" + postfix(i) + ".mif", output_header); diff --git a/cmd/vectorstats.cpp b/cmd/vectorstats.cpp index 946238ce9f..bff08f6f81 100644 --- a/cmd/vectorstats.cpp +++ b/cmd/vectorstats.cpp @@ -20,7 +20,7 @@ #include "file/path.h" #include "math/stats/glm.h" #include "math/stats/import.h" -#include "math/stats/permutation.h" +#include "math/stats/shuffle.h" #include "math/stats/typedefs.h" #include "stats/permtest.h" @@ -60,7 +60,7 @@ void usage () OPTIONS - + Stats::PermTest::Options (false) + + Math::Stats::shuffle_options (false) + OptionGroup ("Additional options for vectorstats") @@ -127,23 +127,11 @@ void run() throw Exception ("Subject file \"" + importer[i]->name() + "\" contains incorrect number of elements (" + str(importer[i]) + "; expected " + str(num_elements) + ")"); } - size_t num_perms = get_option_value ("nperms", DEFAULT_NUMBER_PERMUTATIONS); - // Load design matrix const matrix_type design = load_matrix (argument[1]); if (size_t(design.rows()) != num_subjects) throw Exception ("Number of subjects (" + str(num_subjects) + ") does not match number of rows in design matrix (" + str(design.rows()) + ")"); - // Load permutations file if supplied - auto opt = get_options("permutations"); - vector > permutations; - if (opt.size()) { - permutations = Math::Stats::Permutation::load_permutations_file (opt[0][0]); - num_perms = permutations.size(); - if (permutations[0].size() != (size_t)design.rows()) - throw Exception ("number of rows in the permutations file (" + str(opt[0][0]) + ") does not match number of rows in design matrix"); - } - // Load contrast matrix vector contrasts; { @@ -158,7 +146,7 @@ void run() // additional design matrix columns coming from voxel-wise subject data vector extra_columns; bool nans_in_columns = false; - opt = get_options ("column"); + auto opt = get_options ("column"); for (size_t i = 0; i != opt.size(); ++i) { extra_columns.push_back (CohortDataImport()); extra_columns[i].initialise (opt[i][0]); @@ -308,12 +296,10 @@ void run() // Precompute default statistic // Don't use convenience function: No enhancer! 
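 In the hunk below, the per-subject permutation labelling is replaced by a full shuffling matrix, with the identity matrix standing in for the unshuffled default. For orientation only, a sketch with hypothetical names (not code from this patch set): a permutation labelling corresponds to a matrix with a single 1 per row, and sign-flipping to +1/-1 entries on that same support, so pre-multiplying the data by such a matrix permutes and/or flips rows.

   #include <Eigen/Dense>
   #include <vector>

   // Build a shuffling matrix S such that S * data permutes rows of 'data' according
   // to 'labelling' and negates those rows where 'flips' is set. With the identity
   // labelling and no flips, S is the identity matrix: the "no shuffle" default.
   Eigen::MatrixXd shuffling_matrix (const std::vector<size_t>& labelling,
                                     const std::vector<bool>& flips)
   {
     const size_t n = labelling.size();
     Eigen::MatrixXd S = Eigen::MatrixXd::Zero (n, n);
     for (size_t row = 0; row != n; ++row)
       S (row, labelling[row]) = flips[row] ? -1.0 : 1.0;
     return S;
   }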
- // Manually construct default permutation - vector default_permutation (num_subjects); - for (size_t i = 0; i != num_subjects; ++i) - default_permutation[i] = i; + // Manually construct default shuffling matrix + const matrix_type default_shuffle (matrix_type::Identity (num_subjects, num_subjects)); matrix_type default_tvalues; - (*glm_test) (default_permutation, default_tvalues); + (*glm_test) (default_shuffle, default_tvalues); for (size_t i = 0; i != num_contrasts; ++i) save_matrix (default_tvalues.col(i), output_prefix + "tvalue" + postfix(i) + ".csv"); @@ -321,20 +307,14 @@ void run() if (!get_options ("notest").size()) { std::shared_ptr enhancer; - matrix_type null_distribution (num_perms, num_contrasts); - matrix_type uncorrected_pvalues (num_elements, num_contrasts); - matrix_type empirical_distribution; + matrix_type null_distribution, uncorrected_pvalues; + matrix_type empirical_distribution; // unused - if (permutations.size()) { - Stats::PermTest::run_permutations (permutations, glm_test, enhancer, empirical_distribution, - default_tvalues, null_distribution, uncorrected_pvalues); - } else { - Stats::PermTest::run_permutations (num_perms, glm_test, enhancer, empirical_distribution, - default_tvalues, null_distribution, uncorrected_pvalues); - } + Stats::PermTest::run_permutations (glm_test, enhancer, empirical_distribution, + default_tvalues, null_distribution, uncorrected_pvalues); matrix_type default_pvalues (num_elements, num_contrasts); - Math::Stats::Permutation::statistic2pvalue (null_distribution, default_tvalues, default_pvalues); + Math::Stats::statistic2pvalue (null_distribution, default_tvalues, default_pvalues); for (size_t i = 0; i != num_contrasts; ++i) { save_vector (default_pvalues.col(i), output_prefix + "fwe_pvalue" + postfix(i) + ".csv"); save_vector (uncorrected_pvalues.col(i), output_prefix + "uncorrected_pvalue" + postfix(i) + ".csv"); diff --git a/core/math/stats/glm.cpp b/core/math/stats/glm.cpp index 13c0ba3ab2..f42980e573 100644 --- a/core/math/stats/glm.cpp +++ b/core/math/stats/glm.cpp @@ -175,20 +175,12 @@ namespace MR - void TestFixed::operator() (const vector& perm_labelling, matrix_type& output) const + void TestFixed::operator() (const matrix_type& shuffling_matrix, matrix_type& output) const { - assert (perm_labelling.size() == num_subjects()); + assert (shuffling_matrix.rows() == num_subjects()); if (!(size_t(output.rows()) == num_elements() && size_t(output.cols()) == num_outputs())) output.resize (num_elements(), num_outputs()); - - // TODO Re-express the permutation labelling as a permutation matrix - // (we'll deal with altering how these permutations are provided - // to the GLM code later) - matrix_type perm_matrix (matrix_type::Zero (num_subjects(), num_subjects())); - for (size_t i = 0; i != num_subjects(); ++i) - perm_matrix (i, perm_labelling[i]) = value_type(1); // TESTME - matrix_type beta, betahat; vector_type F; @@ -205,17 +197,17 @@ namespace MR //VAR (partitions[ic].Rz.cols()); //VAR (y.rows()); //VAR (y.cols()); - auto temp = perm_matrix * partitions[ic].Rz; - //VAR (temp.rows()); - //VAR (temp.cols()); - //const matrix_type Sy = perm_matrix * partitions[ic].Rz * y.rowwise(); - + auto PRz = shuffling_matrix * partitions[ic].Rz; + //VAR (PRz.rows()); + //VAR (PRz.cols()); // TODO Re-attempt performing this as a single matrix multiplication across all elements matrix_type Sy (y.rows(), y.cols()); - for (size_t ie = 0; ie != y.rows(); ++ie) - Sy.row (ie) = temp * y.row (ie).transpose(); + for (ssize_t ie = 0; ie != y.rows(); 
++ie) + Sy.row (ie) = PRz * y.row (ie).transpose(); //VAR (Sy.rows()); //VAR (Sy.cols()); + // TODO Change measurements matrix convention to store data for each subject in a row; + // means data across subjects for a particular element appear in a column, which is contiguous // Now, we regress this shuffled data against the full model //VAR (pinvM.rows()); @@ -232,21 +224,29 @@ namespace MR //VAR (partitions[ic].X.cols()); //VAR (Rm.rows()); //VAR (Rm.cols()); + auto XtX = partitions[ic].X.transpose()*partitions[ic].X; + //VAR (XtX.rows()); + //VAR (XtX.cols()); + const default_type one_over_dof = 1.0 / (num_subjects() - partitions[ic].rank_x - partitions[ic].rank_z); + auto residuals = Rm*Sy.transpose(); + //VAR (residuals.rows()); + //VAR (residuals.cols()); + vector_type temp3 = residuals.colwise().squaredNorm(); + //VAR (temp3.rows()); + //VAR (temp3.cols()); + // FIXME This should be giving a vector, not a matrix + //auto temp4 = betahat.transpose() * (XtX * betahat) / c[ic].rank(); + //VAR (temp4.rows()); + //VAR (temp4.cols()); + //std::cerr << temp4 << "\n"; F.resize (y.rows()); - auto temp1 = partitions[ic].X.transpose()*partitions[ic].X; - //VAR (temp1.rows()); - //VAR (temp1.cols()); - const default_type one_over_dof = num_subjects() - partitions[ic].rank_x - partitions[ic].rank_z; - for (size_t ie = 0; ie != y.rows(); ++ie) { + for (ssize_t ie = 0; ie != y.rows(); ++ie) { vector_type this_betahat = betahat.col (ie); //VAR (this_betahat.size()); - auto temp2 = this_betahat.matrix() * (temp1 * this_betahat.matrix()) / c[ic].rank(); - //VAR (temp2.rows()); - //VAR (temp2.cols()); - auto temp3 = Rm*Sy.transpose().col (ie); - //VAR (temp3.rows()); - //VAR (temp3.cols()); - F[ie] = temp2 (0, 0) / (temp3.squaredNorm() / (num_subjects() - partitions[ic].rank_x - partitions[ic].rank_z)); + auto temp2 = this_betahat.matrix() * (XtX * this_betahat.matrix()) / c[ic].rank(); + assert (temp2.rows() == 1); + assert (temp2.cols() == 1); + F[ie] = temp2 (0, 0) / (one_over_dof * temp3[ie]); } // TODO Try to use broadcasting here; it doesn't like having colwise() as the RHS argument //F = (betahat.transpose().rowwise() * ((partitions[ic].X.transpose()*partitions[ic].X) * betahat.colwise()) / c[ic].rank()) / @@ -298,16 +298,11 @@ namespace MR - void TestVariable::operator() (const vector& perm_labelling, matrix_type& output) const + void TestVariable::operator() (const matrix_type& shuffling_matrix, matrix_type& output) const { if (!(size_t(output.rows()) == num_elements() && size_t(output.cols()) == num_outputs())) output.resize (num_elements(), num_outputs()); - // Convert permutation labelling to a matrix, as for the fixed design matrix case - matrix_type perm_matrix (matrix_type::Zero (num_subjects(), num_subjects())); - for (size_t i = 0; i != num_subjects(); ++i) - perm_matrix (i, perm_labelling[i]) = value_type(1); - // Let's loop over elements first, then contrasts in the inner loop for (ssize_t element = 0; element != y.rows(); ++element) { @@ -361,7 +356,7 @@ namespace MR Mfull_masked.resize (num_subjects(), num_factors()); Mfull_masked.block (0, 0, num_subjects(), M.cols()) = M; Mfull_masked.block (0, M.cols(), num_subjects(), extra_data.cols()) = extra_data; - perm_matrix_masked = perm_matrix; + perm_matrix_masked = shuffling_matrix; y_masked = y.row (element); } else { @@ -380,20 +375,20 @@ namespace MR // Any row in the permutation matrix that contains a non-zero entry // in the column corresponding to in_row needs to be removed // from the permutation matrix - for (ssize_t perm_row 
= 0; perm_row != perm_matrix.rows(); ++perm_row) { - if (perm_matrix (perm_row, in_index)) + for (ssize_t perm_row = 0; perm_row != shuffling_matrix.rows(); ++perm_row) { + if (shuffling_matrix (perm_row, in_index)) perm_matrix_mask[perm_row] = false; } } } - assert (out_index == finite_count); - assert (perm_matrix_mask.count() == finite_count); + assert (out_index == ssize_t(finite_count)); + assert (perm_matrix_mask.count() == ssize_t(finite_count)); // Only after we've reduced the design matrix do we now reduce the permutation matrix perm_matrix_masked.resize (finite_count, num_subjects()); out_index = 0; for (size_t in_index = 0; in_index != num_subjects(); ++in_index) { if (perm_matrix_mask[in_index]) - perm_matrix_masked.row (out_index++) = perm_matrix.row (in_index); + perm_matrix_masked.row (out_index++) = shuffling_matrix.row (in_index); } assert (out_index == finite_count); diff --git a/core/math/stats/glm.h b/core/math/stats/glm.h index 014d5449b6..5a79f296fc 100644 --- a/core/math/stats/glm.h +++ b/core/math/stats/glm.h @@ -173,10 +173,10 @@ namespace MR } /*! Compute the statistics - * @param perm_labelling a vector to shuffle the rows in the design matrix (for permutation testing) - * @param output the matrix containing the output statistics (one vector per contrast) + * @param shuffling_matrix a matrix to permute / sign flip the residuals (for permutation testing) + * @param output the matrix containing the output statistics (one column per contrast) */ - virtual void operator() (const vector& perm_labelling, matrix_type& output) const = 0; + virtual void operator() (const matrix_type& shuffling_matrix, matrix_type& output) const = 0; size_t num_elements () const { return y.rows(); } size_t num_outputs () const { return c.size(); } @@ -212,10 +212,10 @@ namespace MR TestFixed (const matrix_type& measurements, const matrix_type& design, const vector& contrasts); /*! Compute the statistics - * @param perm_labelling a vector to shuffle the rows in the design matrix (for permutation testing) + * @param shuffling_matrix a matrix to permute / sign flip the residuals (for permutation testing) * @param output the vector containing the output t-statistics (one column per contrast) */ - void operator() (const vector& perm_labelling, matrix_type& output) const override; + void operator() (const matrix_type& shuffling_matrix, matrix_type& output) const override; protected: // New classes to store information relevant to Freedman-Lane implementation @@ -251,13 +251,13 @@ namespace MR const bool nans_in_columns); /*! Compute the statistics - * @param perm_labelling a vector to shuffle the rows in the design matrix (for permutation testing) + * @param shuffling_matrix a matrix to permute / sign flip the residuals (for permutation testing) * @param output the vector containing the output statistics * * In TestVariable, this function additionally needs to import the * extra external data individually for each element tested. */ - void operator() (const vector& perm_labelling, matrix_type& output) const override; + void operator() (const matrix_type& shuffling_matrix, matrix_type& output) const override; /*! 
Acquire the design matrix for the default permutation * (note that this needs to be re-run for each element being tested) diff --git a/core/math/stats/permutation.cpp b/core/math/stats/permutation.cpp deleted file mode 100644 index cf2f9088df..0000000000 --- a/core/math/stats/permutation.cpp +++ /dev/null @@ -1,130 +0,0 @@ -/* Copyright (c) 2008-2017 the MRtrix3 contributors - * - * This Source Code Form is subject to the terms of the Mozilla Public - * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/. - * - * MRtrix is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - * - * For more details, see http://www.mrtrix.org/. - */ - - -#include "math/stats/permutation.h" -#include "math/math.h" - -namespace MR -{ - namespace Math - { - namespace Stats - { - namespace Permutation - { - - - - bool is_duplicate (const vector& v1, const vector& v2) - { - for (size_t i = 0; i < v1.size(); i++) { - if (v1[i] != v2[i]) - return false; - } - return true; - } - - - - bool is_duplicate (const vector& perm, - const vector >& previous_permutations) - { - for (size_t p = 0; p < previous_permutations.size(); p++) { - if (is_duplicate (perm, previous_permutations[p])) - return true; - } - return false; - } - - - - void generate (const size_t num_perms, - const size_t num_subjects, - vector >& permutations, - const bool include_default) - { - permutations.clear(); - vector default_labelling (num_subjects); - for (size_t i = 0; i < num_subjects; ++i) - default_labelling[i] = i; - size_t p = 0; - if (include_default) { - permutations.push_back (default_labelling); - ++p; - } - for (;p < num_perms; ++p) { - vector permuted_labelling (default_labelling); - do { - std::random_shuffle (permuted_labelling.begin(), permuted_labelling.end()); - } while (is_duplicate (permuted_labelling, permutations)); - permutations.push_back (permuted_labelling); - } - } - - - - void statistic2pvalue (const matrix_type& null_dist, const matrix_type& stats, matrix_type& pvalues) - { - pvalues.resize (stats.rows(), stats.cols()); - for (size_t contrast = 0; contrast != stats.cols(); ++contrast) { - vector sorted_null_dist; - sorted_null_dist.reserve (null_dist.rows()); - for (size_t perm = 0; perm != null_dist.rows(); ++perm) - sorted_null_dist.push_back (null_dist(perm, contrast)); - std::sort (sorted_null_dist.begin(), sorted_null_dist.end()); - for (size_t element = 0; element != size_t(stats.rows()); ++element) { - if (stats(element, contrast) > 0.0) { - value_type pvalue = 1.0; - for (size_t j = 0; j < size_t(sorted_null_dist.size()); ++j) { - if (stats(element, contrast) < sorted_null_dist[j]) { - pvalue = value_type(j) / value_type(sorted_null_dist.size()); - break; - } - } - pvalues(element, contrast) = pvalue; - } else { - pvalues(element, contrast) = 0.0; - } - } - } - } - - - - vector > load_permutations_file (const std::string& filename) { - vector > temp = load_matrix_2D_vector (filename); - if (!temp.size()) - throw Exception ("no data found in permutations file: " + str(filename)); - - size_t min_value = *std::min_element (std::begin (temp[0]), std::end (temp[0])); - if (min_value > 1) - throw Exception ("indices for relabelling in permutations file must start from either 0 or 1"); - - vector > permutations (temp[0].size(), vector(temp.size())); - for (vector::size_type i = 0; i < temp[0].size(); i++) { - for 
(vector::size_type j = 0; j < temp.size(); j++) { - if (!temp[j][i]) - throw Exception ("Pre-defined permutation labelling file \"" + filename + "\" contains zeros; labels should be indexed from one"); - permutations[i][j] = temp[j][i] - min_value; - } - } - return permutations; - } - - - - } - } - } -} diff --git a/core/math/stats/permutation.h b/core/math/stats/permutation.h deleted file mode 100644 index 2e9610b996..0000000000 --- a/core/math/stats/permutation.h +++ /dev/null @@ -1,63 +0,0 @@ -/* Copyright (c) 2008-2017 the MRtrix3 contributors - * - * This Source Code Form is subject to the terms of the Mozilla Public - * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/. - * - * MRtrix is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - * - * For more details, see http://www.mrtrix.org/. - */ - - -#ifndef __math_stats_permutation_h__ -#define __math_stats_permutation_h__ - -#include - -#include "math/stats/typedefs.h" - -namespace MR -{ - namespace Math - { - namespace Stats - { - namespace Permutation - { - - - - typedef Math::Stats::value_type value_type; - typedef Math::Stats::vector_type vector_type; - typedef Math::Stats::matrix_type matrix_type; - - - - bool is_duplicate (const vector&, const vector&); - bool is_duplicate (const vector&, const vector >&); - - // Note that this function does not take into account grouping of subjects and therefore generated - // permutations are not guaranteed to be unique wrt the computed test statistic. - // Providing the number of subjects is large then the likelihood of generating duplicates is low. - void generate (const size_t num_perms, - const size_t num_subjects, - vector >& permutations, - const bool include_default); - - void statistic2pvalue (const matrix_type& null_dist, const matrix_type& stats, matrix_type& pvalues); - - - vector > load_permutations_file (const std::string& filename); - - - - - } - } - } -} - -#endif diff --git a/core/math/stats/shuffle.cpp b/core/math/stats/shuffle.cpp new file mode 100644 index 0000000000..27c69f2b20 --- /dev/null +++ b/core/math/stats/shuffle.cpp @@ -0,0 +1,333 @@ +/* Copyright (c) 2008-2017 the MRtrix3 contributors + * + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, you can obtain one at http://mozilla.org/MPL/2.0/. + * + * MRtrix is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * For more details, see http://www.mrtrix.org/. 
+ */ + + +#include "math/stats/shuffle.h" + +#include + +#include "math/math.h" + +namespace MR +{ + namespace Math + { + namespace Stats + { + + + + const char* error_types[] = { "ee", "ise", "both", nullptr }; + + + App::OptionGroup shuffle_options (const bool include_nonstationarity) + { + using namespace App; + + OptionGroup result = OptionGroup ("Options relating to shuffling of data for nonparametric statistical inference") + + + Option ("errors", "specify nature of errors for shuffling; options are: " + join(error_types, ",") + " (default: ee)") + + Argument ("spec").type_choice (error_types) + + // TODO Find a better place for this + //+ Option ("notest", "don't perform statistical inference; only output population statistics (effect size, stdev etc)") + + + Option ("nshuffles", "the number of shuffles (default: " + str(DEFAULT_NUMBER_SHUFFLES) + ")") + + Argument ("number").type_integer (1) + + + Option ("permutations", "manually define the permutations (relabelling). The input should be a text file defining a m x n matrix, " + "where each relabelling is defined as a column vector of size m, and the number of columns, n, defines " + "the number of permutations. Can be generated with the palm_quickperms function in PALM (http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/PALM). " + "Overrides the -nshuffles option.") + + Argument ("file").type_file_in() + + // TODO See what is available in PALM + + Option ("signflips", "manually define the signflips") + + Argument ("file").type_file_in(); + + if (include_nonstationarity) { + + result + + Option ("nonstationarity", "perform non-stationarity correction") + + + Option ("nshuffles_nonstationary", "the number of shuffles to use when precomputing the empirical statistic image for non-stationarity correction (default: " + str(DEFAULT_NUMBER_SHUFFLES_NONSTATIONARITY) + ")") + + Argument ("number").type_integer (1) + + + Option ("permutations_nonstationarity", "manually define the permutations (relabelling) for computing the emprical statistics for non-stationarity correction. " + "The input should be a text file defining a m x n matrix, where each relabelling is defined as a column vector of size m, " + "and the number of columns, n, defines the number of permutations. Can be generated with the palm_quickperms function in PALM " + "(http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/PALM) " + "Overrides the -nshuffles_nonstationarity option.") + + Argument ("file").type_file_in() + + + Option ("signflips_nonstationarity", "manually define the signflips for computing the empirical statistics for non-stationarity correction") + + Argument ("file").type_file_in(); + + } + + return result; + } + + + + Shuffler::Shuffler (const size_t num_subjects, const bool is_nonstationarity, const std::string msg) : + rows (num_subjects), + nshuffles (is_nonstationarity ? DEFAULT_NUMBER_SHUFFLES_NONSTATIONARITY : DEFAULT_NUMBER_SHUFFLES), + counter (0) + { + using namespace App; + auto opt = get_options ("errors"); + bool ee = true, ise = false; + if (opt.size()) { + switch (int(opt[0][0])) { + case 0: ee = true; ise = false; break; + case 1: ee = false; ise = true; break; + case 2: ee = true; ise = true; break; + } + } + + bool nshuffles_explicit = false; + opt = get_options (is_nonstationarity ? "nshuffles_nonstationarity" : "nshuffles"); + if (opt.size()) { + nshuffles = opt[0][0]; + nshuffles_explicit = true; + } + + opt = get_options (is_nonstationarity ? 
"permutations_nonstationarity" : "permutations"); + if (opt.size()) { + if (ee) { + load_permutations (opt[0][0]); + if (permutations[0].size() != rows) + throw Exception ("Number of entries per shuffle in file \"" + std::string (opt[0][0]) + "\" does not match number of rows in design matrix (" + str(rows) + ")"); + if (nshuffles_explicit && nshuffles != permutations.size()) + throw Exception ("Number of shuffles explicitly requested (" + str(nshuffles) + ") does not match number of shuffles in file \"" + std::string (opt[0][0]) + "\" (" + str(permutations.size()) + ")"); + nshuffles = permutations.size(); + } else { + throw Exception ("Cannot manually provide permutations if errors are not exchangeable"); + } + } else if (ee) { + // Only include the default shuffling if this is the actual permutation testing; + // if we're doing nonstationarity correction, don't include the default + generate_permutations (nshuffles, rows, !is_nonstationarity); + } + + opt = get_options (is_nonstationarity ? "signflips_nonstationarity" : "signflips"); + if (opt.size()) { + if (ise) { + load_signflips (opt[0][0]); + if (signflips[0].size() != rows) + throw Exception ("Number of entries per shuffle in file \"" + std::string (opt[0][0]) + "\" does not match number of rows in design matrix (" + str(rows) + ")"); + if (nshuffles_explicit && nshuffles != signflips.size()) + throw Exception ("Number of shuffles explicitly requested (" + str(nshuffles) + ") does not match number of shuffles in file \"" + std::string (opt[0][0]) + "\" (" + str(signflips.size()) + ")"); + if (permutations.size() && signflips.size() != permutations.size()) + throw Exception ("Number of permutations (" + str(permutations.size()) + ") does not match number of signflips (" + str(signflips.size()) + ")"); + nshuffles = signflips.size(); + } else { + throw Exception ("Cannot manually provide signflips if errors are not independent and symmetric"); + } + } else if (ise) { + generate_signflips (nshuffles, rows, !is_nonstationarity); + } + + if (msg.size()) + progress.reset (new ProgressBar (msg, nshuffles)); + } + + + + + bool Shuffler::operator() (Shuffle& output) + { + output.index = counter; + if (counter + 1 >= nshuffles) { + if (progress) + progress.reset (nullptr); + counter = nshuffles; + output.data.resize (0, 0); + return false; + } + output.data = matrix_type::Zero (rows, rows); + if (permutations.size()) { + for (size_t i = 0; i != rows; ++i) + output.data (i, permutations[counter][i]) = 1.0; + } + if (signflips.size()) { + for (size_t i = 0; i != rows; ++i) { + if (signflips[counter][i]) + output.data.row (i) *= -1.0; + } + } + ++counter; + if (progress) + ++(*progress); + return true; + } + + + + bool Shuffler::is_duplicate (const PermuteLabels& v1, const PermuteLabels& v2) const + { + assert (v1.size() == v2.size()); + for (size_t i = 0; i < v1.size(); i++) { + if (v1[i] != v2[i]) + return false; + } + return true; + } + + + + bool Shuffler::is_duplicate (const PermuteLabels& perm) const + { + for (const auto p : permutations) { + if (is_duplicate (perm, p)) + return true; + } + return false; + } + + + + void Shuffler::generate_permutations (const size_t num_perms, + const size_t num_subjects, + const bool include_default) + { + permutations.clear(); + permutations.reserve (num_perms); + PermuteLabels default_labelling (num_subjects); + for (size_t i = 0; i < num_subjects; ++i) + default_labelling[i] = i; + size_t p = 0; + if (include_default) { + permutations.push_back (default_labelling); + ++p; + } + for (; p != 
num_perms; ++p) { + PermuteLabels permuted_labelling (default_labelling); + do { + std::random_shuffle (permuted_labelling.begin(), permuted_labelling.end()); + } while (is_duplicate (permuted_labelling)); + permutations.push_back (permuted_labelling); + } + } + + + + + void Shuffler::load_permutations (const std::string& filename) + { + vector > temp = load_matrix_2D_vector (filename); + if (!temp.size()) + throw Exception ("no data found in permutations file: " + str(filename)); + + const size_t min_value = *std::min_element (std::begin (temp[0]), std::end (temp[0])); + if (min_value > 1) + throw Exception ("indices for relabelling in permutations file must start from either 0 or 1"); + + // TODO Support transposed permutations + permutations.assign (temp[0].size(), PermuteLabels (temp.size())); + for (size_t i = 0; i != temp[0].size(); i++) { + for (size_t j = 0; j != temp.size(); j++) + permutations[i][j] = temp[j][i] - min_value; + } + } + + + + + bool Shuffler::is_duplicate (const BitSet& sign) const + { + for (const auto s : signflips) { + if (sign == s) + return true; + } + return false; + } + + + + void Shuffler::generate_signflips (const size_t num_signflips, + const size_t num_subjects, + const bool include_default) + { + signflips.clear(); + signflips.reserve (num_signflips); + size_t s = 0; + if (include_default) { + BitSet default_labelling (num_subjects, false); + signflips.push_back (default_labelling); + ++s; + } + std::random_device rd; + std::mt19937 generator (rd()); + std::uniform_int_distribution<> distribution (0, 1); + for (; s != num_signflips; ++s) { + BitSet rows_to_flip (num_subjects); + do { + // TODO Should be a faster mechanism for generating / storing random bits + for (size_t index = 0; index != num_subjects; ++index) + rows_to_flip[index] = distribution (generator); + } while (is_duplicate (rows_to_flip)); + signflips.push_back (rows_to_flip); + } + } + + + + void Shuffler::load_signflips (const std::string&) + { + // TODO + assert (0); + } + + + + + + + + + + + void statistic2pvalue (const matrix_type& null_dist, const matrix_type& stats, matrix_type& pvalues) + { + pvalues.resize (stats.rows(), stats.cols()); + for (ssize_t contrast = 0; contrast != stats.cols(); ++contrast) { + vector sorted_null_dist; + sorted_null_dist.reserve (null_dist.rows()); + for (ssize_t perm = 0; perm != null_dist.rows(); ++perm) + sorted_null_dist.push_back (null_dist(perm, contrast)); + std::sort (sorted_null_dist.begin(), sorted_null_dist.end()); + for (ssize_t element = 0; element != stats.rows(); ++element) { + if (stats(element, contrast) > 0.0) { + value_type pvalue = 1.0; + for (size_t j = 0; j < size_t(sorted_null_dist.size()); ++j) { + if (stats(element, contrast) < sorted_null_dist[j]) { + pvalue = value_type(j) / value_type(sorted_null_dist.size()); + break; + } + } + pvalues(element, contrast) = pvalue; + } else { + pvalues(element, contrast) = 0.0; + } + } + } + } + + + + + } + } +} diff --git a/core/math/stats/shuffle.h b/core/math/stats/shuffle.h new file mode 100644 index 0000000000..b7190ae0f2 --- /dev/null +++ b/core/math/stats/shuffle.h @@ -0,0 +1,121 @@ +/* Copyright (c) 2008-2017 the MRtrix3 contributors + * + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, you can obtain one at http://mozilla.org/MPL/2.0/. 
+ * + * MRtrix is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * For more details, see http://www.mrtrix.org/. + */ + + +#ifndef __math_stats_shuffle_h__ +#define __math_stats_shuffle_h__ + +#include "app.h" +#include "progressbar.h" +#include "types.h" + +#include "misc/bitset.h" + +#include "math/stats/typedefs.h" + + +#define DEFAULT_NUMBER_SHUFFLES 5000 +#define DEFAULT_NUMBER_SHUFFLES_NONSTATIONARITY 5000 + + +namespace MR +{ + namespace Math + { + namespace Stats + { + + + + // TODO Generic command-line options: + // - Set nature of errors + // - Set number of shuffles (actual & nonstationarity correction) + // - Import permutations (actual & nonstationarity correction) + // - Import sign-flips (actual & nonstationarity correction) + // - (future) Set exchangeability blocks + + extern const char* error_types[]; + App::OptionGroup shuffle_options (const bool include_nonstationarity); + + + + class Shuffle + { NOMEMALIGN + public: + size_t index; + matrix_type data; + }; + + + + class Shuffler + { NOMEMALIGN + public: + typedef vector PermuteLabels; + + // TODO Consider alternative interface allowing class to be initialised without + // ever accessing command-line options + + Shuffler (const size_t num_subjects, bool is_nonstationarity, const std::string msg = ""); + + // Don't store the full set of shuffling matrices; + // generate each as it is required, based on the more compressed representations + bool operator() (Shuffle& output); + + size_t size() const { return nshuffles; } + + + private: + const size_t rows; + vector permutations; + vector signflips; + size_t nshuffles, counter; + std::unique_ptr progress; + + + // For generating unique permutations + bool is_duplicate (const PermuteLabels&, const PermuteLabels&) const; + bool is_duplicate (const PermuteLabels&) const; + + // Note that this function does not take into account grouping of subjects and therefore generated + // permutations are not guaranteed to be unique wrt the computed test statistic. + // Providing the number of subjects is large then the likelihood of generating duplicates is low. + void generate_permutations (const size_t num_perms, + const size_t num_subjects, + const bool include_default); + + void load_permutations (const std::string& filename); + + // Similar functions required for sign-flipping + bool is_duplicate (const BitSet&) const; + void generate_signflips (const size_t num_signflips, + const size_t num_subjects, + const bool include_default); + void load_signflips (const std::string& filename); + + }; + + + + // TODO Some of these should be vector_type's? + // - No, represent multiple contrasts + // TODO Should live elsewhere? + void statistic2pvalue (const matrix_type& null_dist, const matrix_type& stats, matrix_type& pvalues); + + + + } + } +} + +#endif diff --git a/src/stats/permstack.cpp b/src/stats/permstack.cpp deleted file mode 100644 index 89b53bd963..0000000000 --- a/src/stats/permstack.cpp +++ /dev/null @@ -1,60 +0,0 @@ -/* Copyright (c) 2008-2017 the MRtrix3 contributors - * - * This Source Code Form is subject to the terms of the Mozilla Public - * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/. 
- * - * MRtrix is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - * - * For more details, see http://www.mrtrix.org/. - */ - - -#include "stats/permstack.h" - -namespace MR -{ - namespace Stats - { - namespace PermTest - { - - - - PermutationStack::PermutationStack (const size_t num_permutations, const size_t num_samples, const std::string msg, const bool include_default) : - num_permutations (num_permutations), - counter (0), - progress (msg, num_permutations) - { - Math::Stats::Permutation::generate (num_permutations, num_samples, permutations, include_default); - } - - PermutationStack::PermutationStack (const vector< vector >& permutations, const std::string msg) : - num_permutations (permutations.size()), - permutations (permutations), - counter (0), - progress (msg, permutations.size()) { } - - - - bool PermutationStack::operator() (Permutation& out) - { - if (counter < num_permutations) { - out.index = counter; - out.data = permutations[counter++]; - ++progress; - return true; - } else { - out.index = num_permutations; - out.data.clear(); - return false; - } - } - - - - } - } -} diff --git a/src/stats/permstack.h b/src/stats/permstack.h deleted file mode 100644 index 252398f44b..0000000000 --- a/src/stats/permstack.h +++ /dev/null @@ -1,69 +0,0 @@ -/* Copyright (c) 2008-2017 the MRtrix3 contributors - * - * This Source Code Form is subject to the terms of the Mozilla Public - * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/. - * - * MRtrix is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - * - * For more details, see http://www.mrtrix.org/. - */ - - -#ifndef __stats_permstack_h__ -#define __stats_permstack_h__ - -#include -#include -#include - -#include "progressbar.h" -#include "math/stats/permutation.h" - -namespace MR -{ - namespace Stats - { - namespace PermTest - { - - - class Permutation - { MEMALIGN (Permutation) - public: - size_t index; - vector data; - }; - - - class PermutationStack - { MEMALIGN (PermutationStack) - public: - PermutationStack (const size_t num_permutations, const size_t num_samples, const std::string msg, const bool include_default = true); - - PermutationStack (const vector< vector >& permutations, const std::string msg); - - bool operator() (Permutation&); - - const vector& operator[] (size_t index) const { - return permutations[index]; - } - - const size_t num_permutations; - - protected: - vector< vector > permutations; - size_t counter; - ProgressBar progress; - }; - - - - - } - } -} - -#endif diff --git a/src/stats/permtest.cpp b/src/stats/permtest.cpp index 91767584c3..73d431c5c5 100644 --- a/src/stats/permtest.cpp +++ b/src/stats/permtest.cpp @@ -23,39 +23,6 @@ namespace MR - const App::OptionGroup Options (const bool include_nonstationarity) - { - using namespace App; - - OptionGroup result = OptionGroup ("Options for permutation testing") - + Option ("notest", "don't perform permutation testing and only output population statistics (effect size, stdev etc)") - + Option ("nperms", "the number of permutations (Default: " + str(DEFAULT_NUMBER_PERMUTATIONS) + ")") - + Argument ("num").type_integer (1) - + Option ("permutations", "manually define the permutations (relabelling). 
The input should be a text file defining a m x n matrix, " - "where each relabelling is defined as a column vector of size m, and the number of columns, n, defines " - "the number of permutations. Can be generated with the palm_quickperms function in PALM (http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/PALM). " - "Overrides the nperms option.") - + Argument ("file").type_file_in(); - - if (include_nonstationarity) { - result - + Option ("nonstationary", "perform non-stationarity correction") - + Option ("nperms_nonstationary", "the number of permutations used when precomputing the empirical statistic image for nonstationary correction (Default: " + str(DEFAULT_NUMBER_PERMUTATIONS_NONSTATIONARITY) + ")") - + Argument ("num").type_integer (1) - + Option ("permutations_nonstationary", "manually define the permutations (relabelling) for computing the emprical statistic image for nonstationary correction. " - "The input should be a text file defining a m x n matrix, where each relabelling is defined as a column vector of size m, " - "and the number of columns, n, defines the number of permutations. Can be generated with the palm_quickperms function in PALM " - "(http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/PALM) " - "Overrides the nperms_nonstationary option.") - + Argument ("file").type_file_in(); - } - - - return result; - } - - - PreProcessor::PreProcessor (const std::shared_ptr stats_calculator, const std::shared_ptr enhancer, matrix_type& global_enhanced_sum, @@ -88,11 +55,11 @@ namespace MR - bool PreProcessor::operator() (const Permutation& permutation) + bool PreProcessor::operator() (const Math::Stats::Shuffle& shuffle) { - if (permutation.data.empty()) + if (!shuffle.data.rows()) return false; - (*stats_calculator) (permutation.data, stats); + (*stats_calculator) (shuffle.data, stats); (*enhancer) (stats, enhanced_stats); for (ssize_t c = 0; c != enhanced_stats.rows(); ++c) { for (ssize_t i = 0; i < enhanced_stats.cols(); ++i) { @@ -146,9 +113,9 @@ namespace MR - bool Processor::operator() (const Permutation& permutation) + bool Processor::operator() (const Math::Stats::Shuffle& shuffle) { - (*stats_calculator) (permutation.data, statistics); + (*stats_calculator) (shuffle.data, statistics); if (enhancer) (*enhancer) (statistics, enhanced_statistics); else @@ -157,7 +124,7 @@ namespace MR if (empirical_enhanced_statistics.size()) enhanced_statistics.array() /= empirical_enhanced_statistics.array(); - perm_dist.row(permutation.index) = enhanced_statistics.colwise().maxCoeff(); + perm_dist.row(shuffle.index) = enhanced_statistics.colwise().maxCoeff(); for (ssize_t contrast = 0; contrast != enhanced_statistics.cols(); ++contrast) { for (ssize_t element = 0; element != enhanced_statistics.rows(); ++element) { @@ -177,12 +144,14 @@ namespace MR void precompute_empirical_stat (const std::shared_ptr stats_calculator, const std::shared_ptr enhancer, - PermutationStack& perm_stack, matrix_type& empirical_statistic) + matrix_type& empirical_statistic) { + assert (stats_calculator); vector> global_enhanced_count (empirical_statistic.rows(), vector (empirical_statistic.cols(), 0)); { + Math::Stats::Shuffler shuffler (stats_calculator->num_subjects(), true, "Pre-computing empirical statistic for non-stationarity correction"); PreProcessor preprocessor (stats_calculator, enhancer, empirical_statistic, global_enhanced_count); - Thread::run_queue (perm_stack, Permutation(), Thread::multi (preprocessor)); + Thread::run_queue (shuffler, Math::Stats::Shuffle(), Thread::multi (preprocessor)); } for (ssize_t row = 0; row 
!= empirical_statistic.rows(); ++row) { for (ssize_t i = 0; i != empirical_statistic.cols(); ++i) { @@ -201,11 +170,9 @@ namespace MR matrix_type& default_enhanced_statistics, matrix_type& default_statistics) { - vector default_labelling (stats_calculator->num_subjects()); - for (size_t i = 0; i < default_labelling.size(); ++i) - default_labelling[i] = i; - - (*stats_calculator) (default_labelling, default_statistics); + assert (stats_calculator); + const matrix_type default_shuffle (matrix_type::Identity (stats_calculator->num_subjects(), stats_calculator->num_subjects())); + (*stats_calculator) (default_shuffle, default_statistics); if (enhancer) (*enhancer) (default_statistics, default_enhanced_statistics); @@ -219,14 +186,17 @@ namespace MR - void run_permutations (PermutationStack& perm_stack, - const std::shared_ptr stats_calculator, + void run_permutations (const std::shared_ptr stats_calculator, const std::shared_ptr enhancer, const matrix_type& empirical_enhanced_statistic, const matrix_type& default_enhanced_statistics, matrix_type& perm_dist, matrix_type& uncorrected_pvalues) { + assert (stats_calculator); + Math::Stats::Shuffler shuffler (stats_calculator->num_subjects(), false, "Running permutations"); + perm_dist.resize (stats_calculator->num_outputs(), shuffler.size()); + uncorrected_pvalues.resize (stats_calculator->num_outputs(), stats_calculator->num_elements()); vector> global_uncorrected_pvalue_count (stats_calculator->num_outputs(), vector (stats_calculator->num_elements(), 0)); { Processor processor (stats_calculator, enhancer, @@ -234,52 +204,18 @@ namespace MR default_enhanced_statistics, perm_dist, global_uncorrected_pvalue_count); - Thread::run_queue (perm_stack, Permutation(), Thread::multi (processor)); + Thread::run_queue (shuffler, Math::Stats::Shuffle(), Thread::multi (processor)); } for (size_t contrast = 0; contrast != stats_calculator->num_outputs(); ++contrast) { for (size_t element = 0; element != stats_calculator->num_elements(); ++element) - uncorrected_pvalues(element, contrast) = global_uncorrected_pvalue_count[contrast][element] / default_type(perm_stack.num_permutations); + uncorrected_pvalues(element, contrast) = global_uncorrected_pvalue_count[contrast][element] / default_type(shuffler.size()); } } - void run_permutations (const vector>& permutations, - const std::shared_ptr stats_calculator, - const std::shared_ptr enhancer, - const matrix_type& empirical_enhanced_statistic, - const matrix_type& default_enhanced_statistics, - matrix_type& perm_dist, - matrix_type& uncorrected_pvalues) - { - PermutationStack perm_stack (permutations, "running " + str(permutations.size()) + " permutations"); - - run_permutations (perm_stack, stats_calculator, enhancer, empirical_enhanced_statistic, - default_enhanced_statistics, perm_dist, uncorrected_pvalues); - } - - - - - void run_permutations (const size_t num_permutations, - const std::shared_ptr stats_calculator, - const std::shared_ptr enhancer, - const matrix_type& empirical_enhanced_statistic, - const matrix_type& default_enhanced_statistics, - matrix_type& perm_dist, - matrix_type& uncorrected_pvalues) - { - PermutationStack perm_stack (num_permutations, stats_calculator->num_subjects(), "running " + str(num_permutations) + " permutations"); - - run_permutations (perm_stack, stats_calculator, enhancer, empirical_enhanced_statistic, - default_enhanced_statistics, perm_dist, uncorrected_pvalues); - } - - - - } } } diff --git a/src/stats/permtest.h b/src/stats/permtest.h index 80a3344a3f..2c7c73f4a1 100644 
--- a/src/stats/permtest.h +++ b/src/stats/permtest.h @@ -24,11 +24,10 @@ #include "thread_queue.h" #include "math/math.h" #include "math/stats/glm.h" -#include "math/stats/permutation.h" +#include "math/stats/shuffle.h" #include "math/stats/typedefs.h" #include "stats/enhance.h" -#include "stats/permstack.h" #define DEFAULT_NUMBER_PERMUTATIONS 5000 @@ -50,9 +49,6 @@ namespace MR - const App::OptionGroup Options (const bool include_nonstationarity); - - /*! A class to pre-compute the empirical enhanced statistic image for non-stationarity correction */ class PreProcessor { MEMALIGN (PreProcessor) public: @@ -63,7 +59,7 @@ namespace MR ~PreProcessor(); - bool operator() (const Permutation&); + bool operator() (const Math::Stats::Shuffle&); protected: std::shared_ptr stats_calculator; @@ -92,7 +88,7 @@ namespace MR ~Processor(); - bool operator() (const Permutation&); + bool operator() (const Math::Stats::Shuffle&); protected: std::shared_ptr stats_calculator; @@ -113,7 +109,7 @@ namespace MR // Precompute the empircal test statistic for non-stationarity adjustment void precompute_empirical_stat (const std::shared_ptr stats_calculator, const std::shared_ptr enhancer, - PermutationStack& perm_stack, matrix_type& empirical_statistic); + matrix_type& empirical_statistic); @@ -128,37 +124,13 @@ namespace MR // Functions for running a large number of permutations - // Different interfaces depending on how the permutations themselves are constructed: - // - A pre-existing permutation stack class - // - Pre-defined permutations (likely provided via a command-line option) - // - A requested number of permutations - void run_permutations (PermutationStack& perm_stack, - const std::shared_ptr stats_calculator, - const std::shared_ptr enhancer, - const matrix_type& empirical_enhanced_statistic, - const matrix_type& default_enhanced_statistics, - matrix_type& perm_dist, - matrix_type& uncorrected_pvalues); - - - void run_permutations (const vector>& permutations, - const std::shared_ptr stats_calculator, + void run_permutations (const std::shared_ptr stats_calculator, const std::shared_ptr enhancer, const matrix_type& empirical_enhanced_statistic, const matrix_type& default_enhanced_statistics, matrix_type& perm_dist, matrix_type& uncorrected_pvalues); - - void run_permutations (const size_t num_permutations, - const std::shared_ptr stats_calculator, - const std::shared_ptr enhancer, - const matrix_type& empirical_enhanced_statistic, - const matrix_type& default_enhanced_statistics, - matrix_type& perm_dist, - matrix_type& uncorrected_pvalues); - - //! @} } diff --git a/src/stats/tfce.h b/src/stats/tfce.h index ab76da528a..b64ec35bed 100644 --- a/src/stats/tfce.h +++ b/src/stats/tfce.h @@ -17,7 +17,6 @@ #include "thread_queue.h" #include "filter/connected_components.h" -#include "math/stats/permutation.h" #include "math/stats/typedefs.h" #include "stats/enhance.h" From 7472788a343770bfbf2739eb0a395d52062d2b30 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Mon, 4 Dec 2017 19:05:49 +1100 Subject: [PATCH 0072/1471] Stats: Unify code across commands Code related to assessing the default permutation when the -column option is used is now shared across statistical inference commands. 
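In sketch form, the shared interface that each command now calls looks as follows (illustrative only: a name such as num_elements stands in for num_edges / num_fixels / num_voxels, and all inputs are assumed to have been loaded by the command exactly as before):

    // Outputs are pre-allocated by the caller, as in the command sources below.
    matrix_type betas (num_factors, num_elements);
    matrix_type abs_effect_size (num_contrasts, num_elements);
    matrix_type std_effect_size (num_contrasts, num_elements);
    vector_type stdev (num_elements);
    // A single call covers both cases: with an empty extra_columns vector the
    // fixed design matrix is used directly; otherwise a per-element design
    // matrix is assembled and evaluated through a multi-threaded queue.
    Math::Stats::GLM::all_stats (data, design, extra_columns, contrasts,
                                 betas, abs_effect_size, std_effect_size, stdev);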
--- cmd/connectomestats.cpp | 106 +++-------------------------------- cmd/fixelcfestats.cpp | 103 +++------------------------------- cmd/mrclusterstats.cpp | 103 ++++------------------------------ cmd/vectorstats.cpp | 103 +++------------------------------- core/math/stats/glm.cpp | 120 ++++++++++++++++++++++++++++++++++++---- core/math/stats/glm.h | 24 +++++++- 6 files changed, 166 insertions(+), 393 deletions(-) diff --git a/cmd/connectomestats.cpp b/cmd/connectomestats.cpp index 815ef9a1e1..423f9073d7 100644 --- a/cmd/connectomestats.cpp +++ b/cmd/connectomestats.cpp @@ -263,112 +263,18 @@ void run() } const bool nans_in_data = data.allFinite(); - // Construct the class for performing the initial statistical tests - std::shared_ptr glm_test; - if (extra_columns.size() || nans_in_data) { - glm_test.reset (new GLM::TestVariable (extra_columns, data, design, contrasts, nans_in_data, nans_in_columns)); - } else { - glm_test.reset (new GLM::TestFixed (data, design, contrasts)); - } - // Only add contrast row number to image outputs if there's more than one contrast auto postfix = [&] (const size_t i) { return (num_contrasts > 1) ? ("_" + str(i)) : ""; }; { matrix_type betas (num_factors, num_edges); - // TODO Pretty sure these are transposed with respect to what I'd prefer them to be matrix_type abs_effect_size (num_contrasts, num_edges), std_effect_size (num_contrasts, num_edges); vector_type stdev (num_edges); - if (extra_columns.size()) { - - // For each variable of interest (e.g. beta coefficients, effect size etc.) need to: - // Construct the output data vector, with size = num_edges - // For each edge: - // Use glm_test to obtain the design matrix for the default permutation for that edge - // Use the relevant Math::Stats::GLM function to get the value of interest for just that edge - // (will still however need to come out as a matrix_type) - // Write that value to data vector - // Finally, write results to connectome files - class Source - { NOMEMALIGN - public: - Source (const size_t num_edges) : - num_edges (num_edges), - counter (0), - progress (new ProgressBar ("calculating basic properties of of default permutation", num_edges)) { } - - bool operator() (size_t& edge_index) - { - edge_index = counter++; - if (counter >= num_edges) { - progress.reset(); - return false; - } - assert (progress); - ++(*progress); - return true; - } - - private: - const size_t num_edges; - size_t counter; - std::unique_ptr progress; - }; - - class Functor - { MEMALIGN(Functor) - public: - Functor (const matrix_type& data, std::shared_ptr glm_test, const vector& contrasts, - matrix_type& betas, matrix_type& abs_effect_size, matrix_type& std_effect_size, vector_type& stdev) : - data (data), - glm_test (glm_test), - contrasts (contrasts), - global_betas (betas), - global_abs_effect_size (abs_effect_size), - global_std_effect_size (std_effect_size), - global_stdev (stdev) { } - - bool operator() (const size_t& edge_index) - { - const matrix_type data_edge = data.row (edge_index); - const matrix_type design_edge = dynamic_cast(glm_test.get())->default_design (edge_index); - Math::Stats::GLM::all_stats (data_edge, design_edge, contrasts, - local_betas, local_abs_effect_size, local_std_effect_size, local_stdev); - global_betas.col (edge_index) = local_betas; - global_abs_effect_size.col(edge_index) = local_abs_effect_size.col(0); - global_std_effect_size.col(edge_index) = local_std_effect_size.col(0); - global_stdev[edge_index] = local_stdev[0]; - return true; - } - - private: - const matrix_type& data; - 
const std::shared_ptr glm_test; - const vector& contrasts; - matrix_type& global_betas; - matrix_type& global_abs_effect_size; - matrix_type& global_std_effect_size; - vector_type& global_stdev; - matrix_type local_betas, local_abs_effect_size, local_std_effect_size; - vector_type local_stdev; - }; - - Source source (num_edges); - Functor functor (data, glm_test, contrasts, - betas, abs_effect_size, std_effect_size, stdev); - Thread::run_queue (source, Thread::batch (size_t()), Thread::multi (functor)); - - - } else { - - ProgressBar progress ("calculating basic properties of default permutation"); - Math::Stats::GLM::all_stats (data, design, contrasts, - betas, abs_effect_size, std_effect_size, stdev); - } + Math::Stats::GLM::all_stats (data, design, extra_columns, contrasts, + betas, abs_effect_size, std_effect_size, stdev); // TODO Contrasts should be somehow named, in order to differentiate between t-tests and F-tests - ProgressBar progress ("outputting beta coefficients, effect size and standard deviation", num_factors + (2 * num_contrasts) + 1); for (ssize_t i = 0; i != num_factors; ++i) { save_matrix (mat2vec.V2M (betas.row(i)), "beta" + str(i) + ".csv"); @@ -379,9 +285,15 @@ void run() save_matrix (mat2vec.V2M (std_effect_size.row(i)), "std_effect" + postfix(i) + ".csv"); ++progress; } save_matrix (mat2vec.V2M (stdev), "std_dev.csv"); - } + // Construct the class for performing the initial statistical tests + std::shared_ptr glm_test; + if (extra_columns.size() || nans_in_data) { + glm_test.reset (new GLM::TestVariable (extra_columns, data, design, contrasts, nans_in_data, nans_in_columns)); + } else { + glm_test.reset (new GLM::TestFixed (data, design, contrasts)); + } // If performing non-stationarity adjustment we need to pre-compute the empirical statistic matrix_type empirical_statistic; diff --git a/cmd/fixelcfestats.cpp b/cmd/fixelcfestats.cpp index 7bc621a745..2979844d2b 100644 --- a/cmd/fixelcfestats.cpp +++ b/cmd/fixelcfestats.cpp @@ -398,15 +398,6 @@ void run() } } - - // Construct the class for performing the initial statistical tests - std::shared_ptr glm_test; - if (extra_columns.size() || nans_in_data) { - glm_test.reset (new GLM::TestVariable (extra_columns, data, design, contrasts, nans_in_data, nans_in_columns)); - } else { - glm_test.reset (new GLM::TestFixed (data, design, contrasts)); - } - // Only add contrast row number to image outputs if there's more than one contrast auto postfix = [&] (const size_t i) { return (num_contrasts > 1) ? ("_" + str(i)) : ""; }; @@ -415,91 +406,8 @@ void run() matrix_type abs_effect_size (num_contrasts, num_fixels), std_effect_size (num_contrasts, num_fixels); vector_type stdev (num_fixels); - if (extra_columns.size()) { - - // For each variable of interest (e.g. beta coefficients, effect size etc.) 
need to: - // Construct the output data vector, with size = num_fixels - // For each fixel: - // Use glm_test to obtain the design matrix for the default permutation for that fixel - // Use the relevant Math::Stats::GLM function to get the value of interest for just that fixel - // (will still however need to come out as a matrix_type) - // Write that value to data vector - // Finally, use write_fixel_output() function to write to an image file - class Source - { NOMEMALIGN - public: - Source (const size_t num_fixels) : - num_fixels (num_fixels), - counter (0), - progress (new ProgressBar ("calculating basic properties of default permutation", num_fixels)) { } - - bool operator() (size_t& fixel_index) - { - fixel_index = counter++; - if (counter >= num_fixels) { - progress.reset(); - return false; - } - assert (progress); - ++(*progress); - return true; - } - - private: - const size_t num_fixels; - size_t counter; - std::unique_ptr progress; - }; - - class Functor - { MEMALIGN(Functor) - public: - Functor (const matrix_type& data, std::shared_ptr glm_test, const vector& contrasts, - matrix_type& betas, matrix_type& abs_effect_size, matrix_type& std_effect_size, vector_type& stdev) : - data (data), - glm_test (glm_test), - contrasts (contrasts), - global_betas (betas), - global_abs_effect_size (abs_effect_size), - global_std_effect_size (std_effect_size), - global_stdev (stdev) { } - - bool operator() (const size_t& fixel_index) - { - const matrix_type data_fixel = data.row (fixel_index); - const matrix_type design_fixel = dynamic_cast(glm_test.get())->default_design (fixel_index); - Math::Stats::GLM::all_stats (data_fixel, design_fixel, contrasts, - local_betas, local_abs_effect_size, local_std_effect_size, local_stdev); - global_betas.col(fixel_index) = local_betas; - global_abs_effect_size.col(fixel_index) = local_abs_effect_size.col(0); - global_std_effect_size.col(fixel_index) = local_std_effect_size.col(0); - global_stdev[fixel_index] = local_stdev[0]; - return true; - } - - private: - const matrix_type& data; - const std::shared_ptr glm_test; - const vector& contrasts; - matrix_type& global_betas; - matrix_type& global_abs_effect_size; - matrix_type& global_std_effect_size; - vector_type& global_stdev; - matrix_type local_betas, local_abs_effect_size, local_std_effect_size; - vector_type local_stdev; - }; - - Source source (num_fixels); - Functor functor (data, glm_test, contrasts, - betas, abs_effect_size, std_effect_size, stdev); - Thread::run_queue (source, Thread::batch (size_t()), Thread::multi (functor)); - - } else { - - ProgressBar progress ("calculating basic properties of default permutation"); - Math::Stats::GLM::all_stats (data, design, contrasts, - betas, abs_effect_size, std_effect_size, stdev); - } + Math::Stats::GLM::all_stats (data, design, extra_columns, contrasts, + betas, abs_effect_size, std_effect_size, stdev); ProgressBar progress ("outputting beta coefficients, effect size and standard deviation", num_factors + (2 * num_contrasts) + 1); for (ssize_t i = 0; i != num_factors; ++i) { @@ -511,7 +419,14 @@ void run() write_fixel_output (Path::join (output_fixel_directory, "std_effect" + postfix(i) + ".mif"), std_effect_size.row(i), output_header); ++progress; } write_fixel_output (Path::join (output_fixel_directory, "std_dev.mif"), stdev, output_header); + } + // Construct the class for performing the initial statistical tests + std::shared_ptr glm_test; + if (extra_columns.size() || nans_in_data) { + glm_test.reset (new GLM::TestVariable (extra_columns, data, 
design, contrasts, nans_in_data, nans_in_columns)); + } else { + glm_test.reset (new GLM::TestFixed (data, design, contrasts)); } // Construct the class for performing fixel-based statistical enhancement diff --git a/cmd/mrclusterstats.cpp b/cmd/mrclusterstats.cpp index eb6f8d9498..e1088e6376 100644 --- a/cmd/mrclusterstats.cpp +++ b/cmd/mrclusterstats.cpp @@ -277,14 +277,6 @@ void run() { matrix_type tvalue_output (num_contrasts, num_voxels); matrix_type empirical_enhanced_statistic; - // Construct the class for performing the initial statistical tests - std::shared_ptr glm_test; - if (extra_columns.size() || nans_in_data) { - glm_test.reset (new GLM::TestVariable (extra_columns, data, design, contrasts, nans_in_data, nans_in_columns)); - } else { - glm_test.reset (new GLM::TestFixed (data, design, contrasts)); - } - // Only add contrast row number to image outputs if there's more than one contrast auto postfix = [&] (const size_t i) { return (num_contrasts > 1) ? ("_" + str(i)) : ""; }; @@ -293,91 +285,8 @@ void run() { matrix_type abs_effect_size (num_contrasts, num_voxels), std_effect_size (num_contrasts, num_voxels); vector_type stdev (num_voxels); - if (extra_columns.size()) { - - // For each variable of interest (e.g. beta coefficients, effect size etc.) need to: - // Construct the output data vector, with size = num_voxels - // For each voxel: - // Use glm_test to obtain the design matrix for the default permutation for that voxel - // Use the relevant Math::Stats::GLM function to get the value of interest for just that voxel - // (will still however need to come out as a matrix_type) - // Write that value to data vector - // Finally, use write_output() function to write to an image file - class Source - { NOMEMALIGN - public: - Source (const size_t num_voxels) : - num_voxels (num_voxels), - counter (0), - progress (new ProgressBar ("calculating basic properties of default permutation", num_voxels)) { } - - bool operator() (size_t& voxel_index) - { - voxel_index = counter++; - if (counter >= num_voxels) { - progress.reset(); - return false; - } - assert (progress); - ++(*progress); - return true; - } - - private: - const size_t num_voxels; - size_t counter; - std::unique_ptr progress; - }; - - class Functor - { MEMALIGN(Functor) - public: - Functor (const matrix_type& data, std::shared_ptr glm_test, const vector& contrasts, - matrix_type& betas, matrix_type& abs_effect_size, matrix_type& std_effect_size, vector_type& stdev) : - data (data), - glm_test (glm_test), - contrasts (contrasts), - global_betas (betas), - global_abs_effect_size (abs_effect_size), - global_std_effect_size (std_effect_size), - global_stdev (stdev) { } - - bool operator() (const size_t& voxel_index) - { - const matrix_type data_voxel = data.row (voxel_index); - const matrix_type design_voxel = dynamic_cast(glm_test.get())->default_design (voxel_index); - Math::Stats::GLM::all_stats (data_voxel, design_voxel, contrasts, - local_betas, local_abs_effect_size, local_std_effect_size, local_stdev); - global_betas.col (voxel_index) = local_betas; - global_abs_effect_size.col(voxel_index) = local_abs_effect_size.col(0); - global_std_effect_size.col(voxel_index) = local_std_effect_size.col(0); - global_stdev[voxel_index] = local_stdev[0]; - return true; - } - - private: - const matrix_type& data; - const std::shared_ptr glm_test; - const vector& contrasts; - matrix_type& global_betas; - matrix_type& global_abs_effect_size; - matrix_type& global_std_effect_size; - vector_type& global_stdev; - matrix_type local_betas, 
local_abs_effect_size, local_std_effect_size; - vector_type local_stdev; - }; - - Source source (num_voxels); - Functor functor (data, glm_test, contrasts, - betas, abs_effect_size, std_effect_size, stdev); - Thread::run_queue (source, Thread::batch (size_t()), Thread::multi (functor)); - - } else { - - ProgressBar progress ("calculating basic properties of default permutation"); - Math::Stats::GLM::all_stats (data, design, contrasts, - betas, abs_effect_size, std_effect_size, stdev); - } + Math::Stats::GLM::all_stats (data, design, extra_columns, contrasts, + betas, abs_effect_size, std_effect_size, stdev); ProgressBar progress ("outputting beta coefficients, effect size and standard deviation", num_factors + (2 * num_contrasts) + 1); for (ssize_t i = 0; i != num_factors; ++i) { @@ -391,6 +300,14 @@ void run() { write_output (stdev, v2v, prefix + "std_dev.mif", output_header); } + // Construct the class for performing the initial statistical tests + std::shared_ptr glm_test; + if (extra_columns.size() || nans_in_data) { + glm_test.reset (new GLM::TestVariable (extra_columns, data, design, contrasts, nans_in_data, nans_in_columns)); + } else { + glm_test.reset (new GLM::TestFixed (data, design, contrasts)); + } + std::shared_ptr enhancer; if (use_tfce) { std::shared_ptr base (new Stats::Cluster::ClusterSize (connector, cluster_forming_threshold)); diff --git a/cmd/vectorstats.cpp b/cmd/vectorstats.cpp index bff08f6f81..7548ae5f00 100644 --- a/cmd/vectorstats.cpp +++ b/cmd/vectorstats.cpp @@ -181,15 +181,6 @@ void run() } } - // Construct the class for performing the initial statistical tests - std::shared_ptr glm_test; - if (extra_columns.size() || nans_in_data) { - glm_test.reset (new GLM::TestVariable (extra_columns, data, design, contrasts, nans_in_data, nans_in_columns)); - } else { - glm_test.reset (new GLM::TestFixed (data, design, contrasts)); - } - - // Only add contrast row number to image outputs if there's more than one contrast auto postfix = [&] (const size_t i) { return (num_contrasts > 1) ? ("_" + str(i)) : ""; }; @@ -198,91 +189,8 @@ void run() matrix_type abs_effect_size (num_contrasts, num_elements), std_effect_size (num_contrasts, num_elements); vector_type stdev (num_elements); - if (extra_columns.size()) { - - // For each variable of interest (e.g. beta coefficients, effect size etc.) 
need to: - // Construct the output data vector, with size = num_voxels - // For each voxel: - // Use glm_test to obtain the design matrix for the default permutation for that voxel - // Use the relevant Math::Stats::GLM function to get the value of interest for just that voxel - // (will still however need to come out as a matrix_type) - // Write that value to data vector - // Finally, use write_output() function to write to an image file - class Source - { NOMEMALIGN - public: - Source (const size_t num_elements) : - num_elements (num_elements), - counter (0), - progress (new ProgressBar ("calculating basic properties of default permutation", num_elements)) { } - - bool operator() (size_t& index) - { - index = counter++; - if (counter >= num_elements) { - progress.reset(); - return false; - } - assert (progress); - ++(*progress); - return true; - } - - private: - const size_t num_elements; - size_t counter; - std::unique_ptr progress; - }; - - class Functor - { MEMALIGN(Functor) - public: - Functor (const matrix_type& data, std::shared_ptr glm_test, const vector& contrasts, - matrix_type& betas, matrix_type& abs_effect_size, matrix_type& std_effect_size, vector_type& stdev) : - data (data), - glm_test (glm_test), - contrasts (contrasts), - global_betas (betas), - global_abs_effect_size (abs_effect_size), - global_std_effect_size (std_effect_size), - global_stdev (stdev) { } - - bool operator() (const size_t& index) - { - const matrix_type data_element = data.row (index); - const matrix_type design_element = dynamic_cast(glm_test.get())->default_design (index); - Math::Stats::GLM::all_stats (data_element, design_element, contrasts, - local_betas, local_abs_effect_size, local_std_effect_size, local_stdev); - global_betas.col (index) = local_betas; - global_abs_effect_size.col(index) = local_abs_effect_size.col(0); - global_std_effect_size.col(index) = local_std_effect_size.col(0); - global_stdev[index] = local_stdev[0]; - return true; - } - - private: - const matrix_type& data; - const std::shared_ptr glm_test; - const vector& contrasts; - matrix_type& global_betas; - matrix_type& global_abs_effect_size; - matrix_type& global_std_effect_size; - vector_type& global_stdev; - matrix_type local_betas, local_abs_effect_size, local_std_effect_size; - vector_type local_stdev; - }; - - Source source (num_elements); - Functor functor (data, glm_test, contrasts, - betas, abs_effect_size, std_effect_size, stdev); - Thread::run_queue (source, Thread::batch (size_t()), Thread::multi (functor)); - - } else { - - ProgressBar progress ("calculating basic properties of default permutation"); - Math::Stats::GLM::all_stats (data, design, contrasts, - betas, abs_effect_size, std_effect_size, stdev); - } + Math::Stats::GLM::all_stats (data, design, extra_columns, contrasts, + betas, abs_effect_size, std_effect_size, stdev); ProgressBar progress ("outputting beta coefficients, effect size and standard deviation", 2 + (2 * num_contrasts)); save_matrix (betas, output_prefix + "betas.csv"); ++progress; @@ -293,6 +201,13 @@ void run() save_vector (stdev, output_prefix + "std_dev.csv"); } + // Construct the class for performing the initial statistical tests + std::shared_ptr glm_test; + if (extra_columns.size() || nans_in_data) { + glm_test.reset (new GLM::TestVariable (extra_columns, data, design, contrasts, nans_in_data, nans_in_columns)); + } else { + glm_test.reset (new GLM::TestFixed (data, design, contrasts)); + } // Precompute default statistic // Don't use convenience function: No enhancer! 
diff --git a/core/math/stats/glm.cpp b/core/math/stats/glm.cpp index f42980e573..8bd1f80260 100644 --- a/core/math/stats/glm.cpp +++ b/core/math/stats/glm.cpp @@ -16,6 +16,7 @@ #include "debug.h" #include "misc/bitset.h" +#include "thread_queue.h" #define GLM_BATCH_SIZE 1024 @@ -53,7 +54,10 @@ namespace MR vector_type abs_effect_size (const matrix_type& measurements, const matrix_type& design, const Contrast& contrast) { - return matrix_type(contrast) * solve_betas (measurements, design); + if (contrast.is_F()) + return vector_type::Constant (measurements.rows(), NaN); + else + return matrix_type(contrast) * solve_betas (measurements, design); } matrix_type abs_effect_size (const matrix_type& measurements, const matrix_type& design, const vector& contrasts) @@ -79,7 +83,10 @@ namespace MR vector_type std_effect_size (const matrix_type& measurements, const matrix_type& design, const Contrast& contrast) { - return abs_effect_size (measurements, design, contrast).array() / stdev (measurements, design).array(); + if (contrast.is_F()) + return vector_type::Constant (measurements.rows(), NaN); + else + return abs_effect_size (measurements, design, contrast).array() / stdev (measurements, design).array(); } matrix_type std_effect_size (const matrix_type& measurements, const matrix_type& design, const vector& contrasts) @@ -101,38 +108,127 @@ namespace MR matrix_type& std_effect_size, vector_type& stdev) { - betas = solve_betas (measurements, design); + ProgressBar progress ("calculating basic properties of default permutation"); + betas = solve_betas (measurements, design); ++progress; std::cerr << "Betas: " << betas.rows() << " x " << betas.cols() << ", max " << betas.array().maxCoeff() << "\n"; abs_effect_size.resize (measurements.rows(), contrasts.size()); - // TESTME Surely this doesn't make sense for an F-test? for (size_t ic = 0; ic != contrasts.size(); ++ic) { if (contrasts[ic].is_F()) { - abs_effect_size.col (ic).setZero(); + abs_effect_size.col (ic).fill (NaN); } else { abs_effect_size.col (ic) = (matrix_type (contrasts[ic]) * betas).row (0); } } + ++progress; std::cerr << "abs_effect_size: " << abs_effect_size.rows() << " x " << abs_effect_size.cols() << ", max " << abs_effect_size.array().maxCoeff() << "\n"; matrix_type residuals = measurements.transpose() - design * betas; - residuals = residuals.array().pow (2.0); + residuals = residuals.array().pow (2.0); ++progress; std::cerr << "residuals: " << residuals.rows() << " x " << residuals.cols() << ", max " << residuals.array().maxCoeff() << "\n"; matrix_type one_over_dof (1, measurements.cols()); one_over_dof.fill (1.0 / value_type(design.rows()-Math::rank (design))); std::cerr << "one_over_dof: " << one_over_dof.rows() << " x " << one_over_dof.cols() << ", max " << one_over_dof.array().maxCoeff() << "\n"; VAR (design.rows()); VAR (Math::rank (design)); - stdev = (one_over_dof * residuals).array().sqrt().row(0); + stdev = (one_over_dof * residuals).array().sqrt().row(0); ++progress; std::cerr << "stdev: " << stdev.size() << ", max " << stdev.array().maxCoeff() << "\n"; - // TODO Should be a cleaner way of doing this (broadcasting?) 
- matrix_type stdev_fill (abs_effect_size.rows(), abs_effect_size.cols()); - for (ssize_t i = 0; i != stdev_fill.cols(); ++i) - stdev_fill.col(i) = stdev; - std_effect_size = abs_effect_size.array() / stdev_fill.array(); + std_effect_size = abs_effect_size.array() / stdev.array(); ++progress; std::cerr << "std_effect_size: " << std_effect_size.rows() << " x " << std_effect_size.cols() << ", max " << std_effect_size.array().maxCoeff() << "\n"; } + void all_stats (const matrix_type& measurements, + const matrix_type& fixed_design, + const vector& extra_columns, + const vector& contrasts, + matrix_type& betas, + matrix_type& abs_effect_size, + matrix_type& std_effect_size, + vector_type& stdev) + { + if (extra_columns.empty()) { + all_stats (measurements, fixed_design, contrasts, betas, abs_effect_size, std_effect_size, stdev); + return; + } + + class Source + { NOMEMALIGN + public: + Source (const size_t num_elements) : + num_elements (num_elements), + counter (0), + progress (new ProgressBar ("calculating basic properties of default permutation", num_elements)) { } + bool operator() (size_t& element_index) + { + element_index = counter++; + if (counter >= num_elements) { + progress.reset(); + return false; + } + assert (progress); + ++(*progress); + return true; + } + private: + const size_t num_elements; + size_t counter; + std::unique_ptr progress; + }; + + class Functor + { MEMALIGN(Functor) + public: + Functor (const matrix_type& data, const matrix_type& design_fixed, const vector& extra_columns, const vector& contrasts, + matrix_type& betas, matrix_type& abs_effect_size, matrix_type& std_effect_size, vector_type& stdev) : + data (data), + design_fixed (design_fixed), + extra_columns (extra_columns), + contrasts (contrasts), + global_betas (betas), + global_abs_effect_size (abs_effect_size), + global_std_effect_size (std_effect_size), + global_stdev (stdev) + { + assert (design_fixed.cols() + extra_columns.size() == contrasts[0].cols()); + } + bool operator() (const size_t& element_index) + { + const matrix_type element_data = data.row (element_index); + matrix_type element_design (design_fixed.rows(), design_fixed.cols() + extra_columns.size()); + element_design.leftCols (design_fixed.cols()) = design_fixed; + // For each element-wise design matrix column, + // acquire the data for this particular element, without permutation + for (size_t col = 0; col != extra_columns.size(); ++col) + element_design.col (design_fixed.cols() + col) = (extra_columns[col]) (element_index); + Math::Stats::GLM::all_stats (element_data, element_design, contrasts, + local_betas, local_abs_effect_size, local_std_effect_size, local_stdev); + global_betas.col (element_index) = local_betas; + global_abs_effect_size.col (element_index) = local_abs_effect_size.col(0); + global_std_effect_size.col (element_index) = local_std_effect_size.col(0); + global_stdev[element_index] = local_stdev[0]; + return true; + } + private: + const matrix_type& data; + const matrix_type& design_fixed; + const vector& extra_columns; + const vector& contrasts; + matrix_type& global_betas; + matrix_type& global_abs_effect_size; + matrix_type& global_std_effect_size; + vector_type& global_stdev; + matrix_type local_betas, local_abs_effect_size, local_std_effect_size; + vector_type local_stdev; + }; + + Source source (measurements.rows()); + Functor functor (measurements, fixed_design, extra_columns, contrasts, + betas, abs_effect_size, std_effect_size, stdev); + Thread::run_queue (source, Thread::batch (size_t()), Thread::multi 
(functor)); + } + + + diff --git a/core/math/stats/glm.h b/core/math/stats/glm.h index 5a79f296fc..3f9a55be25 100644 --- a/core/math/stats/glm.h +++ b/core/math/stats/glm.h @@ -109,7 +109,7 @@ namespace MR /*! Compute the effect of interest * @param measurements a matrix storing the measured data for each subject in a column * @param design the design matrix - * @param contrast a matrix defining the group difference + * @param contrast a Contrast class instance defining the contrast of interest * @return the matrix containing the output effect */ vector_type abs_effect_size (const matrix_type& measurements, const matrix_type& design, const Contrast& contrast); @@ -129,7 +129,7 @@ namespace MR /*! Compute cohen's d, the standardised effect size between two means * @param measurements a matrix storing the measured data for each subject in a column * @param design the design matrix - * @param contrast a matrix defining the group difference + * @param contrast a Contrast class instance defining the contrast of interest * @return the matrix containing the output standardised effect size */ vector_type std_effect_size (const matrix_type& measurements, const matrix_type& design, const Contrast& contrast); @@ -138,9 +138,11 @@ namespace MR /*! Compute all GLM-related statistics + * This function can be used when the design matrix remains fixed for all + * elements to be tested. * @param measurements a matrix storing the measured data for each subject in a column * @param design the design matrix - * @param contrast a matrix defining the group difference + * @param contrasts a vector of Contrast class instances defining the contrasts of interest * @param betas the matrix containing the output GLM betas * @param abs_effect_size the matrix containing the output effect * @param std_effect_size the matrix containing the output standardised effect size @@ -148,6 +150,22 @@ namespace MR */ void all_stats (const matrix_type& measurements, const matrix_type& design, const vector& contrasts, matrix_type& betas, matrix_type& abs_effect_size, matrix_type& std_effect_size, vector_type& stdev); + + /*! Compute all GLM-related statistics + * This function can be used when the design matrix varies between elements, + * due to importing external data for each element from external files + * @param measurements a matrix storing the measured data for each subject in a column + * @param design the fixed portion of the design matrix + * @param extra_columns the variable columns of the design matrix + * @param contrasts a vector of Contrast class instances defining the contrasts of interest + * @param betas the matrix containing the output GLM betas + * @param abs_effect_size the matrix containing the output effect + * @param std_effect_size the matrix containing the output standardised effect size + * @param stdev the matrix containing the output standard deviation + */ + void all_stats (const matrix_type& measurements, const matrix_type& design, const vector& extra_columns, + const vector& contrasts, matrix_type& betas, matrix_type& abs_effect_size, matrix_type& std_effect_size, vector_type& stdev); + //! 
@} From c7c72eceb04ccbe619e647c2f3725c820be6d59d Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Mon, 4 Dec 2017 21:08:19 +1100 Subject: [PATCH 0073/1471] Stats: Standardise definition of -column option --- cmd/connectomestats.cpp | 10 +++---- cmd/fixelcfestats.cpp | 6 ++--- cmd/mrclusterstats.cpp | 6 ++--- cmd/vectorstats.cpp | 6 +---- core/math/stats/glm.cpp | 14 ++++++++++ core/math/stats/glm.h | 13 ++++++--- docs/reference/commands/connectomestats.rst | 29 +++++++++++++-------- docs/reference/commands/fixelcfestats.rst | 29 +++++++++++++-------- docs/reference/commands/mrclusterstats.rst | 29 +++++++++++++-------- docs/reference/commands/vectorstats.rst | 21 ++++++++++----- 10 files changed, 100 insertions(+), 63 deletions(-) diff --git a/cmd/connectomestats.cpp b/cmd/connectomestats.cpp index 423f9073d7..4f3c99248a 100644 --- a/cmd/connectomestats.cpp +++ b/cmd/connectomestats.cpp @@ -79,16 +79,12 @@ void usage () // TODO OptionGroup these, and provide a generic loader function + Stats::TFCE::Options (TFCE_DH_DEFAULT, TFCE_E_DEFAULT, TFCE_H_DEFAULT) + + Math::Stats::GLM::glm_options ("edge") + + OptionGroup ("Additional options for connectomestats") + Option ("threshold", "the t-statistic value to use in threshold-based clustering algorithms") - + Argument ("value").type_float (0.0) - - // TODO Generalise this across commands - + Option ("column", "add a column to the design matrix corresponding to subject edge-wise values " - "(the contrast vector length must include columns for these additions)").allow_multiple() - + Argument ("path").type_file_in(); - + + Argument ("value").type_float (0.0); REFERENCES + "* If using the NBS algorithm: \n" "Zalesky, A.; Fornito, A. & Bullmore, E. T. Network-based statistic: Identifying differences in brain networks. \n" diff --git a/cmd/fixelcfestats.cpp b/cmd/fixelcfestats.cpp index 2979844d2b..3eb1395783 100644 --- a/cmd/fixelcfestats.cpp +++ b/cmd/fixelcfestats.cpp @@ -102,11 +102,9 @@ void usage () + Option ("cfe_c", "cfe connectivity exponent (default: " + str(DEFAULT_CFE_C, 2) + ")") + Argument ("value").type_float (0.0, 100.0) - + OptionGroup ("Additional options for fixelcfestats") + + Math::Stats::GLM::glm_options ("fixel") - + Option ("column", "add a column to the design matrix corresponding to subject fixel-wise values " - "(the contrast vector length must include columns for these additions)").allow_multiple() - + Argument ("path").type_file_in() + + OptionGroup ("Additional options for fixelcfestats") + Option ("smooth", "smooth the fixel value along the fibre tracts using a Gaussian kernel with the supplied FWHM (default: " + str(DEFAULT_SMOOTHING_STD, 2) + "mm)") + Argument ("FWHM").type_float (0.0, 200.0) diff --git a/cmd/mrclusterstats.cpp b/cmd/mrclusterstats.cpp index e1088e6376..71cfbf37dd 100644 --- a/cmd/mrclusterstats.cpp +++ b/cmd/mrclusterstats.cpp @@ -77,16 +77,14 @@ void usage () + Stats::TFCE::Options (DEFAULT_TFCE_DH, DEFAULT_TFCE_E, DEFAULT_TFCE_H) + + Math::Stats::GLM::glm_options ("voxel") + + OptionGroup ("Additional options for mrclusterstats") + Option ("threshold", "the cluster-forming threshold to use for a standard cluster-based analysis. 
" "This disables TFCE, which is the default otherwise.") + Argument ("value").type_float (1.0e-6) - + Option ("column", "add a column to the design matrix corresponding to subject voxel-wise values " - "(the contrast vector length must include columns for these additions)").allow_multiple() - + Argument ("path").type_file_in() - + Option ("connectivity", "use 26-voxel-neighbourhood connectivity (Default: 6)"); } diff --git a/cmd/vectorstats.cpp b/cmd/vectorstats.cpp index 7548ae5f00..e4ed38bbdc 100644 --- a/cmd/vectorstats.cpp +++ b/cmd/vectorstats.cpp @@ -62,11 +62,7 @@ void usage () OPTIONS + Math::Stats::shuffle_options (false) - + OptionGroup ("Additional options for vectorstats") - - + Option ("column", "add a column to the design matrix corresponding to subject element-wise values " - "(the contrast vector length must include columns for these additions)").allow_multiple() - + Argument ("path").type_file_in(); + + Math::Stats::GLM::glm_options ("element"); } diff --git a/core/math/stats/glm.cpp b/core/math/stats/glm.cpp index 8bd1f80260..696d37a249 100644 --- a/core/math/stats/glm.cpp +++ b/core/math/stats/glm.cpp @@ -43,6 +43,20 @@ namespace MR "The contrast matrix must also reflect the presence of this additional column."; + App::OptionGroup glm_options (const std::string& element_name) + { + using namespace App; + OptionGroup result = OptionGroup ("Options related to the General Linear Model (GLM)") + + Option ("column", "add a column to the design matrix corresponding to subject " + element_name + "-wise values " + "(note that the contrast matrix must include an additional column for each use of this option); " + "the text file provided via this option should contain a file name for each subject").allow_multiple() + + Argument ("path").type_file_in(); + return result; + } + + + + matrix_type solve_betas (const matrix_type& measurements, const matrix_type& design) diff --git a/core/math/stats/glm.h b/core/math/stats/glm.h index 3f9a55be25..cdfa9fd0ff 100644 --- a/core/math/stats/glm.h +++ b/core/math/stats/glm.h @@ -15,6 +15,8 @@ #ifndef __math_stats_glm_h__ #define __math_stats_glm_h__ +#include "app.h" + #include "math/least_squares.h" #include "math/stats/import.h" #include "math/stats/typedefs.h" @@ -29,6 +31,13 @@ namespace MR { + + extern const char* const column_ones_description; + + App::OptionGroup glm_options (const std::string& element_name); + + + // TODO Define a base class to contain information regarding an individual contrast, and // pre-compute as much as possible with regards to Freedman-Lane // Note: This can be constructed for both t-tests and F-tests @@ -91,10 +100,6 @@ namespace MR - extern const char* const column_ones_description; - - - /** \addtogroup Statistics @{ */ /*! Compute a matrix of the beta coefficients diff --git a/docs/reference/commands/connectomestats.rst b/docs/reference/commands/connectomestats.rst index bd2e444f47..505c0d2425 100644 --- a/docs/reference/commands/connectomestats.rst +++ b/docs/reference/commands/connectomestats.rst @@ -24,25 +24,29 @@ Usage Description ----------- -In some software packages, a column of ones is automatically added to the GLM design matrix; the purpose of this column is to estimate the "global intercept", which is the predicted value of the observed variable if all explanatory variables were to be zero. 
However there are rare situations where including such a column would not be appropriate for a particular experiment al design; hence, in MRtrix3 statistical inference commands, it is up to the user to determine whether or not this column of ones should be included in their design matrix, and add it explicitly if necessary. The contrast matrix must also reflect the presence of this additional column. +In some software packages, a column of ones is automatically added to the GLM design matrix; the purpose of this column is to estimate the "global intercept", which is the predicted value of the observed variable if all explanatory variables were to be zero. However there are rare situations where including such a column would not be appropriate for a particular experimental design. Hence, in MRtrix3 statistical inference commands, it is up to the user to determine whether or not this column of ones should be included in their design matrix, and add it explicitly if necessary. The contrast matrix must also reflect the presence of this additional column. Options ------- -Options for permutation testing -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Options relating to shuffling of data for nonparametric statistical inference +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -- **-notest** don't perform permutation testing and only output population statistics (effect size, stdev etc) +- **-errors spec** specify nature of errors for shuffling; options are: ee,ise,both (default: ee) -- **-nperms num** the number of permutations (Default: 5000) +- **-nshuffles number** the number of shuffles (default: 5000) -- **-permutations file** manually define the permutations (relabelling). The input should be a text file defining a m x n matrix, where each relabelling is defined as a column vector of size m, and the number of columns, n, defines the number of permutations. Can be generated with the palm_quickperms function in PALM (http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/PALM). Overrides the nperms option. +- **-permutations file** manually define the permutations (relabelling). The input should be a text file defining a m x n matrix, where each relabelling is defined as a column vector of size m, and the number of columns, n, defines the number of permutations. Can be generated with the palm_quickperms function in PALM (http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/PALM). Overrides the -nshuffles option. -- **-nonstationary** perform non-stationarity correction +- **-signflips file** manually define the signflips -- **-nperms_nonstationary num** the number of permutations used when precomputing the empirical statistic image for nonstationary correction (Default: 5000) +- **-nonstationarity** perform non-stationarity correction -- **-permutations_nonstationary file** manually define the permutations (relabelling) for computing the emprical statistic image for nonstationary correction. The input should be a text file defining a m x n matrix, where each relabelling is defined as a column vector of size m, and the number of columns, n, defines the number of permutations. Can be generated with the palm_quickperms function in PALM (http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/PALM) Overrides the nperms_nonstationary option. 
+- **-nshuffles_nonstationary number** the number of shuffles to use when precomputing the empirical statistic image for non-stationarity correction (default: 5000) + +- **-permutations_nonstationarity file** manually define the permutations (relabelling) for computing the emprical statistics for non-stationarity correction. The input should be a text file defining a m x n matrix, where each relabelling is defined as a column vector of size m, and the number of columns, n, defines the number of permutations. Can be generated with the palm_quickperms function in PALM (http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/PALM) Overrides the -nshuffles_nonstationarity option. + +- **-signflips_nonstationarity file** manually define the signflips for computing the empirical statistics for non-stationarity correction Options for controlling TFCE behaviour ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -53,13 +57,16 @@ Options for controlling TFCE behaviour - **-tfce_h value** tfce height exponent (default: 3) +Options related to the General Linear Model (GLM) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +- **-column path** add a column to the design matrix corresponding to subject edge-wise values (note that the contrast matrix must include an additional column for each use of this option); the text file provided via this option should contain a file name for each subject + Additional options for connectomestats ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - **-threshold value** the t-statistic value to use in threshold-based clustering algorithms -- **-column path** add a column to the design matrix corresponding to subject edge-wise values (the contrast vector length must include columns for these additions) - Standard options ^^^^^^^^^^^^^^^^ diff --git a/docs/reference/commands/fixelcfestats.rst b/docs/reference/commands/fixelcfestats.rst index 89e3f0d722..ca44027c97 100644 --- a/docs/reference/commands/fixelcfestats.rst +++ b/docs/reference/commands/fixelcfestats.rst @@ -25,25 +25,29 @@ Usage Description ----------- -In some software packages, a column of ones is automatically added to the GLM design matrix; the purpose of this column is to estimate the "global intercept", which is the predicted value of the observed variable if all explanatory variables were to be zero. However there are rare situations where including such a column would not be appropriate for a particular experiment al design; hence, in MRtrix3 statistical inference commands, it is up to the user to determine whether or not this column of ones should be included in their design matrix, and add it explicitly if necessary. The contrast matrix must also reflect the presence of this additional column. +In some software packages, a column of ones is automatically added to the GLM design matrix; the purpose of this column is to estimate the "global intercept", which is the predicted value of the observed variable if all explanatory variables were to be zero. However there are rare situations where including such a column would not be appropriate for a particular experimental design. Hence, in MRtrix3 statistical inference commands, it is up to the user to determine whether or not this column of ones should be included in their design matrix, and add it explicitly if necessary. The contrast matrix must also reflect the presence of this additional column. 
Options ------- -Options for permutation testing -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Options relating to shuffling of data for nonparametric statistical inference +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -- **-notest** don't perform permutation testing and only output population statistics (effect size, stdev etc) +- **-errors spec** specify nature of errors for shuffling; options are: ee,ise,both (default: ee) -- **-nperms num** the number of permutations (Default: 5000) +- **-nshuffles number** the number of shuffles (default: 5000) -- **-permutations file** manually define the permutations (relabelling). The input should be a text file defining a m x n matrix, where each relabelling is defined as a column vector of size m, and the number of columns, n, defines the number of permutations. Can be generated with the palm_quickperms function in PALM (http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/PALM). Overrides the nperms option. +- **-permutations file** manually define the permutations (relabelling). The input should be a text file defining a m x n matrix, where each relabelling is defined as a column vector of size m, and the number of columns, n, defines the number of permutations. Can be generated with the palm_quickperms function in PALM (http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/PALM). Overrides the -nshuffles option. -- **-nonstationary** perform non-stationarity correction +- **-signflips file** manually define the signflips -- **-nperms_nonstationary num** the number of permutations used when precomputing the empirical statistic image for nonstationary correction (Default: 5000) +- **-nonstationarity** perform non-stationarity correction -- **-permutations_nonstationary file** manually define the permutations (relabelling) for computing the emprical statistic image for nonstationary correction. The input should be a text file defining a m x n matrix, where each relabelling is defined as a column vector of size m, and the number of columns, n, defines the number of permutations. Can be generated with the palm_quickperms function in PALM (http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/PALM) Overrides the nperms_nonstationary option. +- **-nshuffles_nonstationary number** the number of shuffles to use when precomputing the empirical statistic image for non-stationarity correction (default: 5000) + +- **-permutations_nonstationarity file** manually define the permutations (relabelling) for computing the emprical statistics for non-stationarity correction. The input should be a text file defining a m x n matrix, where each relabelling is defined as a column vector of size m, and the number of columns, n, defines the number of permutations. Can be generated with the palm_quickperms function in PALM (http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/PALM) Overrides the -nshuffles_nonstationarity option. 
+ +- **-signflips_nonstationarity file** manually define the signflips for computing the empirical statistics for non-stationarity correction Parameters for the Connectivity-based Fixel Enhancement algorithm ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -56,11 +60,14 @@ Parameters for the Connectivity-based Fixel Enhancement algorithm - **-cfe_c value** cfe connectivity exponent (default: 0.5) +Options related to the General Linear Model (GLM) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +- **-column path** add a column to the design matrix corresponding to subject fixel-wise values (note that the contrast matrix must include an additional column for each use of this option); the text file provided via this option should contain a file name for each subject + Additional options for fixelcfestats ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -- **-column path** add a column to the design matrix corresponding to subject fixel-wise values (the contrast vector length must include columns for these additions) - - **-smooth FWHM** smooth the fixel value along the fibre tracts using a Gaussian kernel with the supplied FWHM (default: 10mm) - **-connectivity threshold** a threshold to define the required fraction of shared connections to be included in the neighbourhood (default: 0.01) diff --git a/docs/reference/commands/mrclusterstats.rst b/docs/reference/commands/mrclusterstats.rst index 4ab07c910e..5cb911fe94 100644 --- a/docs/reference/commands/mrclusterstats.rst +++ b/docs/reference/commands/mrclusterstats.rst @@ -24,25 +24,29 @@ Usage Description ----------- -In some software packages, a column of ones is automatically added to the GLM design matrix; the purpose of this column is to estimate the "global intercept", which is the predicted value of the observed variable if all explanatory variables were to be zero. However there are rare situations where including such a column would not be appropriate for a particular experiment al design; hence, in MRtrix3 statistical inference commands, it is up to the user to determine whether or not this column of ones should be included in their design matrix, and add it explicitly if necessary. The contrast matrix must also reflect the presence of this additional column. +In some software packages, a column of ones is automatically added to the GLM design matrix; the purpose of this column is to estimate the "global intercept", which is the predicted value of the observed variable if all explanatory variables were to be zero. However there are rare situations where including such a column would not be appropriate for a particular experimental design. Hence, in MRtrix3 statistical inference commands, it is up to the user to determine whether or not this column of ones should be included in their design matrix, and add it explicitly if necessary. The contrast matrix must also reflect the presence of this additional column. 
Options ------- -Options for permutation testing -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Options relating to shuffling of data for nonparametric statistical inference +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -- **-notest** don't perform permutation testing and only output population statistics (effect size, stdev etc) +- **-errors spec** specify nature of errors for shuffling; options are: ee,ise,both (default: ee) -- **-nperms num** the number of permutations (Default: 5000) +- **-nshuffles number** the number of shuffles (default: 5000) -- **-permutations file** manually define the permutations (relabelling). The input should be a text file defining a m x n matrix, where each relabelling is defined as a column vector of size m, and the number of columns, n, defines the number of permutations. Can be generated with the palm_quickperms function in PALM (http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/PALM). Overrides the nperms option. +- **-permutations file** manually define the permutations (relabelling). The input should be a text file defining a m x n matrix, where each relabelling is defined as a column vector of size m, and the number of columns, n, defines the number of permutations. Can be generated with the palm_quickperms function in PALM (http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/PALM). Overrides the -nshuffles option. -- **-nonstationary** perform non-stationarity correction +- **-signflips file** manually define the signflips -- **-nperms_nonstationary num** the number of permutations used when precomputing the empirical statistic image for nonstationary correction (Default: 5000) +- **-nonstationarity** perform non-stationarity correction -- **-permutations_nonstationary file** manually define the permutations (relabelling) for computing the emprical statistic image for nonstationary correction. The input should be a text file defining a m x n matrix, where each relabelling is defined as a column vector of size m, and the number of columns, n, defines the number of permutations. Can be generated with the palm_quickperms function in PALM (http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/PALM) Overrides the nperms_nonstationary option. +- **-nshuffles_nonstationary number** the number of shuffles to use when precomputing the empirical statistic image for non-stationarity correction (default: 5000) + +- **-permutations_nonstationarity file** manually define the permutations (relabelling) for computing the emprical statistics for non-stationarity correction. The input should be a text file defining a m x n matrix, where each relabelling is defined as a column vector of size m, and the number of columns, n, defines the number of permutations. Can be generated with the palm_quickperms function in PALM (http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/PALM) Overrides the -nshuffles_nonstationarity option. 
+ +- **-signflips_nonstationarity file** manually define the signflips for computing the empirical statistics for non-stationarity correction Options for controlling TFCE behaviour ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -53,13 +57,16 @@ Options for controlling TFCE behaviour - **-tfce_h value** tfce height exponent (default: 2) +Options related to the General Linear Model (GLM) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +- **-column path** add a column to the design matrix corresponding to subject voxel-wise values (note that the contrast matrix must include an additional column for each use of this option); the text file provided via this option should contain a file name for each subject + Additional options for mrclusterstats ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - **-threshold value** the cluster-forming threshold to use for a standard cluster-based analysis. This disables TFCE, which is the default otherwise. -- **-column path** add a column to the design matrix corresponding to subject voxel-wise values (the contrast vector length must include columns for these additions) - - **-connectivity** use 26-voxel-neighbourhood connectivity (Default: 6) Standard options diff --git a/docs/reference/commands/vectorstats.rst b/docs/reference/commands/vectorstats.rst index 55cfd7c0d1..5847f50aa5 100644 --- a/docs/reference/commands/vectorstats.rst +++ b/docs/reference/commands/vectorstats.rst @@ -23,19 +23,28 @@ Usage Description ----------- -In some software packages, a column of ones is automatically added to the GLM design matrix; the purpose of this column is to estimate the "global intercept", which is the predicted value of the observed variable if all explanatory variables were to be zero. However there are rare situations where including such a column would not be appropriate for a particular experiment al design; hence, in MRtrix3 statistical inference commands, it is up to the user to determine whether or not this column of ones should be included in their design matrix, and add it explicitly if necessary. The contrast matrix must also reflect the presence of this additional column. +This command can be used to perform permutation testing of any form of data. The data for each input subject must be stored in a text file, with one value per row. The data for each row across subjects will be tested independently, i.e. there is no statistical enhancement that occurs between the data; however family-wise error control will be used. + +In some software packages, a column of ones is automatically added to the GLM design matrix; the purpose of this column is to estimate the "global intercept", which is the predicted value of the observed variable if all explanatory variables were to be zero. However there are rare situations where including such a column would not be appropriate for a particular experimental design. Hence, in MRtrix3 statistical inference commands, it is up to the user to determine whether or not this column of ones should be included in their design matrix, and add it explicitly if necessary. The contrast matrix must also reflect the presence of this additional column. 
Options ------- -Options for permutation testing -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Options relating to shuffling of data for nonparametric statistical inference +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +- **-errors spec** specify nature of errors for shuffling; options are: ee,ise,both (default: ee) + +- **-nshuffles number** the number of shuffles (default: 5000) + +- **-permutations file** manually define the permutations (relabelling). The input should be a text file defining a m x n matrix, where each relabelling is defined as a column vector of size m, and the number of columns, n, defines the number of permutations. Can be generated with the palm_quickperms function in PALM (http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/PALM). Overrides the -nshuffles option. -- **-notest** don't perform permutation testing and only output population statistics (effect size, stdev etc) +- **-signflips file** manually define the signflips -- **-nperms num** the number of permutations (Default: 5000) +Options related to the General Linear Model (GLM) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -- **-permutations file** manually define the permutations (relabelling). The input should be a text file defining a m x n matrix, where each relabelling is defined as a column vector of size m, and the number of columns, n, defines the number of permutations. Can be generated with the palm_quickperms function in PALM (http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/PALM). Overrides the nperms option. +- **-column path** add a column to the design matrix corresponding to subject element-wise values (note that the contrast matrix must include an additional column for each use of this option); the text file provided via this option should contain a file name for each subject Standard options ^^^^^^^^^^^^^^^^ From 44776387e084c985823f5fbb28da429f1f4b8719 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Mon, 4 Dec 2017 22:15:01 +1100 Subject: [PATCH 0074/1471] Stats commands: New options -ftests, -fonly * New option -ftests: Provide a matrix containing zeros and ones that specifies a set of F-tests, where each F-test includes some subset of the rows in the contrast matrix. * New option -fonly: Only perform F-tests. Once the F-tests have been constructed based on selecting rows in the contrast matrix, discard the t-tests constructed from the contrast matrix such that statistical testing is not performed and outputs are not generated. 
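As an illustration of the intended usage (hypothetical file contents, not taken from this patch): for a contrast matrix with three rows, e.g.

    1 -1  0
    0  1 -1
    1  0 -1

a text file passed to -ftests containing

    1 0
    1 1
    0 1

would define two F-tests, the first combining contrast rows 1 and 2 and the second combining rows 2 and 3; adding -fonly would then restrict statistical testing to those two F-tests, discarding the three row-wise t-tests.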
--- cmd/connectomestats.cpp | 16 +++---- cmd/fixelcfestats.cpp | 10 ++--- cmd/mrclusterstats.cpp | 12 ++--- cmd/vectorstats.cpp | 13 ++---- core/math/stats/glm.cpp | 49 +++++++++++++++++++++ core/math/stats/glm.h | 6 +++ docs/reference/commands/connectomestats.rst | 4 ++ docs/reference/commands/fixelcfestats.rst | 4 ++ docs/reference/commands/mrclusterstats.rst | 4 ++ docs/reference/commands/vectorstats.rst | 4 ++ 10 files changed, 87 insertions(+), 35 deletions(-) diff --git a/cmd/connectomestats.cpp b/cmd/connectomestats.cpp index 4f3c99248a..aca42e98b9 100644 --- a/cmd/connectomestats.cpp +++ b/cmd/connectomestats.cpp @@ -206,19 +206,13 @@ void run() if (size_t(design.rows()) != importer.size()) throw Exception ("number of subjects (" + str(importer.size()) + ") does not match number of rows in design matrix (" + str(design.rows()) + ")"); - // Load contrast matrix - // TODO Eventually this should be functionalised, and include F-tests - // TODO Eventually will want ability to disable t-test output, and output F-tests only - vector contrasts; - { - const matrix_type contrast_matrix = load_matrix (argument[3]); - for (ssize_t row = 0; row != contrast_matrix.rows(); ++row) - contrasts.emplace_back (Contrast (contrast_matrix.row (row))); - } + // Load contrasts + const vector contrasts = Math::Stats::GLM::load_contrasts (argument[3]); const size_t num_contrasts = contrasts.size(); + CONSOLE ("Number of contrasts: " + str(num_contrasts)); - // Before validating the contrast matrix, we first need to see if there are any - // additional design matrix columns coming from fixel-wise subject data + // Before validating the contrasts, we first need to see if there are any + // additional design matrix columns coming from edge-wise subject data vector extra_columns; bool nans_in_columns = false; auto opt = get_options ("column"); diff --git a/cmd/fixelcfestats.cpp b/cmd/fixelcfestats.cpp index 3eb1395783..c63d29dad1 100644 --- a/cmd/fixelcfestats.cpp +++ b/cmd/fixelcfestats.cpp @@ -234,14 +234,10 @@ void run() if (design.rows() != (ssize_t)importer.size()) throw Exception ("number of input files does not match number of rows in design matrix"); - // Load contrast matrix - vector contrasts; - { - const matrix_type contrast_matrix = load_matrix (argument[3]); - for (ssize_t row = 0; row != contrast_matrix.rows(); ++row) - contrasts.emplace_back (Contrast (contrast_matrix.row (row))); - } + // Load contrasts + const vector contrasts = Math::Stats::GLM::load_contrasts (argument[3]); const size_t num_contrasts = contrasts.size(); + CONSOLE ("Number of contrasts: " + str(num_contrasts)); // Before validating the contrast matrix, we first need to see if there are any // additional design matrix columns coming from fixel-wise subject data diff --git a/cmd/mrclusterstats.cpp b/cmd/mrclusterstats.cpp index 71cfbf37dd..50df7f13da 100644 --- a/cmd/mrclusterstats.cpp +++ b/cmd/mrclusterstats.cpp @@ -204,16 +204,12 @@ void run() { if (design.rows() != (ssize_t)importer.size()) throw Exception ("number of input files does not match number of rows in design matrix"); - // Load contrast matrix - vector contrasts; - { - const matrix_type contrast_matrix = load_matrix (argument[2]); - for (ssize_t row = 0; row != contrast_matrix.rows(); ++row) - contrasts.emplace_back (Contrast (contrast_matrix.row (row))); - } + // Load contrasts + const vector contrasts = Math::Stats::GLM::load_contrasts (argument[2]); const size_t num_contrasts = contrasts.size(); + CONSOLE ("Number of contrasts: " + str(num_contrasts)); - // 
Before validating the contrast matrix, we first need to see if there are any + // Before validating the contrasts, we first need to see if there are any // additional design matrix columns coming from voxel-wise subject data // TODO Functionalise this vector extra_columns; diff --git a/cmd/vectorstats.cpp b/cmd/vectorstats.cpp index e4ed38bbdc..ea2ad5a81f 100644 --- a/cmd/vectorstats.cpp +++ b/cmd/vectorstats.cpp @@ -128,18 +128,13 @@ void run() if (size_t(design.rows()) != num_subjects) throw Exception ("Number of subjects (" + str(num_subjects) + ") does not match number of rows in design matrix (" + str(design.rows()) + ")"); - // Load contrast matrix - vector contrasts; - { - const matrix_type contrast_matrix = load_matrix (argument[2]); - for (ssize_t row = 0; row != contrast_matrix.rows(); ++row) - contrasts.emplace_back (Contrast (contrast_matrix.row (row))); - } + // Load contrasts + const vector contrasts = Math::Stats::GLM::load_contrasts (argument[2]); const size_t num_contrasts = contrasts.size(); CONSOLE ("Number of contrasts: " + str(num_contrasts)); - // Before validating the contrast matrix, we first need to see if there are any - // additional design matrix columns coming from voxel-wise subject data + // Before validating the contrasts, we first need to see if there are any + // additional design matrix columns coming from element-wise subject data vector extra_columns; bool nans_in_columns = false; auto opt = get_options ("column"); diff --git a/core/math/stats/glm.cpp b/core/math/stats/glm.cpp index 696d37a249..32cde41e8f 100644 --- a/core/math/stats/glm.cpp +++ b/core/math/stats/glm.cpp @@ -43,20 +43,69 @@ namespace MR "The contrast matrix must also reflect the presence of this additional column."; + App::OptionGroup glm_options (const std::string& element_name) { using namespace App; OptionGroup result = OptionGroup ("Options related to the General Linear Model (GLM)") + + + Option ("ftests", "perform F-tests; input text file should contain, for each F-test, a column containing " + "ones and zeros, where ones indicate those rows of the contrast matrix to be included " + "in the F-test.") + + Argument ("path").type_file_in() + + + Option ("fonly", "only assess F-tests; do not perform statistical inference on entries in the contrast matrix") + + Option ("column", "add a column to the design matrix corresponding to subject " + element_name + "-wise values " "(note that the contrast matrix must include an additional column for each use of this option); " "the text file provided via this option should contain a file name for each subject").allow_multiple() + Argument ("path").type_file_in(); + return result; } + vector load_contrasts (const std::string& file_path) + { + vector contrasts; + const matrix_type contrast_matrix = load_matrix (file_path); + for (ssize_t row = 0; row != contrast_matrix.rows(); ++row) + contrasts.emplace_back (Contrast (contrast_matrix.row (row))); + auto opt = App::get_options ("ftests"); + if (opt.size()) { + const matrix_type ftest_matrix = load_matrix (opt[0][0]); + if (ftest_matrix.rows() != contrast_matrix.rows()) + throw Exception ("Number of rows in F-test matrix (" + str(ftest_matrix.rows()) + ") does not match number of rows in contrast matrix (" + str(contrast_matrix.rows()) + ")"); + if (!((ftest_matrix.array() == 0.0) + (ftest_matrix.array() == 1.0)).all()) + throw Exception ("F-test array must contain ones and zeros only"); + for (ssize_t ftest_index = 0; ftest_index != ftest_matrix.cols(); ++ftest_index) { + if (!ftest_matrix.col 
(ftest_index).count()) + throw Exception ("Column " + sstr(ftest_index+1) + " of F-test matrix does not contain any ones"); + matrix_type this_f_matrix (ftest_matrix.col (ftest_index).count(), contrast_matrix.cols()); + ssize_t ftest_row = 0; + for (ssize_t contrast_row = 0; contrast_row != contrast_matrix.rows(); ++contrast_row) { + if (ftest_matrix (contrast_row, ftest_index)) + this_f_matrix.row (ftest_row++) = contrast_matrix.row (contrast_row); + } + contrasts.emplace_back (Contrast (this_f_matrix)); + } + if (App::get_options ("fonly").size()) { + vector new_contrasts; + for (size_t index = contrast_matrix.rows(); index != contrasts.size(); ++index) + new_contrasts.push_back (std::move (contrasts[index])); + std::swap (contrasts, new_contrasts); + } + } else if (App::get_options ("fonly").size()) { + throw Exception ("Cannot perform F-tests exclusively: No F-test matrix was provided"); + } + return contrasts; + } + + + + matrix_type solve_betas (const matrix_type& measurements, const matrix_type& design) diff --git a/core/math/stats/glm.h b/core/math/stats/glm.h index cdfa9fd0ff..22d6c88cd4 100644 --- a/core/math/stats/glm.h +++ b/core/math/stats/glm.h @@ -16,6 +16,7 @@ #define __math_stats_glm_h__ #include "app.h" +#include "types.h" #include "math/least_squares.h" #include "math/stats/import.h" @@ -100,6 +101,11 @@ namespace MR + + vector load_contrasts (const std::string& file_path); + + + /** \addtogroup Statistics @{ */ /*! Compute a matrix of the beta coefficients diff --git a/docs/reference/commands/connectomestats.rst b/docs/reference/commands/connectomestats.rst index 505c0d2425..367be0a0d4 100644 --- a/docs/reference/commands/connectomestats.rst +++ b/docs/reference/commands/connectomestats.rst @@ -60,6 +60,10 @@ Options for controlling TFCE behaviour Options related to the General Linear Model (GLM) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +- **-ftests path** perform F-tests; input text file should contain, for each F-test, a column containing ones and zeros, where ones indicate those rows of the contrast matrix to be included in the F-test. + +- **-fonly** only assess F-tests; do not perform statistical inference on entries in the contrast matrix + - **-column path** add a column to the design matrix corresponding to subject edge-wise values (note that the contrast matrix must include an additional column for each use of this option); the text file provided via this option should contain a file name for each subject Additional options for connectomestats diff --git a/docs/reference/commands/fixelcfestats.rst b/docs/reference/commands/fixelcfestats.rst index ca44027c97..4e9bd62999 100644 --- a/docs/reference/commands/fixelcfestats.rst +++ b/docs/reference/commands/fixelcfestats.rst @@ -63,6 +63,10 @@ Parameters for the Connectivity-based Fixel Enhancement algorithm Options related to the General Linear Model (GLM) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +- **-ftests path** perform F-tests; input text file should contain, for each F-test, a column containing ones and zeros, where ones indicate those rows of the contrast matrix to be included in the F-test. 
+ +- **-fonly** only assess F-tests; do not perform statistical inference on entries in the contrast matrix + - **-column path** add a column to the design matrix corresponding to subject fixel-wise values (note that the contrast matrix must include an additional column for each use of this option); the text file provided via this option should contain a file name for each subject Additional options for fixelcfestats diff --git a/docs/reference/commands/mrclusterstats.rst b/docs/reference/commands/mrclusterstats.rst index 5cb911fe94..138134afd6 100644 --- a/docs/reference/commands/mrclusterstats.rst +++ b/docs/reference/commands/mrclusterstats.rst @@ -60,6 +60,10 @@ Options for controlling TFCE behaviour Options related to the General Linear Model (GLM) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +- **-ftests path** perform F-tests; input text file should contain, for each F-test, a column containing ones and zeros, where ones indicate those rows of the contrast matrix to be included in the F-test. + +- **-fonly** only assess F-tests; do not perform statistical inference on entries in the contrast matrix + - **-column path** add a column to the design matrix corresponding to subject voxel-wise values (note that the contrast matrix must include an additional column for each use of this option); the text file provided via this option should contain a file name for each subject Additional options for mrclusterstats diff --git a/docs/reference/commands/vectorstats.rst b/docs/reference/commands/vectorstats.rst index 5847f50aa5..7a1245c196 100644 --- a/docs/reference/commands/vectorstats.rst +++ b/docs/reference/commands/vectorstats.rst @@ -44,6 +44,10 @@ Options relating to shuffling of data for nonparametric statistical inference Options related to the General Linear Model (GLM) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +- **-ftests path** perform F-tests; input text file should contain, for each F-test, a column containing ones and zeros, where ones indicate those rows of the contrast matrix to be included in the F-test. + +- **-fonly** only assess F-tests; do not perform statistical inference on entries in the contrast matrix + - **-column path** add a column to the design matrix corresponding to subject element-wise values (note that the contrast matrix must include an additional column for each use of this option); the text file provided via this option should contain a file name for each subject Standard options From e96f7db8a193cd3d36a4ebfbb426976365ea2c8b Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Wed, 6 Dec 2017 12:36:13 +1100 Subject: [PATCH 0075/1471] Many updates as part of GLM upgrades - Data measurements are now transposed with respect to the previous behaviour: Data elements across columns, and subjects across rows. This means that all subject data for any particular element being tested are stored within a single column, which is consistent with Eigen's default column-major storage. This also removes many uses of the .transpose() function that were previously required. - Each contrast is given a unique name, in order to separate t-test and F-test file outputs more clearly. - abs_effect and std_effect outputs are skipped for F-tests, as these do not make sense. - Fixes and general neatening of code for generating default permutation statistics. - Some neatening and fixing of GLM::TestFixed functor to work with F-tests. Note however that rank-deficient F-tests will currently cause an error. 
- Fix generation of sign-flipping matrices under assumption of independent and symmetric errors; one-sample t-tests now appear to be working. - Perform vectorstats tests; note that the generation of test data, and subsequent testing of vectorstats output data, are done by scripts stored in the testing data repository, in order for the tests themselves to each execute via a single line of bash. --- cmd/connectomestats.cpp | 22 ++--- cmd/fixelcfestats.cpp | 30 +++---- cmd/mrclusterstats.cpp | 32 ++++--- cmd/vectorstats.cpp | 20 +++-- core/math/stats/glm.cpp | 163 ++++++++++++++++++------------------ core/math/stats/glm.h | 40 ++++----- core/math/stats/import.h | 12 +-- core/math/stats/shuffle.cpp | 15 +++- src/stats/permtest.cpp | 5 +- testing/data | 2 +- testing/tests/vectorstats | 42 +--------- 11 files changed, 177 insertions(+), 206 deletions(-) diff --git a/cmd/connectomestats.cpp b/cmd/connectomestats.cpp index aca42e98b9..f860559acc 100644 --- a/cmd/connectomestats.cpp +++ b/cmd/connectomestats.cpp @@ -131,10 +131,10 @@ class SubjectConnectomeImport : public SubjectDataImportBase mat2vec.M2V (M, data); } - void operator() (matrix_type::ColXpr column) const override + void operator() (matrix_type::RowXpr row) const override { - assert (column.rows() == data.size()); - column = data; + assert (row.size() == data.size()); + row = data; } default_type operator[] (const size_t index) const override @@ -243,22 +243,22 @@ void run() // For compatibility with existing statistics code, symmetric matrix data is adjusted // into vector form - one row per edge in the symmetric connectome. This has already // been performed when the CohortDataImport class is initialised. - matrix_type data (num_edges, importer.size()); + matrix_type data (importer.size(), num_edges); { ProgressBar progress ("Agglomerating input connectome data", importer.size()); for (size_t subject = 0; subject < importer.size(); subject++) { - (*importer[subject]) (data.col (subject)); + (*importer[subject]) (data.row (subject)); ++progress; } } const bool nans_in_data = data.allFinite(); // Only add contrast row number to image outputs if there's more than one contrast - auto postfix = [&] (const size_t i) { return (num_contrasts > 1) ? ("_" + str(i)) : ""; }; + auto postfix = [&] (const size_t i) { return (num_contrasts > 1) ? 
("_" + contrasts[i].name()) : ""; }; { matrix_type betas (num_factors, num_edges); - matrix_type abs_effect_size (num_contrasts, num_edges), std_effect_size (num_contrasts, num_edges); + matrix_type abs_effect_size (num_edges, num_contrasts), std_effect_size (num_edges, num_contrasts); vector_type stdev (num_edges); Math::Stats::GLM::all_stats (data, design, extra_columns, contrasts, @@ -271,8 +271,10 @@ void run() ++progress; } for (size_t i = 0; i != num_contrasts; ++i) { - save_matrix (mat2vec.V2M (abs_effect_size.row(i)), "abs_effect" + postfix(i) + ".csv"); ++progress; - save_matrix (mat2vec.V2M (std_effect_size.row(i)), "std_effect" + postfix(i) + ".csv"); ++progress; + if (!contrasts[i].is_F()) { + save_matrix (mat2vec.V2M (abs_effect_size.row(i)), "abs_effect" + postfix(i) + ".csv"); ++progress; + save_matrix (mat2vec.V2M (std_effect_size.row(i)), "std_effect" + postfix(i) + ".csv"); ++progress; + } } save_matrix (mat2vec.V2M (stdev), "std_dev.csv"); } @@ -300,7 +302,7 @@ void run() Stats::PermTest::precompute_default_permutation (glm_test, enhancer, empirical_statistic, enhanced_output, tvalue_output); for (size_t i = 0; i != num_contrasts; ++i) { - save_matrix (mat2vec.V2M (tvalue_output.row(i)), output_prefix + "_tvalue" + postfix(i) + ".csv"); + save_matrix (mat2vec.V2M (tvalue_output.row(i)), output_prefix + "_" + (contrasts[i].is_F() ? "F" : "t") + "value" + postfix(i) + ".csv"); save_matrix (mat2vec.V2M (enhanced_output.row(i)), output_prefix + "_enhanced" + postfix(i) + ".csv"); } diff --git a/cmd/fixelcfestats.cpp b/cmd/fixelcfestats.cpp index c63d29dad1..3043ee2085 100644 --- a/cmd/fixelcfestats.cpp +++ b/cmd/fixelcfestats.cpp @@ -150,11 +150,11 @@ class SubjectFixelImport : public SubjectDataImportBase } } - void operator() (matrix_type::ColXpr column) const override + void operator() (matrix_type::RowXpr row) const override { - assert (column.rows() == size()); + assert (row.size() == size()); Image temp (data); // For thread-safety - column = temp.row(0); + row = temp.row(0); } default_type operator[] (const size_t index) const override @@ -353,21 +353,21 @@ void run() // Load input data - matrix_type data = matrix_type::Zero (num_fixels, importer.size()); + matrix_type data = matrix_type::Zero (importer.size(), num_fixels); bool nans_in_data = false; { ProgressBar progress ("loading input images", importer.size()); for (size_t subject = 0; subject < importer.size(); subject++) { - (*importer[subject]) (data.col (subject)); + (*importer[subject]) (data.row (subject)); // Smooth the data vector_type smoothed_data (vector_type::Zero (num_fixels)); for (size_t fixel = 0; fixel < num_fixels; ++fixel) { - if (std::isfinite (data (fixel, subject))) { + if (std::isfinite (data (subject, fixel))) { value_type value = 0.0, sum_weights = 0.0; std::map::const_iterator it = smoothing_weights[fixel].begin(); for (; it != smoothing_weights[fixel].end(); ++it) { - if (std::isfinite (data (it->first, subject))) { - value += data (it->first, subject) * it->second; + if (std::isfinite (data (subject, it->first))) { + value += data (subject, it->first) * it->second; sum_weights += it->second; } } @@ -379,7 +379,7 @@ void run() smoothed_data (fixel) = NaN; } } - data.col (subject) = smoothed_data; + data.row (subject) = smoothed_data; if (!smoothed_data.allFinite()) nans_in_data = true; } @@ -393,11 +393,11 @@ void run() } // Only add contrast row number to image outputs if there's more than one contrast - auto postfix = [&] (const size_t i) { return (num_contrasts > 1) ? 
("_" + str(i)) : ""; }; + auto postfix = [&] (const size_t i) { return (num_contrasts > 1) ? ("_" + contrasts[i].name()) : ""; }; { matrix_type betas (num_factors, num_fixels); - matrix_type abs_effect_size (num_contrasts, num_fixels), std_effect_size (num_contrasts, num_fixels); + matrix_type abs_effect_size (num_fixels, num_contrasts), std_effect_size (num_fixels, num_contrasts); vector_type stdev (num_fixels); Math::Stats::GLM::all_stats (data, design, extra_columns, contrasts, @@ -409,8 +409,10 @@ void run() ++progress; } for (size_t i = 0; i != num_contrasts; ++i) { - write_fixel_output (Path::join (output_fixel_directory, "abs_effect" + postfix(i) + ".mif"), abs_effect_size.row(i), output_header); ++progress; - write_fixel_output (Path::join (output_fixel_directory, "std_effect" + postfix(i) + ".mif"), std_effect_size.row(i), output_header); ++progress; + if (!contrasts[i].is_F()) { + write_fixel_output (Path::join (output_fixel_directory, "abs_effect" + postfix(i) + ".mif"), abs_effect_size.row(i), output_header); ++progress; + write_fixel_output (Path::join (output_fixel_directory, "std_effect" + postfix(i) + ".mif"), std_effect_size.row(i), output_header); ++progress; + } } write_fixel_output (Path::join (output_fixel_directory, "std_dev.mif"), stdev, output_header); } @@ -445,7 +447,7 @@ void run() for (size_t i = 0; i != num_contrasts; ++i) { write_fixel_output (Path::join (output_fixel_directory, "cfe" + postfix(i) + ".mif"), cfe_output.row(i), output_header); - write_fixel_output (Path::join (output_fixel_directory, "tvalue" + postfix(i) + ".mif"), tvalue_output.row(i), output_header); + write_fixel_output (Path::join (output_fixel_directory, (contrasts[i].is_F() ? "F" : "t") + "value" + postfix(i) + ".mif"), tvalue_output.row(i), output_header); } // Perform permutation testing diff --git a/cmd/mrclusterstats.cpp b/cmd/mrclusterstats.cpp index 50df7f13da..bf268312b8 100644 --- a/cmd/mrclusterstats.cpp +++ b/cmd/mrclusterstats.cpp @@ -130,14 +130,14 @@ class SubjectVoxelImport : public SubjectDataImportBase H (Header::open (path)), data (H.get_image()) { } - void operator() (matrix_type::ColXpr column) const override + void operator() (matrix_type::RowXpr row) const override { assert (v2v); - assert (column.rows() == size()); + assert (row.size() == size()); Image temp (data); // For thread-safety for (size_t i = 0; i != size(); ++i) { assign_pos_of ((*v2v)[i]).to (temp); - column[i] = temp.value(); + row[i] = temp.value(); } } @@ -233,18 +233,16 @@ void run() { + " does not equal the number of columns in the design matrix (" + str(design.cols()) + ")" + (extra_columns.size() ? 
" (taking into account the " + str(extra_columns.size()) + " uses of -column)" : "")); - matrix_type data (num_voxels, importer.size()); - bool nans_in_data = false; + matrix_type data (importer.size(), num_voxels); { // Load images ProgressBar progress ("loading input images", importer.size()); for (size_t subject = 0; subject < importer.size(); subject++) { - (*importer[subject]) (data.col (subject)); - if (!data.col (subject).allFinite()) - nans_in_data = true; + (*importer[subject]) (data.row (subject)); progress++; } } + const bool nans_in_data = !data.allFinite(); if (nans_in_data) { INFO ("Non-finite values present in data; rows will be removed from voxel-wise design matrices accordingly"); if (!extra_columns.size()) { @@ -267,16 +265,12 @@ void run() { const std::string prefix (argument[4]); - matrix_type default_cluster_output (num_contrasts, num_voxels); - matrix_type tvalue_output (num_contrasts, num_voxels); - matrix_type empirical_enhanced_statistic; - // Only add contrast row number to image outputs if there's more than one contrast - auto postfix = [&] (const size_t i) { return (num_contrasts > 1) ? ("_" + str(i)) : ""; }; + auto postfix = [&] (const size_t i) { return (num_contrasts > 1) ? ("_" + contrasts[i].name()) : ""; }; { - matrix_type betas (num_contrasts, num_voxels); - matrix_type abs_effect_size (num_contrasts, num_voxels), std_effect_size (num_contrasts, num_voxels); + matrix_type betas (num_factors, num_voxels); + matrix_type abs_effect_size (num_voxels, num_contrasts), std_effect_size (num_voxels, num_contrasts); vector_type stdev (num_voxels); Math::Stats::GLM::all_stats (data, design, extra_columns, contrasts, @@ -288,8 +282,10 @@ void run() { ++progress; } for (size_t i = 0; i != num_contrasts; ++i) { - write_output (abs_effect_size.row(i), v2v, prefix + "abs_effect" + postfix(i) + ".mif", output_header); ++progress; - write_output (std_effect_size.row(i), v2v, prefix + "std_effect" + postfix(i) + ".mif", output_header); ++progress; + if (!contrasts[i].is_F()) { + write_output (abs_effect_size.row(i), v2v, prefix + "abs_effect" + postfix(i) + ".mif", output_header); ++progress; + write_output (std_effect_size.row(i), v2v, prefix + "std_effect" + postfix(i) + ".mif", output_header); ++progress; + } } write_output (stdev, v2v, prefix + "std_dev.mif", output_header); } @@ -310,6 +306,7 @@ void run() { enhancer.reset (new Stats::Cluster::ClusterSize (connector, cluster_forming_threshold)); } + matrix_type empirical_enhanced_statistic; if (do_nonstationary_adjustment) { if (!use_tfce) throw Exception ("nonstationary adjustment is not currently implemented for threshold-based cluster analysis"); @@ -321,6 +318,7 @@ void run() { if (!get_options ("notest").size()) { matrix_type perm_distribution, uncorrected_pvalue; + matrix_type default_cluster_output (num_contrasts, num_voxels); Stats::PermTest::run_permutations (glm_test, enhancer, empirical_enhanced_statistic, default_cluster_output, perm_distribution, uncorrected_pvalue); diff --git a/cmd/vectorstats.cpp b/cmd/vectorstats.cpp index ea2ad5a81f..f9a64129fc 100644 --- a/cmd/vectorstats.cpp +++ b/cmd/vectorstats.cpp @@ -88,10 +88,10 @@ class SubjectVectorImport : public SubjectDataImportBase SubjectDataImportBase (path), data (load_vector (path)) { } - void operator() (matrix_type::ColXpr column) const override + void operator() (matrix_type::RowXpr row) const override { - assert (column.rows() == size()); - column = data; + assert (row.size() == size()); + row = data; } default_type operator[] (const size_t 
index) const override @@ -160,9 +160,9 @@ void run() const std::string output_prefix = argument[3]; // Load input data - matrix_type data (num_elements, num_subjects); + matrix_type data (num_subjects, num_elements); for (size_t subject = 0; subject != num_subjects; subject++) - (*importer[subject]) (data.col(subject)); + (*importer[subject]) (data.row(subject)); const bool nans_in_data = !data.allFinite(); if (nans_in_data) { @@ -173,7 +173,7 @@ void run() } // Only add contrast row number to image outputs if there's more than one contrast - auto postfix = [&] (const size_t i) { return (num_contrasts > 1) ? ("_" + str(i)) : ""; }; + auto postfix = [&] (const size_t i) { return (num_contrasts > 1) ? ("_" + contrasts[i].name()) : ""; }; { matrix_type betas (num_factors, num_elements); @@ -186,8 +186,10 @@ void run() ProgressBar progress ("outputting beta coefficients, effect size and standard deviation", 2 + (2 * num_contrasts)); save_matrix (betas, output_prefix + "betas.csv"); ++progress; for (size_t i = 0; i != num_contrasts; ++i) { - save_vector (abs_effect_size.col(i), output_prefix + "abs_effect" + postfix(i) + ".csv"); ++progress; - save_vector (std_effect_size.col(i), output_prefix + "std_effect" + postfix(i) + ".csv"); ++progress; + if (!contrasts[i].is_F()) { + save_vector (abs_effect_size.col(i), output_prefix + "abs_effect" + postfix(i) + ".csv"); ++progress; + save_vector (std_effect_size.col(i), output_prefix + "std_effect" + postfix(i) + ".csv"); ++progress; + } } save_vector (stdev, output_prefix + "std_dev.csv"); } @@ -207,7 +209,7 @@ void run() matrix_type default_tvalues; (*glm_test) (default_shuffle, default_tvalues); for (size_t i = 0; i != num_contrasts; ++i) - save_matrix (default_tvalues.col(i), output_prefix + "tvalue" + postfix(i) + ".csv"); + save_matrix (default_tvalues.col(i), output_prefix + (contrasts[i].is_F() ? 
"F" : "t") + "value" + postfix(i) + ".csv"); // Perform permutation testing if (!get_options ("notest").size()) { diff --git a/core/math/stats/glm.cpp b/core/math/stats/glm.cpp index 32cde41e8f..f8bc487718 100644 --- a/core/math/stats/glm.cpp +++ b/core/math/stats/glm.cpp @@ -72,7 +72,7 @@ namespace MR vector contrasts; const matrix_type contrast_matrix = load_matrix (file_path); for (ssize_t row = 0; row != contrast_matrix.rows(); ++row) - contrasts.emplace_back (Contrast (contrast_matrix.row (row))); + contrasts.emplace_back (Contrast (contrast_matrix.row (row), row)); auto opt = App::get_options ("ftests"); if (opt.size()) { const matrix_type ftest_matrix = load_matrix (opt[0][0]); @@ -82,14 +82,14 @@ namespace MR throw Exception ("F-test array must contain ones and zeros only"); for (ssize_t ftest_index = 0; ftest_index != ftest_matrix.cols(); ++ftest_index) { if (!ftest_matrix.col (ftest_index).count()) - throw Exception ("Column " + sstr(ftest_index+1) + " of F-test matrix does not contain any ones"); + throw Exception ("Column " + str(ftest_index+1) + " of F-test matrix does not contain any ones"); matrix_type this_f_matrix (ftest_matrix.col (ftest_index).count(), contrast_matrix.cols()); ssize_t ftest_row = 0; for (ssize_t contrast_row = 0; contrast_row != contrast_matrix.rows(); ++contrast_row) { if (ftest_matrix (contrast_row, ftest_index)) this_f_matrix.row (ftest_row++) = contrast_matrix.row (contrast_row); } - contrasts.emplace_back (Contrast (this_f_matrix)); + contrasts.emplace_back (Contrast (this_f_matrix, ftest_index)); } if (App::get_options ("fonly").size()) { vector new_contrasts; @@ -98,7 +98,7 @@ namespace MR std::swap (contrasts, new_contrasts); } } else if (App::get_options ("fonly").size()) { - throw Exception ("Cannot perform F-tests exclusively: No F-test matrix was provided"); + throw Exception ("Cannot perform F-tests exclusively (-fonly option): No F-test matrix was provided (-ftests option)"); } return contrasts; } @@ -110,7 +110,7 @@ namespace MR matrix_type solve_betas (const matrix_type& measurements, const matrix_type& design) { - return design.jacobiSvd(Eigen::ComputeThinU | Eigen::ComputeThinV).solve(measurements.transpose()); + return design.jacobiSvd(Eigen::ComputeThinU | Eigen::ComputeThinV).solve(measurements); } @@ -125,7 +125,7 @@ namespace MR matrix_type abs_effect_size (const matrix_type& measurements, const matrix_type& design, const vector& contrasts) { - matrix_type result (measurements.rows(), contrasts.size()); + matrix_type result (measurements.cols(), contrasts.size()); for (size_t ic = 0; ic != contrasts.size(); ++ic) result.col (ic) = abs_effect_size (measurements, design, contrasts[ic]); return result; @@ -135,11 +135,8 @@ namespace MR vector_type stdev (const matrix_type& measurements, const matrix_type& design) { - matrix_type residuals = measurements.transpose() - design * solve_betas (measurements, design); - residuals = residuals.array().pow (2.0); - matrix_type one_over_dof (1, measurements.cols()); - one_over_dof.fill (1.0 / value_type(design.rows()-Math::rank (design))); - return (one_over_dof * residuals).array().sqrt(); + const vector_type sse = (measurements - design * solve_betas (measurements, design)).colwise().squaredNorm(); + return (sse / value_type(design.rows()-Math::rank (design))).sqrt(); } @@ -147,15 +144,15 @@ namespace MR vector_type std_effect_size (const matrix_type& measurements, const matrix_type& design, const Contrast& contrast) { if (contrast.is_F()) - return vector_type::Constant (measurements.rows(), 
NaN); + return vector_type::Constant (measurements.cols(), NaN); else - return abs_effect_size (measurements, design, contrast).array() / stdev (measurements, design).array(); + return abs_effect_size (measurements, design, contrast).array() / stdev (measurements, design); } matrix_type std_effect_size (const matrix_type& measurements, const matrix_type& design, const vector& contrasts) { - const auto stdev_reciprocal = vector_type::Ones (measurements.rows()).array() / stdev (measurements, design).array(); - matrix_type result (measurements.rows(), contrasts.size()); + const auto stdev_reciprocal = vector_type::Ones (measurements.cols()) / stdev (measurements, design); + matrix_type result (measurements.cols(), contrasts.size()); for (size_t ic = 0; ic != contrasts.size(); ++ic) result.col (ic) = abs_effect_size (measurements, design, contrasts[ic]) * stdev_reciprocal; return result; @@ -163,6 +160,8 @@ namespace MR +#define GLM_ALL_STATS_DEBUG + void all_stats (const matrix_type& measurements, const matrix_type& design, const vector& contrasts, @@ -171,10 +170,16 @@ namespace MR matrix_type& std_effect_size, vector_type& stdev) { +#ifndef GLM_ALL_STATS_DEBUG ProgressBar progress ("calculating basic properties of default permutation"); - betas = solve_betas (measurements, design); ++progress; +#endif + betas = solve_betas (measurements, design); +#ifdef GLM_ALL_STATS_DEBUG std::cerr << "Betas: " << betas.rows() << " x " << betas.cols() << ", max " << betas.array().maxCoeff() << "\n"; - abs_effect_size.resize (measurements.rows(), contrasts.size()); +#else + ++progress; +#endif + abs_effect_size.resize (measurements.cols(), contrasts.size()); for (size_t ic = 0; ic != contrasts.size(); ++ic) { if (contrasts[ic].is_F()) { abs_effect_size.col (ic).fill (NaN); @@ -182,20 +187,27 @@ namespace MR abs_effect_size.col (ic) = (matrix_type (contrasts[ic]) * betas).row (0); } } - ++progress; +#ifdef GLM_ALL_STATS_DEBUG std::cerr << "abs_effect_size: " << abs_effect_size.rows() << " x " << abs_effect_size.cols() << ", max " << abs_effect_size.array().maxCoeff() << "\n"; - matrix_type residuals = measurements.transpose() - design * betas; - residuals = residuals.array().pow (2.0); ++progress; - std::cerr << "residuals: " << residuals.rows() << " x " << residuals.cols() << ", max " << residuals.array().maxCoeff() << "\n"; - matrix_type one_over_dof (1, measurements.cols()); - one_over_dof.fill (1.0 / value_type(design.rows()-Math::rank (design))); - std::cerr << "one_over_dof: " << one_over_dof.rows() << " x " << one_over_dof.cols() << ", max " << one_over_dof.array().maxCoeff() << "\n"; - VAR (design.rows()); - VAR (Math::rank (design)); - stdev = (one_over_dof * residuals).array().sqrt().row(0); ++progress; - std::cerr << "stdev: " << stdev.size() << ", max " << stdev.array().maxCoeff() << "\n"; - std_effect_size = abs_effect_size.array() / stdev.array(); ++progress; +#else + ++progress; +#endif + vector_type sse = (measurements - design * betas).colwise().squaredNorm(); +#ifdef GLM_ALL_STATS_DEBUG + std::cerr << "sse: " << sse.size() << ", max " << sse.maxCoeff() << "\n"; +#else + ++progress; +#endif + stdev = (sse / value_type(design.rows()-Math::rank (design))).sqrt(); +#ifdef GLM_ALL_STATS_DEBUG + std::cerr << "stdev: " << stdev.size() << ", max " << stdev.maxCoeff() << "\n"; +#else + ++progress; +#endif + std_effect_size = abs_effect_size.array().colwise() / stdev; +#ifdef GLM_ALL_STATS_DEBUG std::cerr << "std_effect_size: " << std_effect_size.rows() << " x " << std_effect_size.cols() << ", max " 
<< std_effect_size.array().maxCoeff() << "\n"; +#endif } @@ -301,7 +313,9 @@ namespace MR // Split design matrix column-wise depending on whether entries in the contrast matrix are all zero // TODO Later, may include config variables / compiler flags to change model partitioning technique matrix_type X, Z; + //std::cerr << "Design:\n" << design << "\nContrast: " << c << "\n"; const size_t nonzero_column_count = c.colwise().any().count(); + //VAR (nonzero_column_count); X.resize (design.rows(), nonzero_column_count); Z.resize (design.rows(), design.cols() - nonzero_column_count); ssize_t ix = 0, iz = 0; @@ -311,6 +325,8 @@ namespace MR else Z.col (iz++) = design.col (ic); } + //std::cerr << X << "\n"; + //std::cerr << Z << "\n"; return Partition (X, Z); } @@ -340,11 +356,13 @@ namespace MR if (!(size_t(output.rows()) == num_elements() && size_t(output.cols()) == num_outputs())) output.resize (num_elements(), num_outputs()); - matrix_type beta, betahat; - vector_type F; + matrix_type PRz, Sy, beta, c_lambda, XtX; + vector_type sse; // Implement Freedman-Lane for fixed design matrix case // Each contrast needs to be handled explicitly on its own + // TESTME Need to see how an F-test goes here + // This may have an interaction with the model partitioning approach for (size_t ic = 0; ic != c.size(); ++ic) { // First, we perform permutation of the input data @@ -356,73 +374,53 @@ namespace MR //VAR (partitions[ic].Rz.cols()); //VAR (y.rows()); //VAR (y.cols()); - auto PRz = shuffling_matrix * partitions[ic].Rz; + PRz.noalias() = shuffling_matrix * partitions[ic].Rz; //VAR (PRz.rows()); //VAR (PRz.cols()); - // TODO Re-attempt performing this as a single matrix multiplication across all elements - matrix_type Sy (y.rows(), y.cols()); - for (ssize_t ie = 0; ie != y.rows(); ++ie) - Sy.row (ie) = PRz * y.row (ie).transpose(); + Sy.noalias() = PRz * y; //VAR (Sy.rows()); //VAR (Sy.cols()); - // TODO Change measurements matrix convention to store data for each subject in a row; - // means data across subjects for a particular element appear in a column, which is contiguous - // Now, we regress this shuffled data against the full model //VAR (pinvM.rows()); //VAR (pinvM.cols()); - beta.noalias() = pinvM * Sy.transpose(); + beta.noalias() = pinvM * Sy; //VAR (beta.rows()); //VAR (beta.cols()); //VAR (matrix_type(c[ic]).rows()); //VAR (matrix_type(c[ic]).cols()); - betahat = matrix_type(c[ic]) * beta; - //VAR (betahat.rows()); - //VAR (betahat.cols()); - //VAR (partitions[ic].X.rows()); - //VAR (partitions[ic].X.cols()); //VAR (Rm.rows()); //VAR (Rm.cols()); - auto XtX = partitions[ic].X.transpose()*partitions[ic].X; + XtX.noalias() = partitions[ic].X.transpose()*partitions[ic].X; //VAR (XtX.rows()); //VAR (XtX.cols()); const default_type one_over_dof = 1.0 / (num_subjects() - partitions[ic].rank_x - partitions[ic].rank_z); - auto residuals = Rm*Sy.transpose(); - //VAR (residuals.rows()); - //VAR (residuals.cols()); - vector_type temp3 = residuals.colwise().squaredNorm(); - //VAR (temp3.rows()); - //VAR (temp3.cols()); + sse = (Rm*Sy).colwise().squaredNorm(); + //VAR (sse.size()); // FIXME This should be giving a vector, not a matrix - //auto temp4 = betahat.transpose() * (XtX * betahat) / c[ic].rank(); + //auto temp4 = c_lambda.transpose() * (XtX * c_lambda); //VAR (temp4.rows()); //VAR (temp4.cols()); //std::cerr << temp4 << "\n"; - F.resize (y.rows()); - for (ssize_t ie = 0; ie != y.rows(); ++ie) { - vector_type this_betahat = betahat.col (ie); - //VAR (this_betahat.size()); - auto temp2 = 
this_betahat.matrix() * (XtX * this_betahat.matrix()) / c[ic].rank(); - assert (temp2.rows() == 1); - assert (temp2.cols() == 1); - F[ie] = temp2 (0, 0) / (one_over_dof * temp3[ie]); - } - // TODO Try to use broadcasting here; it doesn't like having colwise() as the RHS argument - //F = (betahat.transpose().rowwise() * ((partitions[ic].X.transpose()*partitions[ic].X) * betahat.colwise()) / c[ic].rank()) / - // ((Rm*Sy.transpose()).colwise().squaredNorm() / (num_subjects() - partitions[ic].rank_x - partitions[ic].rank_z)); - //VAR (F.size()); - - // Put the results into the output matrix, replacing NaNs with zeroes - // TODO Check again to see if this new statistic produces NaNs when input data are all zeroes - // We also need to convert F to t if necessary - for (ssize_t iF = 0; iF != F.size(); ++iF) { - if (!std::isfinite (F[iF])) { - output (iF, ic) = value_type(0); + for (ssize_t ie = 0; ie != num_elements(); ++ie) { + // FIXME Pretty sure that if rank(c)>1, this would need to be three-dimensional + // (2D matrix per element) + c_lambda.noalias() = matrix_type(c[ic]) * beta.col (ie); + //VAR (c_lambda.rows()); + //VAR (c_lambda.cols()); + //VAR (partitions[ic].X.rows()); + //VAR (partitions[ic].X.cols()); + // FIXME Issue here if rank of this_c_lambda is greater than rank of XtX + const auto numerator = (c_lambda.transpose() * XtX * c_lambda) / c[ic].rank(); + assert (numerator.rows() == 1); + assert (numerator.cols() == 1); + const value_type F = numerator (0, 0) / (one_over_dof * sse[ie]); + if (!std::isfinite (F)) { + output (ie, ic) = value_type(0); } else if (c[ic].is_F()) { - output (iF, ic) = F[iF]; + output (ie, ic) = F; } else { - assert (betahat.rows() == 1); - output (iF, ic) = std::sqrt (F[iF]) * (betahat (0, iF) > 0 ? 1.0 : -1.0); + assert (c_lambda.rows() == 1); + output (ie, ic) = std::sqrt (F) * (c_lambda.row(0).sum() > 0.0 ? 1.0 : -1.0); } } @@ -463,7 +461,7 @@ namespace MR output.resize (num_elements(), num_outputs()); // Let's loop over elements first, then contrasts in the inner loop - for (ssize_t element = 0; element != y.rows(); ++element) { + for (ssize_t element = 0; element != y.cols(); ++element) { // For each element (row in y), need to load the additional data for that element // for all subjects in order to construct the design matrix @@ -473,7 +471,7 @@ namespace MR // addition to the duplication of the fixed design matrix contents) would hurt bad matrix_type extra_data (num_subjects(), importers.size()); for (ssize_t col = 0; col != ssize_t(importers.size()); ++col) - extra_data.col(col) = importers[col] (element); + extra_data.col (col) = importers[col] (element); // What can we do here that's common across all contrasts? 
// - Import the element-wise data @@ -516,7 +514,7 @@ namespace MR Mfull_masked.block (0, 0, num_subjects(), M.cols()) = M; Mfull_masked.block (0, M.cols(), num_subjects(), extra_data.cols()) = extra_data; perm_matrix_masked = shuffling_matrix; - y_masked = y.row (element); + y_masked = y.col (element); } else { @@ -528,8 +526,7 @@ namespace MR if (element_mask[in_index]) { Mfull_masked.block (out_index, 0, 1, M.cols()) = M.row (in_index); Mfull_masked.block (out_index, M.cols(), 1, extra_data.cols()) = extra_data.row (in_index); - y_masked[out_index] = y (element, in_index); - ++out_index; + y_masked[out_index++] = y (in_index, element); } else { // Any row in the permutation matrix that contains a non-zero entry // in the column corresponding to in_row needs to be removed @@ -595,14 +592,14 @@ namespace MR - matrix_type TestVariable::default_design (const size_t index) const + /*matrix_type TestVariable::default_design (const size_t index) const { matrix_type output (M.rows(), M.cols() + importers.size()); output.block (0, 0, M.rows(), M.cols()) = M; for (size_t i = 0; i != importers.size(); ++i) output.col (M.cols() + i) = importers[i] (index); return output; - } + }*/ diff --git a/core/math/stats/glm.h b/core/math/stats/glm.h index 22d6c88cd4..5d44a58873 100644 --- a/core/math/stats/glm.h +++ b/core/math/stats/glm.h @@ -76,15 +76,17 @@ namespace MR const size_t rank_x, rank_z; }; - Contrast (matrix_type::ConstRowXpr& in) : + Contrast (matrix_type::ConstRowXpr& in, const size_t index) : c (in), r (Math::rank (c)), - F (false) { } + F (false), + i (index) { } - Contrast (const matrix_type& in) : + Contrast (const matrix_type& in, const size_t index) : c (in), r (Math::rank (c)), - F (true) { } + F (true), + i (index) { } Partition operator() (const matrix_type&) const; @@ -92,11 +94,13 @@ namespace MR ssize_t cols() const { return c.cols(); } size_t rank() const { return r; } bool is_F() const { return F; } + std::string name() const { return std::string(F ? "F" : "c") + str(i+1); } private: const matrix_type c; const size_t r; const bool F; + const size_t i; }; @@ -109,19 +113,19 @@ namespace MR /** \addtogroup Statistics @{ */ /*! Compute a matrix of the beta coefficients - * @param measurements a matrix storing the measured data for each subject in a column + * @param measurements a matrix storing the measured data across subjects in each column * @param design the design matrix - * @return the matrix containing the output GLM betas + * @return the matrix containing the output GLM betas (one column of factor betas per element) */ matrix_type solve_betas (const matrix_type& measurements, const matrix_type& design); /*! Compute the effect of interest - * @param measurements a matrix storing the measured data for each subject in a column + * @param measurements a matrix storing the measured data across subjects in each column * @param design the design matrix * @param contrast a Contrast class instance defining the contrast of interest - * @return the matrix containing the output effect + * @return the matrix containing the output absolute effect sizes (one column of element effect sizes per contrast) */ vector_type abs_effect_size (const matrix_type& measurements, const matrix_type& design, const Contrast& contrast); matrix_type abs_effect_size (const matrix_type& measurements, const matrix_type& design, const vector& contrasts); @@ -129,19 +133,19 @@ namespace MR /*! 
Compute the pooled standard deviation - * @param measurements a matrix storing the measured data for each subject in a column + * @param measurements a matrix storing the measured data across subjects in each column * @param design the design matrix - * @return the matrix containing the output standard deviation + * @return the vector containing the output standard deviation for each element */ vector_type stdev (const matrix_type& measurements, const matrix_type& design); /*! Compute cohen's d, the standardised effect size between two means - * @param measurements a matrix storing the measured data for each subject in a column + * @param measurements a matrix storing the measured data across subjects in each column * @param design the design matrix * @param contrast a Contrast class instance defining the contrast of interest - * @return the matrix containing the output standardised effect size + * @return the matrix containing the output standardised effect sizes (one column of element effect sizes per contrast) */ vector_type std_effect_size (const matrix_type& measurements, const matrix_type& design, const Contrast& contrast); matrix_type std_effect_size (const matrix_type& measurements, const matrix_type& design, const vector& contrasts); @@ -185,8 +189,6 @@ namespace MR // Define a base class for GLM tests - // Should support both T-tests and F-tests - // The latter will always produce 1 column only, whereas the former will produce the same number of columns as there are contrasts class TestBase { MEMALIGN(TestBase) public: @@ -195,7 +197,7 @@ namespace MR M (design), c (contrasts) { - assert (y.cols() == M.rows()); + assert (y.rows() == M.rows()); // Can no longer apply this assertion here; GLMTTestVariable later // expands the number of columns in M //assert (c.cols() == M.cols()); @@ -207,7 +209,7 @@ namespace MR */ virtual void operator() (const matrix_type& shuffling_matrix, matrix_type& output) const = 0; - size_t num_elements () const { return y.rows(); } + size_t num_elements () const { return y.cols(); } size_t num_outputs () const { return c.size(); } size_t num_subjects () const { return M.rows(); } virtual size_t num_factors() const { return M.cols(); } @@ -234,9 +236,9 @@ namespace MR { MEMALIGN(TestFixed) public: /*! - * @param measurements a matrix storing the measured data for each subject in a column + * @param measurements a matrix storing the measured data across subjects in each column * @param design the design matrix - * @param contrast a matrix containing the contrast of interest. + * @param contrasts a vector of Contrast instances */ TestFixed (const matrix_type& measurements, const matrix_type& design, const vector& contrasts); @@ -293,7 +295,7 @@ namespace MR * @param index the index of the element for which the design matrix is requested * @return the design matrix for that element, including imported data for extra columns */ - matrix_type default_design (const size_t index) const; + //matrix_type default_design (const size_t index) const; size_t num_factors() const override { return M.cols() + importers.size(); } diff --git a/core/math/stats/import.h b/core/math/stats/import.h index a6c5be9d59..4286122489 100644 --- a/core/math/stats/import.h +++ b/core/math/stats/import.h @@ -1,16 +1,16 @@ /* * Copyright (c) 2008-2016 the MRtrix3 contributors - * + * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. 
If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/ - * + * * MRtrix is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - * + * * For more details, see www.mrtrix.org - * + * */ #ifndef __math_stats_import_h__ #define __math_stats_import_h__ @@ -53,10 +53,10 @@ namespace MR path (path) { } /*! - * @param column the column of a matrix into which the data from this + * @param row the row of a matrix into which the data from this * particular file should be loaded */ - virtual void operator() (matrix_type::ColXpr column) const = 0; + virtual void operator() (matrix_type::RowXpr column) const = 0; /*! * @param index extract the data from this file corresponding to a particular diff --git a/core/math/stats/shuffle.cpp b/core/math/stats/shuffle.cpp index 27c69f2b20..54afd136a9 100644 --- a/core/math/stats/shuffle.cpp +++ b/core/math/stats/shuffle.cpp @@ -156,15 +156,22 @@ namespace MR output.data.resize (0, 0); return false; } - output.data = matrix_type::Zero (rows, rows); if (permutations.size()) { + output.data = matrix_type::Zero (rows, rows); for (size_t i = 0; i != rows; ++i) output.data (i, permutations[counter][i]) = 1.0; + } else { + output.data = matrix_type::Identity (rows, rows); } if (signflips.size()) { - for (size_t i = 0; i != rows; ++i) { - if (signflips[counter][i]) - output.data.row (i) *= -1.0; + for (size_t r = 0; r != rows; ++r) { + if (signflips[counter][r]) { + //output.data.row (r) *= -1.0; + for (size_t c = 0; c != rows; ++c) { + if (output.data (r, c)) + output.data (r, c) *= -1.0; + } + } } } ++counter; diff --git a/src/stats/permtest.cpp b/src/stats/permtest.cpp index 73d431c5c5..e164409183 100644 --- a/src/stats/permtest.cpp +++ b/src/stats/permtest.cpp @@ -90,7 +90,6 @@ namespace MR default_enhanced_statistics (default_enhanced_statistics), statistics (stats_calculator->num_elements(), stats_calculator->num_outputs()), enhanced_statistics (stats_calculator->num_elements(), stats_calculator->num_outputs()), - // NOTE: uncorrected_pvalue_counter currently transposed with respect to matrices // TODO Consider changing to Eigen::Array uncorrected_pvalue_counter (stats_calculator->num_outputs(), vector (stats_calculator->num_elements(), 0)), perm_dist (perm_dist), @@ -195,8 +194,8 @@ namespace MR { assert (stats_calculator); Math::Stats::Shuffler shuffler (stats_calculator->num_subjects(), false, "Running permutations"); - perm_dist.resize (stats_calculator->num_outputs(), shuffler.size()); - uncorrected_pvalues.resize (stats_calculator->num_outputs(), stats_calculator->num_elements()); + perm_dist.resize (shuffler.size(), stats_calculator->num_outputs()); + uncorrected_pvalues.resize (stats_calculator->num_elements(), stats_calculator->num_outputs()); vector> global_uncorrected_pvalue_count (stats_calculator->num_outputs(), vector (stats_calculator->num_elements(), 0)); { Processor processor (stats_calculator, enhancer, diff --git a/testing/data b/testing/data index 2530a7f752..4d1bd05cf2 160000 --- a/testing/data +++ b/testing/data @@ -1 +1 @@ -Subproject commit 2530a7f7525ff6ad05b0c7618978c8f3c46685e7 +Subproject commit 4d1bd05cf27ee75111e6868e63d9072418803c1a diff --git a/testing/tests/vectorstats b/testing/tests/vectorstats index 2354d4ee87..fbfb335e18 100644 --- a/testing/tests/vectorstats +++ b/testing/tests/vectorstats @@ -1,40 +1,2 @@ -# 16 subjects per group -N=16 && SNR=5 && \ -python 
-c """ -import random; -subj_files = [] -for i in range(0,2*${N}): # 2 groups - path = 'tmp' + str(i) + '.txt' - # 5 data points per subject - if i < ${N}: - # First group has effect in row 1, not in rows 2-5 - data = [ random.normalvariate(${SNR},1.0), - random.normalvariate(0.0,1.0), - random.normalvariate(0.0,1.0), - random.normalvariate(0.0,1.0), - random.normalvariate(0.0,1.0) ] - else: - # Second group has effect in row 2, not in rows 1 or 3-5 - data = [ random.normalvariate(0.0,1.0), - random.normalvariate(${SNR},1.0), - random.normalvariate(0.0,1.0), - random.normalvariate(0.0,1.0), - random.normalvariate(0.0,1.0) ] - with open(path, 'w') as f: - f.write('\n'.join([str(f) for f in data])) - subj_files.append(path) -with open('tmpdesign.csv', 'w') as f: - for i in range(0,2*${N}): - group = '1' if i < ${N} else '0' - # Group ID, then one random EV - f.write('1,' + group + ',' + str(random.normalvariate(0.0,1.0)) + '\n') -with open('tmpcontrast.csv', 'w') as f: - # Three contrast rows: - # - Group difference (effect should be present in data row 1) - # - Inverse group difference (effect should be present in data row 2) - # - Random EV (should be absent) - f.write('0,1,0\n0,-1,0\n0,0,1\n') -with open('tmpsubjects.txt', 'w') as f: - for path in subj_files: - f.write(path + '\n') -""" && vectorstats tmpsubjects.txt tmpdesign.csv tmpcontrast.csv tmpout +N=16 && SNR=5 && vectorstats/gen0.py && vectorstats tmpsubjects.txt tmpdesign.csv tmpcontrast.csv tmpout -force && vectorstats/test0.py +N=16 && SNR=5 && vectorstats/gen1.py && vectorstats tmpsubjects.txt tmpdesign.csv tmpcontrast.csv tmpout -errors ise -force && vectorstats/test1.py From 9c0344e3f67f603145beb8c0e3639ac820776bd9 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Wed, 6 Dec 2017 14:43:27 +1100 Subject: [PATCH 0076/1471] Stats: Transpose F-test matrices F-test matrices should now be provided such that each row represents an F-test, with the columns selecting the rows of the contrast matrix to be included. Also fixed vectorstats tests. --- cmd/fixelcfestats.cpp | 2 +- core/math/stats/glm.cpp | 20 ++++++++++---------- core/math/stats/shuffle.cpp | 4 ++-- testing/data | 2 +- testing/tests/vectorstats | 4 ++-- 5 files changed, 16 insertions(+), 16 deletions(-) diff --git a/cmd/fixelcfestats.cpp b/cmd/fixelcfestats.cpp index 3043ee2085..6c56778b08 100644 --- a/cmd/fixelcfestats.cpp +++ b/cmd/fixelcfestats.cpp @@ -447,7 +447,7 @@ void run() for (size_t i = 0; i != num_contrasts; ++i) { write_fixel_output (Path::join (output_fixel_directory, "cfe" + postfix(i) + ".mif"), cfe_output.row(i), output_header); - write_fixel_output (Path::join (output_fixel_directory, (contrasts[i].is_F() ? "F" : "t") + "value" + postfix(i) + ".mif"), tvalue_output.row(i), output_header); + write_fixel_output (Path::join (output_fixel_directory, std::string(contrasts[i].is_F() ? 
"F" : "t") + "value" + postfix(i) + ".mif"), tvalue_output.row(i), output_header); } // Perform permutation testing diff --git a/core/math/stats/glm.cpp b/core/math/stats/glm.cpp index f8bc487718..bfda3c5f3b 100644 --- a/core/math/stats/glm.cpp +++ b/core/math/stats/glm.cpp @@ -49,8 +49,8 @@ namespace MR using namespace App; OptionGroup result = OptionGroup ("Options related to the General Linear Model (GLM)") - + Option ("ftests", "perform F-tests; input text file should contain, for each F-test, a column containing " - "ones and zeros, where ones indicate those rows of the contrast matrix to be included " + + Option ("ftests", "perform F-tests; input text file should contain, for each F-test, a row containing " + "ones and zeros, where ones indicate the rows of the contrast matrix to be included " "in the F-test.") + Argument ("path").type_file_in() @@ -76,17 +76,17 @@ namespace MR auto opt = App::get_options ("ftests"); if (opt.size()) { const matrix_type ftest_matrix = load_matrix (opt[0][0]); - if (ftest_matrix.rows() != contrast_matrix.rows()) - throw Exception ("Number of rows in F-test matrix (" + str(ftest_matrix.rows()) + ") does not match number of rows in contrast matrix (" + str(contrast_matrix.rows()) + ")"); + if (ftest_matrix.cols() != contrast_matrix.rows()) + throw Exception ("Number of columns in F-test matrix (" + str(ftest_matrix.rows()) + ") does not match number of rows in contrast matrix (" + str(contrast_matrix.rows()) + ")"); if (!((ftest_matrix.array() == 0.0) + (ftest_matrix.array() == 1.0)).all()) throw Exception ("F-test array must contain ones and zeros only"); - for (ssize_t ftest_index = 0; ftest_index != ftest_matrix.cols(); ++ftest_index) { - if (!ftest_matrix.col (ftest_index).count()) - throw Exception ("Column " + str(ftest_index+1) + " of F-test matrix does not contain any ones"); - matrix_type this_f_matrix (ftest_matrix.col (ftest_index).count(), contrast_matrix.cols()); + for (ssize_t ftest_index = 0; ftest_index != ftest_matrix.rows(); ++ftest_index) { + if (!ftest_matrix.row (ftest_index).count()) + throw Exception ("Row " + str(ftest_index+1) + " of F-test matrix does not contain any ones"); + matrix_type this_f_matrix (ftest_matrix.row (ftest_index).count(), contrast_matrix.cols()); ssize_t ftest_row = 0; for (ssize_t contrast_row = 0; contrast_row != contrast_matrix.rows(); ++contrast_row) { - if (ftest_matrix (contrast_row, ftest_index)) + if (ftest_matrix (ftest_index, contrast_row)) this_f_matrix.row (ftest_row++) = contrast_matrix.row (contrast_row); } contrasts.emplace_back (Contrast (this_f_matrix, ftest_index)); @@ -110,7 +110,7 @@ namespace MR matrix_type solve_betas (const matrix_type& measurements, const matrix_type& design) { - return design.jacobiSvd(Eigen::ComputeThinU | Eigen::ComputeThinV).solve(measurements); + return design.jacobiSvd (Eigen::ComputeThinU | Eigen::ComputeThinV).solve (measurements); } diff --git a/core/math/stats/shuffle.cpp b/core/math/stats/shuffle.cpp index 54afd136a9..b247b11cc3 100644 --- a/core/math/stats/shuffle.cpp +++ b/core/math/stats/shuffle.cpp @@ -313,10 +313,10 @@ namespace MR vector sorted_null_dist; sorted_null_dist.reserve (null_dist.rows()); for (ssize_t perm = 0; perm != null_dist.rows(); ++perm) - sorted_null_dist.push_back (null_dist(perm, contrast)); + sorted_null_dist.push_back (null_dist (perm, contrast)); std::sort (sorted_null_dist.begin(), sorted_null_dist.end()); for (ssize_t element = 0; element != stats.rows(); ++element) { - if (stats(element, contrast) > 0.0) { + if (stats 
(element, contrast) > 0.0) { value_type pvalue = 1.0; for (size_t j = 0; j < size_t(sorted_null_dist.size()); ++j) { if (stats(element, contrast) < sorted_null_dist[j]) { diff --git a/testing/data b/testing/data index 4d1bd05cf2..077f05ec99 160000 --- a/testing/data +++ b/testing/data @@ -1 +1 @@ -Subproject commit 4d1bd05cf27ee75111e6868e63d9072418803c1a +Subproject commit 077f05ec99e125dedb4c7ebbe616fecbcd82f2f1 diff --git a/testing/tests/vectorstats b/testing/tests/vectorstats index fbfb335e18..ba48233908 100644 --- a/testing/tests/vectorstats +++ b/testing/tests/vectorstats @@ -1,2 +1,2 @@ -N=16 && SNR=5 && vectorstats/gen0.py && vectorstats tmpsubjects.txt tmpdesign.csv tmpcontrast.csv tmpout -force && vectorstats/test0.py -N=16 && SNR=5 && vectorstats/gen1.py && vectorstats tmpsubjects.txt tmpdesign.csv tmpcontrast.csv tmpout -errors ise -force && vectorstats/test1.py +N=16 SNR=5 vectorstats/gen0.py && vectorstats tmpsubjects.txt tmpdesign.csv tmpcontrast.csv tmpout -ftests tmpftests.csv -force && vectorstats/test0.py +N=16 SNR=5 vectorstats/gen1.py && vectorstats tmpsubjects.txt tmpdesign.csv tmpcontrast.csv tmpout -errors ise -force && vectorstats/test1.py From 46c77e5e4d960d93d8eb6b5e9883676ec6c0bc99 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Wed, 6 Dec 2017 16:49:33 +1100 Subject: [PATCH 0077/1471] Stats: Re-establish support for element-wise design matrices This functionality had regressed due to other changes being made to the statistical inference code (e.g. Freedman-Lane); however it should now be back up and running. --- cmd/vectorstats.cpp | 2 +- core/math/stats/glm.cpp | 51 +++++++++++++++++++-------------------- testing/data | 2 +- testing/tests/vectorstats | 1 + 4 files changed, 28 insertions(+), 28 deletions(-) diff --git a/cmd/vectorstats.cpp b/cmd/vectorstats.cpp index f9a64129fc..bfced035ff 100644 --- a/cmd/vectorstats.cpp +++ b/cmd/vectorstats.cpp @@ -177,7 +177,7 @@ void run() { matrix_type betas (num_factors, num_elements); - matrix_type abs_effect_size (num_contrasts, num_elements), std_effect_size (num_contrasts, num_elements); + matrix_type abs_effect_size (num_elements, num_contrasts), std_effect_size (num_elements, num_contrasts); vector_type stdev (num_elements); Math::Stats::GLM::all_stats (data, design, extra_columns, contrasts, diff --git a/core/math/stats/glm.cpp b/core/math/stats/glm.cpp index bfda3c5f3b..572ac35c4e 100644 --- a/core/math/stats/glm.cpp +++ b/core/math/stats/glm.cpp @@ -268,7 +268,7 @@ namespace MR } bool operator() (const size_t& element_index) { - const matrix_type element_data = data.row (element_index); + const matrix_type element_data = data.col (element_index); matrix_type element_design (design_fixed.rows(), design_fixed.cols() + extra_columns.size()); element_design.leftCols (design_fixed.cols()) = design_fixed; // For each element-wise design matrix column, @@ -278,8 +278,8 @@ namespace MR Math::Stats::GLM::all_stats (element_data, element_design, contrasts, local_betas, local_abs_effect_size, local_std_effect_size, local_stdev); global_betas.col (element_index) = local_betas; - global_abs_effect_size.col (element_index) = local_abs_effect_size.col(0); - global_std_effect_size.col (element_index) = local_std_effect_size.col(0); + global_abs_effect_size.row (element_index) = local_abs_effect_size.row (0); + global_std_effect_size.row (element_index) = local_std_effect_size.row (0); global_stdev[element_index] = local_stdev[0]; return true; } @@ -296,7 +296,7 @@ namespace MR vector_type local_stdev; }; - Source source 
(measurements.rows()); + Source source (measurements.cols()); Functor functor (measurements, fixed_design, extra_columns, contrasts, betas, abs_effect_size, std_effect_size, stdev); Thread::run_queue (source, Thread::batch (size_t()), Thread::multi (functor)); @@ -461,7 +461,7 @@ namespace MR output.resize (num_elements(), num_outputs()); // Let's loop over elements first, then contrasts in the inner loop - for (ssize_t element = 0; element != y.cols(); ++element) { + for (ssize_t ie = 0; ie != y.cols(); ++ie) { // For each element (row in y), need to load the additional data for that element // for all subjects in order to construct the design matrix @@ -471,7 +471,7 @@ namespace MR // addition to the duplication of the fixed design matrix contents) would hurt bad matrix_type extra_data (num_subjects(), importers.size()); for (ssize_t col = 0; col != ssize_t(importers.size()); ++col) - extra_data.col (col) = importers[col] (element); + extra_data.col (col) = importers[col] (ie); // What can we do here that's common across all contrasts? // - Import the element-wise data @@ -491,7 +491,7 @@ namespace MR BitSet element_mask (M.rows(), true); if (nans_in_data) { for (ssize_t row = 0; row != y.rows(); ++row) { - if (!std::isfinite (y (row, element))) + if (!std::isfinite (y (row, ie))) element_mask[row] = false; } } @@ -514,7 +514,7 @@ namespace MR Mfull_masked.block (0, 0, num_subjects(), M.cols()) = M; Mfull_masked.block (0, M.cols(), num_subjects(), extra_data.cols()) = extra_data; perm_matrix_masked = shuffling_matrix; - y_masked = y.col (element); + y_masked = y.col (ie); } else { @@ -526,7 +526,7 @@ namespace MR if (element_mask[in_index]) { Mfull_masked.block (out_index, 0, 1, M.cols()) = M.row (in_index); Mfull_masked.block (out_index, M.cols(), 1, extra_data.cols()) = extra_data.row (in_index); - y_masked[out_index++] = y (in_index, element); + y_masked[out_index++] = y (in_index, ie); } else { // Any row in the permutation matrix that contains a non-zero entry // in the column corresponding to in_row needs to be removed @@ -555,34 +555,33 @@ namespace MR const matrix_type Rm = matrix_type::Identity (finite_count, finite_count) - (Mfull_masked*pinvMfull_masked); - matrix_type beta, betahat; - vector_type F; + matrix_type beta, c_lambda; // We now have our permutation (shuffling) matrix and design matrix prepared, // and can commence regressing the partitioned model of each contrast for (size_t ic = 0; ic != c.size(); ++ic) { const auto partition = c[ic] (Mfull_masked); + const matrix_type XtX = partition.X.transpose()*partition.X; // Now that we have the individual contrast model partition for these data, // the rest of this function should proceed similarly to the fixed // design matrix case - // TODO Consider functionalising the below; should be consistent between fixed and variable const matrix_type Sy = perm_matrix_masked * partition.Rz * y_masked.matrix(); - beta.noalias() = Sy * pinvMfull_masked; - betahat.noalias() = beta * matrix_type(c[ic]); - F = (betahat.transpose() * (partition.X.inverse()*partition.X) * betahat / c[ic].rank()) / - ((Rm*Sy).squaredNorm() / (finite_count - partition.rank_x - partition.rank_z)); - - for (ssize_t iF = 0; iF != F.size(); ++iF) { - if (!std::isfinite (F[iF])) { - output (iF, ic) = value_type(0); - } else if (c[ic].is_F()) { - output (iF, ic) = F[iF]; - } else { - assert (betahat.cols() == 1); - output (iF, ic) = std::sqrt (F[iF]) * (betahat (iF, 0) > 0 ? 
1.0 : -1.0); - } + beta.noalias() = pinvMfull_masked * Sy; + c_lambda.noalias() = matrix_type(c[ic]) * beta; + const default_type sse = (Rm*Sy).squaredNorm(); + + const default_type F = ((c_lambda.transpose() * XtX * c_lambda) / c[ic].rank()) (0, 0) / + (sse / value_type (finite_count - partition.rank_x - partition.rank_z)); + + if (!std::isfinite (F)) { + output (ie, ic) = value_type(0); + } else if (c[ic].is_F()) { + output (ie, ic) = F; + } else { + assert (c_lambda.rows() == 1); + output (ie, ic) = std::sqrt (F) * (c_lambda.sum() > 0 ? 1.0 : -1.0); } } // End looping over contrasts diff --git a/testing/data b/testing/data index 077f05ec99..6e009b6e1a 160000 --- a/testing/data +++ b/testing/data @@ -1 +1 @@ -Subproject commit 077f05ec99e125dedb4c7ebbe616fecbcd82f2f1 +Subproject commit 6e009b6e1a2a8b06b269096b3eb1933d42c0d3d5 diff --git a/testing/tests/vectorstats b/testing/tests/vectorstats index ba48233908..d4df086462 100644 --- a/testing/tests/vectorstats +++ b/testing/tests/vectorstats @@ -1,2 +1,3 @@ N=16 SNR=5 vectorstats/gen0.py && vectorstats tmpsubjects.txt tmpdesign.csv tmpcontrast.csv tmpout -ftests tmpftests.csv -force && vectorstats/test0.py N=16 SNR=5 vectorstats/gen1.py && vectorstats tmpsubjects.txt tmpdesign.csv tmpcontrast.csv tmpout -errors ise -force && vectorstats/test1.py +N=16 SNR=5 vectorstats/gen2.py && vectorstats tmpsubjects.txt tmpdesign.csv tmpcontrast.csv tmpout -column tmpcolumn.txt -force && vectorstats/test2.py From 5bd55d53a7000e64aa67bd1945d5fc9680cb9257 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Wed, 6 Dec 2017 16:52:13 +1100 Subject: [PATCH 0078/1471] Stats commands: Documentation update Update relates to 9c0344e3, where the interpretation of input F-test matrices was transposed. --- docs/reference/commands/connectomestats.rst | 2 +- docs/reference/commands/fixelcfestats.rst | 2 +- docs/reference/commands/mrclusterstats.rst | 2 +- docs/reference/commands/vectorstats.rst | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/reference/commands/connectomestats.rst b/docs/reference/commands/connectomestats.rst index 367be0a0d4..a18018d03b 100644 --- a/docs/reference/commands/connectomestats.rst +++ b/docs/reference/commands/connectomestats.rst @@ -60,7 +60,7 @@ Options for controlling TFCE behaviour Options related to the General Linear Model (GLM) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -- **-ftests path** perform F-tests; input text file should contain, for each F-test, a column containing ones and zeros, where ones indicate those rows of the contrast matrix to be included in the F-test. +- **-ftests path** perform F-tests; input text file should contain, for each F-test, a row containing ones and zeros, where ones indicate the rows of the contrast matrix to be included in the F-test. - **-fonly** only assess F-tests; do not perform statistical inference on entries in the contrast matrix diff --git a/docs/reference/commands/fixelcfestats.rst b/docs/reference/commands/fixelcfestats.rst index 4e9bd62999..f104d70aaa 100644 --- a/docs/reference/commands/fixelcfestats.rst +++ b/docs/reference/commands/fixelcfestats.rst @@ -63,7 +63,7 @@ Parameters for the Connectivity-based Fixel Enhancement algorithm Options related to the General Linear Model (GLM) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -- **-ftests path** perform F-tests; input text file should contain, for each F-test, a column containing ones and zeros, where ones indicate those rows of the contrast matrix to be included in the F-test.
+- **-ftests path** perform F-tests; input text file should contain, for each F-test, a row containing ones and zeros, where ones indicate the rows of the contrast matrix to be included in the F-test. - **-fonly** only assess F-tests; do not perform statistical inference on entries in the contrast matrix diff --git a/docs/reference/commands/mrclusterstats.rst b/docs/reference/commands/mrclusterstats.rst index 138134afd6..30a6b23c38 100644 --- a/docs/reference/commands/mrclusterstats.rst +++ b/docs/reference/commands/mrclusterstats.rst @@ -60,7 +60,7 @@ Options for controlling TFCE behaviour Options related to the General Linear Model (GLM) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -- **-ftests path** perform F-tests; input text file should contain, for each F-test, a column containing ones and zeros, where ones indicate those rows of the contrast matrix to be included in the F-test. +- **-ftests path** perform F-tests; input text file should contain, for each F-test, a row containing ones and zeros, where ones indicate the rows of the contrast matrix to be included in the F-test. - **-fonly** only assess F-tests; do not perform statistical inference on entries in the contrast matrix diff --git a/docs/reference/commands/vectorstats.rst b/docs/reference/commands/vectorstats.rst index 7a1245c196..774f24c9f3 100644 --- a/docs/reference/commands/vectorstats.rst +++ b/docs/reference/commands/vectorstats.rst @@ -44,7 +44,7 @@ Options relating to shuffling of data for nonparametric statistical inference Options related to the General Linear Model (GLM) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -- **-ftests path** perform F-tests; input text file should contain, for each F-test, a column containing ones and zeros, where ones indicate those rows of the contrast matrix to be included in the F-test. +- **-ftests path** perform F-tests; input text file should contain, for each F-test, a row containing ones and zeros, where ones indicate the rows of the contrast matrix to be included in the F-test. - **-fonly** only assess F-tests; do not perform statistical inference on entries in the contrast matrix From 4a61e7bf42e20681f56ae581928c188725fa75dd Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Wed, 6 Dec 2017 17:10:54 +1100 Subject: [PATCH 0079/1471] Minor tweaks to stats code - Change capitalisation of terminal outputs - Ensure that the statistics for the final element in the default permutation are appropriately computed. 
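As an illustration of the row-wise -ftests convention documented above: the vectorstats tests use a three-row contrast file (tmpcontrast.csv, whose rows are 0,1,0 / 0,-1,0 / 0,0,1), so the accompanying tmpftests.csv would contain one row per F-test, with ones marking the contrast rows to pool. These patches do not show the contents of tmpftests.csv, so the following two-row file is only a hypothetical sketch:

1,1,0
0,0,1

Here the first row requests a single F-test pooling contrast rows 1 and 2 (the two group-difference contrasts), and the second row an F-test of contrast row 3 alone.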
--- cmd/connectomestats.cpp | 2 +- cmd/fixelcfestats.cpp | 20 ++++++++++---------- cmd/mrclusterstats.cpp | 12 ++++++------ cmd/vectorstats.cpp | 6 +++--- core/math/stats/glm.cpp | 4 ++-- 5 files changed, 22 insertions(+), 22 deletions(-) diff --git a/cmd/connectomestats.cpp b/cmd/connectomestats.cpp index f860559acc..bbb61e4134 100644 --- a/cmd/connectomestats.cpp +++ b/cmd/connectomestats.cpp @@ -223,7 +223,7 @@ void run() nans_in_columns = true; } if (extra_columns.size()) { - CONSOLE ("number of element-wise design matrix columns: " + str(extra_columns.size())); + CONSOLE ("Number of element-wise design matrix columns: " + str(extra_columns.size())); if (nans_in_columns) INFO ("Non-finite values detected in element-wise design matrix columns; individual rows will be removed from edge-wise design matrices accordingly"); } diff --git a/cmd/fixelcfestats.cpp b/cmd/fixelcfestats.cpp index 6c56778b08..6b1a22ba0a 100644 --- a/cmd/fixelcfestats.cpp +++ b/cmd/fixelcfestats.cpp @@ -230,9 +230,9 @@ void run() // Load design matrix: const matrix_type design = load_matrix (argument[2]); - CONSOLE ("design matrix dimensions: " + str(design.rows()) + " x " + str(design.cols())); + CONSOLE ("Design matrix dimensions: " + str(design.rows()) + " x " + str(design.cols())); if (design.rows() != (ssize_t)importer.size()) - throw Exception ("number of input files does not match number of rows in design matrix"); + throw Exception ("Number of input files does not match number of rows in design matrix"); // Load contrasts const vector contrasts = Math::Stats::GLM::load_contrasts (argument[3]); @@ -251,14 +251,14 @@ void run() nans_in_columns = true; } if (extra_columns.size()) { - CONSOLE ("number of element-wise design matrix columns: " + str(extra_columns.size())); + CONSOLE ("Number of element-wise design matrix columns: " + str(extra_columns.size())); if (nans_in_columns) INFO ("Non-finite values detected in element-wise design matrix columns; individual rows will be removed from fixel-wise design matrices accordingly"); } const ssize_t num_factors = design.cols() + extra_columns.size(); if (contrasts[0].cols() != num_factors) - throw Exception ("the number of columns per contrast (" + str(contrasts[0].cols()) + ")" + throw Exception ("The number of columns per contrast (" + str(contrasts[0].cols()) + ")" + (extra_columns.size() ? " (in addition to the " + str(extra_columns.size()) + " uses of -column)" : "") + " does not equal the number of columns in the design matrix (" + str(design.cols()) + ")"); @@ -271,9 +271,9 @@ void run() // Read in tracts, and compute whole-brain fixel-fixel connectivity const size_t num_tracks = properties["count"].empty() ? 
0 : to (properties["count"]); if (!num_tracks) - throw Exception ("no tracks found in input file"); + throw Exception ("No tracks found in input file"); if (num_tracks < 1000000) { - WARN ("more than 1 million tracks is preferable to ensure robust fixel-fixel connectivity; file \"" + track_filename + "\" contains only " + str(num_tracks)); + WARN ("More than 1 million tracks is preferable to ensure robust fixel-fixel connectivity; file \"" + track_filename + "\" contains only " + str(num_tracks)); } { typedef DWI::Tractography::Mapping::SetVoxelDir SetVoxelDir; @@ -304,7 +304,7 @@ void run() { // TODO This could trivially be multi-threaded; fixels are handled independently - ProgressBar progress ("normalising and thresholding fixel-fixel connectivity matrix", num_fixels); + ProgressBar progress ("Normalising and thresholding fixel-fixel connectivity matrix", num_fixels); for (uint32_t fixel = 0; fixel < num_fixels; ++fixel) { auto it = connectivity_matrix[fixel].begin(); @@ -356,7 +356,7 @@ void run() matrix_type data = matrix_type::Zero (importer.size(), num_fixels); bool nans_in_data = false; { - ProgressBar progress ("loading input images", importer.size()); + ProgressBar progress ("Loading input images", importer.size()); for (size_t subject = 0; subject < importer.size(); subject++) { (*importer[subject]) (data.row (subject)); // Smooth the data @@ -403,7 +403,7 @@ void run() Math::Stats::GLM::all_stats (data, design, extra_columns, contrasts, betas, abs_effect_size, std_effect_size, stdev); - ProgressBar progress ("outputting beta coefficients, effect size and standard deviation", num_factors + (2 * num_contrasts) + 1); + ProgressBar progress ("Outputting beta coefficients, effect size and standard deviation", num_factors + (2 * num_contrasts) + 1); for (ssize_t i = 0; i != num_factors; ++i) { write_fixel_output (Path::join (output_fixel_directory, "beta" + str(i) + ".mif"), betas.row(i), output_header); ++progress; @@ -458,7 +458,7 @@ void run() Stats::PermTest::run_permutations (glm_test, cfe_integrator, empirical_cfe_statistic, cfe_output, perm_distribution, uncorrected_pvalues); - ProgressBar progress ("outputting final results"); + ProgressBar progress ("Outputting final results"); for (size_t i = 0; i != num_contrasts; ++i) { save_vector (perm_distribution.row(i), Path::join (output_fixel_directory, "perm_dist" + postfix(i) + ".txt")); ++progress; diff --git a/cmd/mrclusterstats.cpp b/cmd/mrclusterstats.cpp index bf268312b8..fb49586032 100644 --- a/cmd/mrclusterstats.cpp +++ b/cmd/mrclusterstats.cpp @@ -202,7 +202,7 @@ void run() { // Load design matrix const matrix_type design = load_matrix (argument[1]); if (design.rows() != (ssize_t)importer.size()) - throw Exception ("number of input files does not match number of rows in design matrix"); + throw Exception ("Number of input files does not match number of rows in design matrix"); // Load contrasts const vector contrasts = Math::Stats::GLM::load_contrasts (argument[2]); @@ -222,14 +222,14 @@ void run() { nans_in_columns = true; } if (extra_columns.size()) { - CONSOLE ("number of element-wise design matrix columns: " + str(extra_columns.size())); + CONSOLE ("Number of element-wise design matrix columns: " + str(extra_columns.size())); if (nans_in_columns) INFO ("Non-finite values detected in element-wise design matrix columns; individual rows will be removed from voxel-wise design matrices accordingly"); } const ssize_t num_factors = design.cols() + extra_columns.size(); if (contrasts[0].cols() != num_factors) - throw 
Exception ("the number of columns per contrast (" + str(contrasts[0].cols()) + ")" + throw Exception ("The number of columns per contrast (" + str(contrasts[0].cols()) + ")" + " does not equal the number of columns in the design matrix (" + str(design.cols()) + ")" + (extra_columns.size() ? " (taking into account the " + str(extra_columns.size()) + " uses of -column)" : "")); @@ -276,7 +276,7 @@ void run() { Math::Stats::GLM::all_stats (data, design, extra_columns, contrasts, betas, abs_effect_size, std_effect_size, stdev); - ProgressBar progress ("outputting beta coefficients, effect size and standard deviation", num_factors + (2 * num_contrasts) + 1); + ProgressBar progress ("Outputting beta coefficients, effect size and standard deviation", num_factors + (2 * num_contrasts) + 1); for (ssize_t i = 0; i != num_factors; ++i) { write_output (betas.row(i), v2v, prefix + "beta" + str(i) + ".mif", output_header); ++progress; @@ -309,7 +309,7 @@ void run() { matrix_type empirical_enhanced_statistic; if (do_nonstationary_adjustment) { if (!use_tfce) - throw Exception ("nonstationary adjustment is not currently implemented for threshold-based cluster analysis"); + throw Exception ("Nonstationary adjustment is not currently implemented for threshold-based cluster analysis"); Stats::PermTest::precompute_empirical_stat (glm_test, enhancer, empirical_enhanced_statistic); for (size_t i = 0; i != num_contrasts; ++i) save_vector (empirical_enhanced_statistic.row(i), prefix + "empirical" + postfix(i) + ".txt"); @@ -326,7 +326,7 @@ void run() { for (size_t i = 0; i != num_contrasts; ++i) save_vector (perm_distribution.row(i), prefix + "perm_dist" + postfix(i) + ".txt"); - ProgressBar progress ("generating output", 1 + (2 * num_contrasts)); + ProgressBar progress ("Generating output images", 1 + (2 * num_contrasts)); for (size_t i = 0; i != num_contrasts; ++i) { write_output (uncorrected_pvalue.row(i), v2v, prefix + "uncorrected_pvalue" + postfix(i) + ".mif", output_header); ++progress; diff --git a/cmd/vectorstats.cpp b/cmd/vectorstats.cpp index bfced035ff..b16d5140b5 100644 --- a/cmd/vectorstats.cpp +++ b/cmd/vectorstats.cpp @@ -145,14 +145,14 @@ void run() nans_in_columns = true; } if (extra_columns.size()) { - CONSOLE ("number of element-wise design matrix columns: " + str(extra_columns.size())); + CONSOLE ("Number of element-wise design matrix columns: " + str(extra_columns.size())); if (nans_in_columns) INFO ("Non-finite values detected in element-wise design matrix columns; individual rows will be removed from voxel-wise design matrices accordingly"); } const ssize_t num_factors = design.cols() + extra_columns.size(); if (contrasts[0].cols() != num_factors) - throw Exception ("the number of columns per contrast (" + str(contrasts[0].cols()) + ")" + throw Exception ("The number of columns per contrast (" + str(contrasts[0].cols()) + ")" + " does not equal the number of columns in the design matrix (" + str(design.cols()) + ")" + (extra_columns.size() ? 
" (taking into account the " + str(extra_columns.size()) + " uses of -column)" : "")); CONSOLE ("Number of factors: " + str(num_factors)); @@ -183,7 +183,7 @@ void run() Math::Stats::GLM::all_stats (data, design, extra_columns, contrasts, betas, abs_effect_size, std_effect_size, stdev); - ProgressBar progress ("outputting beta coefficients, effect size and standard deviation", 2 + (2 * num_contrasts)); + ProgressBar progress ("Outputting beta coefficients, effect size and standard deviation", 2 + (2 * num_contrasts)); save_matrix (betas, output_prefix + "betas.csv"); ++progress; for (size_t i = 0; i != num_contrasts; ++i) { if (!contrasts[i].is_F()) { diff --git a/core/math/stats/glm.cpp b/core/math/stats/glm.cpp index 572ac35c4e..b082b8da66 100644 --- a/core/math/stats/glm.cpp +++ b/core/math/stats/glm.cpp @@ -160,7 +160,7 @@ namespace MR -#define GLM_ALL_STATS_DEBUG +//#define GLM_ALL_STATS_DEBUG void all_stats (const matrix_type& measurements, const matrix_type& design, @@ -236,7 +236,7 @@ namespace MR bool operator() (size_t& element_index) { element_index = counter++; - if (counter >= num_elements) { + if (element_index >= num_elements) { progress.reset(); return false; } From 7ffe8782ed90370097d6baf3bf0b461f347179d3 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Wed, 6 Dec 2017 17:43:43 +1100 Subject: [PATCH 0080/1471] Stats: Fix for Freedman-Lane in the absence of nuisance regressors --- core/math/stats/glm.h | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/core/math/stats/glm.h b/core/math/stats/glm.h index 5d44a58873..83bf31e0f6 100644 --- a/core/math/stats/glm.h +++ b/core/math/stats/glm.h @@ -60,10 +60,12 @@ namespace MR Partition (const matrix_type& x, const matrix_type& z) : X (x), Z (z), - Hz (Z * Math::pinv (Z)), - Rz (matrix_type::Identity (Z.rows(), Z.rows()) - Hz), + Hz (Z.cols() ? + (Z * Math::pinv (Z)) : + matrix_type (matrix_type::Zero (X.rows(), X.rows()))), + Rz (matrix_type::Identity (X.rows(), X.rows()) - Hz), rank_x (Math::rank (X)), - rank_z (Math::rank (Z)) { } + rank_z (Z.cols() ? 
Math::rank (Z) : 0) { } // X = Component of design matrix related to effect of interest // Z = Component of design matrix related to nuisance regressors const matrix_type X, Z; From 4a6ff06fd456d939511441d1a953e9a2d50d382e Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Wed, 6 Dec 2017 17:44:50 +1100 Subject: [PATCH 0081/1471] Add vectorstats test missing from 7ffe8782 --- testing/tests/vectorstats | 1 + 1 file changed, 1 insertion(+) diff --git a/testing/tests/vectorstats b/testing/tests/vectorstats index d4df086462..8e5b271529 100644 --- a/testing/tests/vectorstats +++ b/testing/tests/vectorstats @@ -1,3 +1,4 @@ N=16 SNR=5 vectorstats/gen0.py && vectorstats tmpsubjects.txt tmpdesign.csv tmpcontrast.csv tmpout -ftests tmpftests.csv -force && vectorstats/test0.py N=16 SNR=5 vectorstats/gen1.py && vectorstats tmpsubjects.txt tmpdesign.csv tmpcontrast.csv tmpout -errors ise -force && vectorstats/test1.py N=16 SNR=5 vectorstats/gen2.py && vectorstats tmpsubjects.txt tmpdesign.csv tmpcontrast.csv tmpout -column tmpcolumn.txt -force && vectorstats/test2.py +N=16 SNR=5 vectorstats/gen3.py && vectorstats tmpsubjects.txt tmpdesign.csv tmpcontrast.csv tmpout -errors ise -force && vectorstats/test3.py From e6d9516a7cac71e5d3c2b7284bc8f30e3d4460c4 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Thu, 7 Dec 2017 10:54:15 +1100 Subject: [PATCH 0082/1471] Stats: Support rank-deficient F-tests Previously, if an F-test were rank-deficient, this would lead to a matrix math error in calculation of the F-statistic. This change explicitly tests each F-test matrix, and replaces it with its own row-space if the matrix is rank-deficient. --- core/math/stats/glm.cpp | 21 ++++++++++++++++++--- core/math/stats/glm.h | 4 +++- testing/data | 2 +- testing/tests/vectorstats | 2 +- 4 files changed, 23 insertions(+), 6 deletions(-) diff --git a/core/math/stats/glm.cpp b/core/math/stats/glm.cpp index b082b8da66..f35bf92aa5 100644 --- a/core/math/stats/glm.cpp +++ b/core/math/stats/glm.cpp @@ -332,6 +332,24 @@ namespace MR + matrix_type Contrast::check_rank (const matrix_type& in, const size_t index) const + { + // FullPivLU.image() provides column-space of matrix; + // here we want the row-space (since it's degeneracy in contrast matrix rows + // that has led to the rank-deficiency, whereas we can't exclude factors). + // Hence the transposing. 
+ Eigen::FullPivLU decomp (in.transpose()); + if (decomp.rank() == in.rows()) + return in; + WARN ("F-test " + str(index+1) + " is rank-deficient; row-space matrix decomposition will instead be used"); + INFO ("Original matrix: " + str(in)); + const matrix_type result = decomp.image (in.transpose()).transpose(); + INFO ("Decomposed matrix: " + str(result)); + return result; + } + + + @@ -402,14 +420,11 @@ namespace MR //VAR (temp4.cols()); //std::cerr << temp4 << "\n"; for (ssize_t ie = 0; ie != num_elements(); ++ie) { - // FIXME Pretty sure that if rank(c)>1, this would need to be three-dimensional - // (2D matrix per element) c_lambda.noalias() = matrix_type(c[ic]) * beta.col (ie); //VAR (c_lambda.rows()); //VAR (c_lambda.cols()); //VAR (partitions[ic].X.rows()); //VAR (partitions[ic].X.cols()); - // FIXME Issue here if rank of this_c_lambda is greater than rank of XtX const auto numerator = (c_lambda.transpose() * XtX * c_lambda) / c[ic].rank(); assert (numerator.rows() == 1); assert (numerator.cols() == 1); diff --git a/core/math/stats/glm.h b/core/math/stats/glm.h index 83bf31e0f6..d23a4fa179 100644 --- a/core/math/stats/glm.h +++ b/core/math/stats/glm.h @@ -85,7 +85,7 @@ namespace MR i (index) { } Contrast (const matrix_type& in, const size_t index) : - c (in), + c (check_rank (in, index)), r (Math::rank (c)), F (true), i (index) { } @@ -103,6 +103,8 @@ namespace MR const size_t r; const bool F; const size_t i; + + matrix_type check_rank (const matrix_type&, const size_t) const; }; diff --git a/testing/data b/testing/data index 6e009b6e1a..0ed349d5aa 160000 --- a/testing/data +++ b/testing/data @@ -1 +1 @@ -Subproject commit 6e009b6e1a2a8b06b269096b3eb1933d42c0d3d5 +Subproject commit 0ed349d5aaf3123246d685483b4f18de0f455261 diff --git a/testing/tests/vectorstats b/testing/tests/vectorstats index 8e5b271529..1fbd020f2f 100644 --- a/testing/tests/vectorstats +++ b/testing/tests/vectorstats @@ -1,4 +1,4 @@ N=16 SNR=5 vectorstats/gen0.py && vectorstats tmpsubjects.txt tmpdesign.csv tmpcontrast.csv tmpout -ftests tmpftests.csv -force && vectorstats/test0.py N=16 SNR=5 vectorstats/gen1.py && vectorstats tmpsubjects.txt tmpdesign.csv tmpcontrast.csv tmpout -errors ise -force && vectorstats/test1.py -N=16 SNR=5 vectorstats/gen2.py && vectorstats tmpsubjects.txt tmpdesign.csv tmpcontrast.csv tmpout -column tmpcolumn.txt -force && vectorstats/test2.py +N=16 SNR=5 vectorstats/gen2.py && vectorstats tmpsubjects.txt tmpdesign.csv tmpcontrast.csv tmpout -column tmpcolumn.txt -ftests tmpftests.csv -force && vectorstats/test2.py N=16 SNR=5 vectorstats/gen3.py && vectorstats tmpsubjects.txt tmpdesign.csv tmpcontrast.csv tmpout -errors ise -force && vectorstats/test3.py From 7c506455bd7bd599cbca65bdcdc81763d32dc554 Mon Sep 17 00:00:00 2001 From: J-Donald Tournier Date: Sat, 9 Dec 2017 00:42:33 +0000 Subject: [PATCH 0083/1471] mrview: remove interpolators from main GUI image class Transform matrices are now computed on demand.
--- core/image.h | 76 ++++++++-------- src/gui/mrview/gui_image.cpp | 43 ++++----- src/gui/mrview/gui_image.h | 7 -- src/gui/mrview/mode/base.cpp | 24 ++--- src/gui/mrview/mode/base.h | 10 +-- src/gui/mrview/mode/volume.cpp | 93 ++++++++++---------- src/gui/mrview/tool/odf/odf.cpp | 26 +++--- src/gui/mrview/tool/roi_editor/roi.cpp | 22 ++--- src/gui/mrview/tool/roi_editor/roi.h | 2 +- src/gui/mrview/tool/roi_editor/undoentry.cpp | 24 ++--- src/gui/mrview/tool/screen_capture.cpp | 10 +-- src/gui/mrview/tool/view.cpp | 30 +++---- src/gui/mrview/volume.cpp | 9 -- src/gui/mrview/volume.h | 42 ++++++--- src/gui/mrview/window.cpp | 4 +- src/gui/mrview/window.h | 2 +- 16 files changed, 211 insertions(+), 213 deletions(-) diff --git a/core/image.h b/core/image.h index 8a44acf1bf..2ea7bda65f 100644 --- a/core/image.h +++ b/core/image.h @@ -35,7 +35,7 @@ namespace MR template class Image : - public ImageBase, ValueType> + public ImageBase, ValueType> { MEMALIGN (Image) public: using value_type = ValueType; @@ -102,10 +102,10 @@ namespace MR return stream; } - //! write out the contents of a direct IO image to file - /*! + //! write out the contents of a direct IO image to file + /*! * returns the name of the image - needed by display() to get the - * name of the temporary file to supply to MRView. + * name of the temporary file to supply to MRView. * * \note this is \e not the recommended way to save an image - only use * this function when you absolutely need to minimise RAM usage on @@ -123,44 +123,44 @@ namespace MR std::string dump_to_mrtrix_file (std::string filename, bool use_multi_threading = true) const; //! return a new Image using direct IO - /*! + /*! * this will preload the data into RAM if the datatype on file doesn't * match that on file (or if any scaling is applied to the data). The * optional \a with_strides argument is used to additionally enforce - * preloading if the strides aren't compatible with those specified. + * preloading if the strides aren't compatible with those specified. * * Example: * \code * auto image = Header::open (argument[0]).get_image().with_direct_io(); - * \endcode + * \endcode * \note this invalidate the invoking Image - do not use the original * image in subsequent code.*/ Image with_direct_io (Stride::List with_strides = Stride::List()); //! return a new Image using direct IO - /*! + /*! * this is a convenience function, performing the same function as * with_direct_io(Stride::List). The difference is that the \a axis * argument specifies which axis should be contiguous, or if \a axis is * negative, that the spatial axes should be contiguous (the \c * SpatiallyContiguous constexpr, set to -1, is provided for clarity). * In other words: - * \code + * \code * auto image = Image::open (filename).with_direct_io (3); * \endcode * is equivalent to: - * \code + * \code * auto header = Header::open (filename); * auto image = header.get_image().with_direct_io (Stride::contiguous_along_axis (3, header)); * \endcode * and - * \code + * \code * auto image = Image::open (filename).with_direct_io (-1); * // or; * auto image = Image::open (filename).with_direct_io (SpatiallyContiguous); * \endcode * is equivalent to: - * \code + * \code * auto header = Header::open (filename); * auto image = header.get_image().with_direct_io (Stride::contiguous_along_spatial_axes (header)); * \endcode @@ -176,7 +176,7 @@ namespace MR /*! \note this will only work if image access is direct (i.e. 
for a * scratch image, with preloading, or when the data type is native and * without scaling. */ - ValueType* address () const { + ValueType* address () const { assert (data_pointer != nullptr && "Image::address() can only be used when image access is via direct RAM access"); return data_pointer ? static_cast(data_pointer) + data_offset : nullptr; } @@ -210,7 +210,7 @@ namespace MR - template + template class Image::Buffer : public Header { MEMALIGN (Image::Buffer) public: Buffer() {} // TODO: delete this line! Only for testing memory alignment issues. @@ -219,7 +219,7 @@ namespace MR Buffer (Buffer&&) = default; Buffer& operator= (const Buffer&) = delete; Buffer& operator= (Buffer&&) = default; - Buffer (const Buffer& b) : + Buffer (const Buffer& b) : Header (b), fetch_func (b.fetch_func), store_func (b.store_func) { } @@ -267,12 +267,12 @@ namespace MR // lightweight struct to copy data into: template - struct TmpImage : - public ImageBase, ValueType> + struct TmpImage : + public ImageBase, ValueType> { MEMALIGN (TmpImage) using value_type = ValueType; - TmpImage (const typename Image::Buffer& b, void* const data, + TmpImage (const typename Image::Buffer& b, void* const data, vector x, const Stride::List& strides, size_t offset) : b (b), data (data), x (x), strides (strides), offset (offset) { } @@ -291,10 +291,10 @@ namespace MR FORCE_INLINE ssize_t get_index (size_t axis) const { return x[axis]; } FORCE_INLINE void move_index (size_t axis, ssize_t increment) { offset += stride (axis) * increment; x[axis] += increment; } - FORCE_INLINE value_type get_value () const { return Raw::fetch_native (data, offset); } + FORCE_INLINE value_type get_value () const { return Raw::fetch_native (data, offset); } FORCE_INLINE void set_value (ValueType val) { Raw::store_native (val, data, offset); } }; - + CHECK_MEM_ALIGN (TmpImage); } @@ -309,13 +309,13 @@ namespace MR template Image::Buffer::Buffer (Header& H, bool read_write_if_existing) : Header (H) { - assert (H.valid() && "IO handler must be set when creating an Image"); + assert (H.valid() && "IO handler must be set when creating an Image"); assert ((H.is_file_backed() ? is_data_type::value : true) && "class types cannot be stored on file using the Image class"); acquire_io (H); io->set_readwrite_if_existing (read_write_if_existing); io->open (*this, footprint (voxel_count (*this))); - if (io->is_file_backed()) + if (io->is_file_backed()) set_fetch_store_functions (); } @@ -324,8 +324,8 @@ namespace MR - template - void* Image::Buffer::get_data_pointer () + template + void* Image::Buffer::get_data_pointer () { if (data_buffer) // already allocated via with_direct_io() return data_buffer.get(); @@ -361,7 +361,7 @@ namespace MR template FORCE_INLINE Image::Image () : - data_pointer (nullptr), + data_pointer (nullptr), data_offset (0) { } template @@ -371,10 +371,10 @@ namespace MR x (ndim(), 0), strides (desired_strides.size() ? desired_strides : Stride::get (*buffer)), data_offset (Stride::offset (*this)) - { + { assert (buffer); assert (data_pointer || buffer->get_io()); - DEBUG ("image \"" + name() + "\" initialised with strides = " + str(strides) + ", start = " + str(data_offset) + DEBUG ("image \"" + name() + "\" initialised with strides = " + str(strides) + ", start = " + str(data_offset) + ", using " + ( is_direct_io() ? "" : "in" ) + "direct IO"); } @@ -383,7 +383,7 @@ namespace MR template - Image::~Image () + Image::~Image () { if (buffer.unique()) { // was image preloaded and read/write? 
If so,need to write back: @@ -392,7 +392,7 @@ namespace MR auto data_buffer = std::move (buffer->data_buffer); TmpImage src = { *buffer, data_buffer.get(), vector (ndim(), 0), strides, Stride::offset (*this) }; Image dest (buffer); - threaded_copy_with_progress_message ("writing back direct IO buffer for \"" + name() + "\"", src, dest); + threaded_copy_with_progress_message ("writing back direct IO buffer for \"" + name() + "\"", src, dest); } } } @@ -416,10 +416,10 @@ namespace MR preload |= ( new_strides != Stride::get (*this) ); with_strides = new_strides; } - else - with_strides = Stride::get (*this); + else + with_strides = Stride::get (*this); - if (!preload) + if (!preload) return std::move (*this); // do the preload: @@ -435,7 +435,7 @@ namespace MR else { auto src (*this); TmpImage dest = { *buffer, buffer->data_buffer.get(), vector (ndim(), 0), with_strides, Stride::offset (with_strides, *this) }; - threaded_copy_with_progress_message ("preloading data for \"" + name() + "\"", src, dest); + threaded_copy_with_progress_message ("preloading data for \"" + name() + "\"", src, dest); } return Image (buffer, with_strides); @@ -446,7 +446,7 @@ namespace MR template - std::string Image::dump_to_mrtrix_file (std::string filename, bool) const + std::string Image::dump_to_mrtrix_file (std::string filename, bool) const { if (!data_pointer || ( !Path::has_suffix (filename, ".mih") && !Path::has_suffix (filename, ".mif") )) throw Exception ("FIXME: image not suitable for use with 'Image::dump_to_mrtrix_file()'"); @@ -475,7 +475,7 @@ namespace MR data_filename = filename.substr (0, filename.size()-4) + ".dat"; out << Path::basename (data_filename) << "\n"; out.close(); - out.open (data_filename, std::ios::out | std::ios::binary); + out.open (data_filename, std::ios::out | std::ios::binary); } const int64_t data_size = footprint (*buffer); @@ -484,7 +484,7 @@ namespace MR if (!out.good()) throw Exception ("error writing back contents of file \"" + data_filename + "\": " + strerror(errno)); out.close(); - + // If data_size exceeds some threshold, ostream artificially increases the file size beyond that required at close() // TODO check whether this is still needed...? File::resize (data_filename, offset + data_size); @@ -514,7 +514,7 @@ namespace MR //! save contents of an existing image to file (for debugging only) template typename std::enable_if::type>::value, std::string>::type - save (ImageType&& x, const std::string& filename, bool use_multi_threading = true) + save (ImageType&& x, const std::string& filename, bool use_multi_threading = true) { return __save_generic (x, filename, use_multi_threading); } @@ -522,7 +522,7 @@ namespace MR //! save contents of an existing image to file (for debugging only) template typename std::enable_if::type>::value, std::string>::type - save (ImageType&& x, const std::string& filename, bool use_multi_threading = true) + save (ImageType&& x, const std::string& filename, bool use_multi_threading = true) { try { return x.dump_to_mrtrix_file (filename, use_multi_threading); } catch (...) 
{ } diff --git a/src/gui/mrview/gui_image.cpp b/src/gui/mrview/gui_image.cpp index 8aa1507f4f..e688011c2e 100644 --- a/src/gui/mrview/gui_image.cpp +++ b/src/gui/mrview/gui_image.cpp @@ -53,6 +53,8 @@ namespace MR { update_texture2D (plane, slice); + auto V2S = voxel2scanner(); + int x, y; get_axes (plane, x, y); float xsize = header().size(x)-0.5, ysize = header().size(y)-0.5; @@ -62,22 +64,22 @@ namespace MR p[x] = -0.5; p[y] = -0.5; - vertices[0].noalias() = _transform.voxel2scanner.cast() * p; + vertices[0].noalias() = V2S * p; vertices[1] = { 0.0f, 0.0f, 0.0f }; p[x] = -0.5; p[y] = ysize; - vertices[2].noalias() = _transform.voxel2scanner.cast() * p; + vertices[2].noalias() = V2S * p; vertices[3] = { 0.0f, 1.0f, 0.0f }; p[x] = xsize; p[y] = ysize; - vertices[4].noalias() = _transform.voxel2scanner.cast() * p; + vertices[4].noalias() = V2S * p; vertices[5] = { 1.0f, 1.0f, 0.0f }; p[x] = xsize; p[y] = -0.5; - vertices[6].noalias() = _transform.voxel2scanner.cast() * p; + vertices[6].noalias() = V2S * p; vertices[7] = { 1.0f, 0.0f, 0.0f }; start (shader_program); @@ -121,8 +123,6 @@ namespace MR Image::Image (MR::Header&& image_header) : ImageBase (std::move (image_header)), image (header().get_image()), - linear_interp (image), - nearest_interp (image), slice_min { { NaN, NaN, NaN } }, slice_max { { NaN, NaN, NaN } } { @@ -580,19 +580,24 @@ namespace MR - cfloat Image::trilinear_value (const Eigen::Vector3f& scanner_point) const { - if (!linear_interp.scanner (scanner_point)) + cfloat Image::trilinear_value (const Eigen::Vector3f& scanner_point) const + { + auto interp = Interp::make_linear (image); + if (!interp.scanner (scanner_point)) return cfloat(NAN, NAN); for (size_t n = 3; n < image.ndim(); ++n) - linear_interp.index (n) = image.index (n); - return linear_interp.value(); + interp.index (n) = image.index (n); + return interp.value(); } - cfloat Image::nearest_neighbour_value (const Eigen::Vector3f& scanner_point) const { - if (!nearest_interp.scanner (scanner_point)) + + cfloat Image::nearest_neighbour_value (const Eigen::Vector3f& scanner_point) const + { + auto interp = Interp::make_nearest (image); + if (!interp.scanner (scanner_point)) return cfloat(NAN, NAN); for (size_t n = 3; n < image.ndim(); ++n) - nearest_interp.index (n) = image.index (n); - return nearest_interp.value(); + interp.index (n) = image.index (n); + return interp.value(); } @@ -642,16 +647,6 @@ namespace MR - - void Image::set_tranform (const transform_type& new_transform) - { - Volume::set_tranform (new_transform); - - linear_interp = decltype (linear_interp) (image); - nearest_interp = decltype (nearest_interp) (image); - } - - } } } diff --git a/src/gui/mrview/gui_image.h b/src/gui/mrview/gui_image.h index 2b932a60b3..c2df9d761c 100644 --- a/src/gui/mrview/gui_image.h +++ b/src/gui/mrview/gui_image.h @@ -86,18 +86,11 @@ namespace MR cfloat trilinear_value (const Eigen::Vector3f&) const; cfloat nearest_neighbour_value (const Eigen::Vector3f&) const; - const MR::Transform& transform() const { return linear_interp; } - void set_tranform (const transform_type& transform); - const vector& comments() const { return _comments; } void reset_windowing (const int, const bool); protected: - mutable MR::Interp::Linear > linear_interp; - mutable MR::Interp::Nearest> nearest_interp; - friend class Tool::ODF; - std::array slice_min, slice_max; std::unordered_map tex_4d_cache; diff --git a/src/gui/mrview/mode/base.cpp b/src/gui/mrview/mode/base.cpp index 28f58d7adb..466b82bdc7 100644 --- a/src/gui/mrview/mode/base.cpp 
+++ b/src/gui/mrview/mode/base.cpp @@ -68,7 +68,7 @@ namespace MR projection.setup_render_text(); if (window().show_voxel_info()) { - Eigen::Vector3f voxel (image()->transform().scanner2voxel.cast() * focus()); + Eigen::Vector3f voxel (image()->scanner2voxel() * focus()); ssize_t vox [] = { ssize_t(std::round (voxel[0])), ssize_t(std::round (voxel[1])), ssize_t(std::round (voxel[2])) }; std::string vox_str = printf ("voxel: [ %d %d %d ", vox[0], vox[1], vox[2]); @@ -149,7 +149,7 @@ namespace MR void Base::mouse_press_event () { } void Base::mouse_release_event () { } - void Base::slice_move_event (float x) + void Base::slice_move_event (float x) { if (window().active_camera_interactor() && window().active_camera_interactor()->slice_move_event (x)) return; @@ -224,7 +224,7 @@ namespace MR void Base::setup_projection (const int axis, Projection& with_projection) const { - const GL::mat4 M = snap_to_image() ? GL::mat4 (image()->transform().image2scanner.matrix()) : GL::mat4 (orientation()); + const GL::mat4 M = snap_to_image() ? GL::mat4 (image()->image2scanner().matrix()) : GL::mat4 (orientation()); setup_projection (adjust_projection_matrix (GL::transpose (M), axis), with_projection); } @@ -279,18 +279,18 @@ namespace MR Math::Versorf Base::get_rotate_rotation () const { const Projection* proj = get_current_projection(); - if (!proj) + if (!proj) return Math::Versorf(); QPoint dpos = window().mouse_displacement(); - if (dpos.x() == 0 && dpos.y() == 0) + if (dpos.x() == 0 && dpos.y() == 0) return Math::Versorf(); Eigen::Vector3f x1 (window().mouse_position().x() - proj->x_position() - proj->width()/2, window().mouse_position().y() - proj->y_position() - proj->height()/2, 0.0); - if (x1.norm() < 16.0f) + if (x1.norm() < 16.0f) return Math::Versorf(); Eigen::Vector3f x0 (dpos.x() - x1[0], dpos.y() - x1[1], 0.0); @@ -313,7 +313,7 @@ namespace MR if (window().active_camera_interactor() && window().active_camera_interactor()->tilt_event()) return; - if (snap_to_image()) + if (snap_to_image()) window().set_snap_to_image (false); const Math::Versorf rot = get_tilt_rotation(); @@ -334,7 +334,7 @@ namespace MR if (window().active_camera_interactor() && window().active_camera_interactor()->rotate_event()) return; - if (snap_to_image()) + if (snap_to_image()) window().set_snap_to_image (false); const Math::Versorf rot = get_rotate_rotation(); @@ -352,14 +352,14 @@ namespace MR - void Base::reset_event () - { + void Base::reset_event () + { reset_view(); updateGL(); } - void Base::reset_view () + void Base::reset_view () { if (!image()) return; const Projection* proj = get_current_projection(); @@ -383,7 +383,7 @@ namespace MR std::floor ((image()->header().size(2)-1)/2.0f) ); - set_focus (image()->transform().voxel2scanner.cast() * p); + set_focus (image()->voxel2scanner() * p); set_target (focus()); reset_orientation(); diff --git a/src/gui/mrview/mode/base.h b/src/gui/mrview/mode/base.h index ef2c77fac6..ae8bdc77f8 100644 --- a/src/gui/mrview/mode/base.h +++ b/src/gui/mrview/mode/base.h @@ -110,7 +110,7 @@ namespace MR else return Math::Versorf::unit(); } - return window().orientation(); + return window().orientation(); } int width () const { return glarea()->width(); } @@ -173,7 +173,7 @@ namespace MR Eigen::Vector3f voxel_at (const Eigen::Vector3f& pos) const { if (!image()) return Eigen::Vector3f { NAN, NAN, NAN }; - const Eigen::Vector3f result = image()->transform().scanner2voxel.cast() * pos; + const Eigen::Vector3f result = image()->scanner2voxel() * pos; return result; } @@ -190,13 +190,13 @@ 
namespace MR int slice (int axis) const { return std::round (voxel_at (focus())[axis]); } int slice () const { return slice (plane()); } - void updateGL () { window().updateGL(); } + void updateGL () { window().updateGL(); } protected: GL::mat4 adjust_projection_matrix (const GL::mat4& Q, int proj) const; - GL::mat4 adjust_projection_matrix (const GL::mat4& Q) const { - return adjust_projection_matrix (Q, plane()); + GL::mat4 adjust_projection_matrix (const GL::mat4& Q) const { + return adjust_projection_matrix (Q, plane()); } void reset_view (); diff --git a/src/gui/mrview/mode/volume.cpp b/src/gui/mrview/mode/volume.cpp index 700cf2b3d0..8ccb594fd0 100644 --- a/src/gui/mrview/mode/volume.cpp +++ b/src/gui/mrview/mode/volume.cpp @@ -28,25 +28,25 @@ namespace MR namespace Mode { - std::string Volume::Shader::vertex_shader_source (const Displayable&) + std::string Volume::Shader::vertex_shader_source (const Displayable&) { - std::string source = + std::string source = "layout(location=0) in vec3 vertpos;\n" "uniform mat4 M;\n" "out vec3 texcoord;\n"; - for (int n = 0; n < mode.overlays_for_3D.size(); ++n) - source += + for (int n = 0; n < mode.overlays_for_3D.size(); ++n) + source += "uniform mat4 overlay_M" + str(n) + ";\n" "out vec3 overlay_texcoord" + str(n) + ";\n"; - source += + source += "void main () {\n" " texcoord = vertpos;\n" " gl_Position = M * vec4 (vertpos,1);\n"; - for (int n = 0; n < mode.overlays_for_3D.size(); ++n) - source += + for (int n = 0; n < mode.overlays_for_3D.size(); ++n) + source += " overlay_texcoord"+str(n) + " = (overlay_M"+str(n) + " * vec4 (vertpos,1)).xyz;\n"; source += @@ -78,7 +78,7 @@ namespace MR "in vec3 texcoord;\n"; for (size_t n = 0; n < clip.size(); ++n) - source += + source += "uniform vec4 clip" + str(n) + ";\n" "uniform int clip" + str(n) + "_selected;\n"; @@ -101,13 +101,13 @@ namespace MR " vec4 color;\n"; - source += + source += " final_color = vec4 (0.0);\n" " float dither = fract(sin(gl_FragCoord.x * 12.9898 + gl_FragCoord.y * 78.233) * 43758.5453);\n" " vec3 coord = texcoord + ray * dither;\n"; - for (int n = 0; n < mode.overlays_for_3D.size(); ++n) - source += + for (int n = 0; n < mode.overlays_for_3D.size(); ++n) + source += " vec3 overlay_coord"+str(n) +" = overlay_texcoord"+str(n) + " + overlay_ray"+str(n) + " * dither;\n"; source += @@ -129,13 +129,13 @@ namespace MR source += std::string(" bool show = ") + ( AND ? "false" : "true" ) + ";\n"; for (size_t n = 0; n < clip.size(); ++n) source += std::string(" if (dot (coord, clip") + str(n) + ".xyz) " + ( AND ? "<" : ">" ) + " clip" + str(n) + ".w)\n"; - source += + source += std::string(" show = ") + ( AND ? 
"true" : "false" ) + ";\n" " if (show) {\n"; } - source += + source += " color = texture (image_sampler, coord);\n" " amplitude = " + std::string (ColourMap::maps[object.colourmap].amplitude) + ";\n" " if (!isnan(amplitude) && !isinf(amplitude)"; @@ -150,17 +150,17 @@ namespace MR " color.a = clamp ((amplitude - alpha_offset) * alpha_scale, 0, alpha);\n"; if (!ColourMap::maps[object.colourmap].special) { - source += + source += " amplitude = clamp ("; - if (object.scale_inverted()) + if (object.scale_inverted()) source += "1.0 -"; - source += + source += " scale * (amplitude - offset), 0.0, 1.0);\n"; } - source += + source += std::string (" ") + ColourMap::maps[object.colourmap].glsl_mapping; - + source += " final_color.rgb += (1.0 - final_color.a) * color.rgb * color.a;\n" " final_color.a += color.a;\n" @@ -176,7 +176,7 @@ namespace MR // OVERLAYS: for (size_t n = 0, N = mode.overlays_for_3D.size(); n < N; ++n) { const ImageBase* image = mode.overlays_for_3D[n]; - source += + source += " overlay_coord"+str(n) + " += overlay_ray"+str(n) + ";\n" " if (overlay_coord"+str(n) + ".s >= 0.0 && overlay_coord"+str(n) + ".s <= 1.0 &&\n" " overlay_coord"+str(n) + ".t >= 0.0 && overlay_coord"+str(n) + ".t <= 1.0 &&\n" @@ -194,11 +194,11 @@ namespace MR source += " && amplitude >= overlay"+str(n)+"_alpha_offset) {\n"; if (!ColourMap::maps[image->colourmap].special) { - source += + source += " amplitude = clamp ("; if (image->scale_inverted()) source += "1.0 -"; - source += + source += " overlay"+str(n)+"_scale * (amplitude - overlay"+str(n)+"_offset), 0.0, 1.0);\n"; } @@ -208,7 +208,7 @@ namespace MR replace (mapping, "colourmap_colour", "overlay"+str(n)+"_colourmap_colour"); source += std::string (" ") + mapping; - source += + source += " color.a = amplitude * overlay"+str(n) + "_alpha;\n" " final_color.rgb += (1.0 - final_color.a) * color.rgb * color.a;\n" " final_color.a += color.a;\n" @@ -220,18 +220,18 @@ namespace MR if (clip.size() && mode.get_cliphighlightstate()) { source += " float highlight = 0.0;\n"; - for (size_t n = 0; n < clip.size(); ++n) + for (size_t n = 0; n < clip.size(); ++n) source += " if (clip"+str(n)+"_selected != 0)\n" " highlight += clamp (selection_thickness - abs (dot (coord, clip" + str(n) + ".xyz) - clip" + str(n) + ".w), 0.0, selection_thickness);\n"; - source += + source += " highlight *= " + str(clip_color[3]) + ";\n" - " final_color.rgb += (1.0 - final_color.a) * vec3(" + " final_color.rgb += (1.0 - final_color.a) * vec3(" + str(clip_color[0]) + "," + str(clip_color[1]) + "," + str(clip_color[2]) + ") * highlight;\n" " final_color.a += highlight;\n"; } - source += + source += " if (final_color.a > 0.95) break;\n" " }\n" "}\n"; @@ -242,11 +242,11 @@ namespace MR - bool Volume::Shader::need_update (const Displayable& object) const + bool Volume::Shader::need_update (const Displayable& object) const { - if (mode.update_overlays) + if (mode.update_overlays) return true; - if (mode.get_active_clip_planes().size() != active_clip_planes) + if (mode.get_active_clip_planes().size() != active_clip_planes) return true; if (mode.get_cliphighlightstate() != cliphighlight) return true; @@ -257,7 +257,7 @@ namespace MR - void Volume::Shader::update (const Displayable& object) + void Volume::Shader::update (const Displayable& object) { active_clip_planes = mode.get_active_clip_planes().size(); cliphighlight = mode.get_cliphighlightstate(); @@ -287,10 +287,11 @@ namespace MR inline GL::mat4 get_tex_to_scanner_matrix (const ImageBase& image) { - const Eigen::Vector3f pos = 
image.transform().voxel2scanner.cast() * Eigen::Vector3f { -0.5f, -0.5f, -0.5f }; - const Eigen::Vector3f vec_X = image.transform().voxel2scanner.linear().cast() * Eigen::Vector3f { float(image.header().size(0)), 0.0f, 0.0f }; - const Eigen::Vector3f vec_Y = image.transform().voxel2scanner.linear().cast() * Eigen::Vector3f { 0.0f, float(image.header().size(1)), 0.0f }; - const Eigen::Vector3f vec_Z = image.transform().voxel2scanner.linear().cast() * Eigen::Vector3f { 0.0f, 0.0f, float(image.header().size(2)) }; + const auto V2S = image.voxel2scanner(); + const Eigen::Vector3f pos = V2S * Eigen::Vector3f { -0.5f, -0.5f, -0.5f }; + const Eigen::Vector3f vec_X = V2S.rotation() * Eigen::Vector3f { float(image.header().size(0)), 0.0f, 0.0f }; + const Eigen::Vector3f vec_Y = V2S.rotation() * Eigen::Vector3f { 0.0f, float(image.header().size(1)), 0.0f }; + const Eigen::Vector3f vec_Z = V2S.rotation() * Eigen::Vector3f { 0.0f, 0.0f, float(image.header().size(2)) }; GL::mat4 T2S; T2S(0,0) = vec_X[0]; T2S(1,0) = vec_X[1]; @@ -308,7 +309,7 @@ namespace MR T2S(1,3) = pos[1]; T2S(2,3) = pos[2]; - T2S(3,0) = T2S(3,1) = T2S(3,2) = 0.0f; + T2S(3,0) = T2S(3,1) = T2S(3,2) = 0.0f; T2S(3,3) = 1.0f; return T2S; @@ -353,7 +354,7 @@ namespace MR GL::mat4 S2T = GL::inv (T2S); float step_size = 0.5f * std::min ( { float(image()->header().spacing (0)), float(image()->header().spacing (1)), float(image()->header().spacing (2)) } ); - Eigen::Vector3f ray = image()->transform().scanner2voxel.matrix().topLeftCorner<3,3>().cast() * projection.screen_normal(); + Eigen::Vector3f ray = image()->scanner2voxel().rotation() * projection.screen_normal(); Eigen::Vector3f ray_real_space = ray; ray *= step_size; ray[0] /= image()->header().size(0); @@ -395,39 +396,39 @@ namespace MR GLubyte indices[12]; if (ray[0] < 0) { - indices[0] = 4; + indices[0] = 4; indices[1] = 5; indices[2] = 7; indices[3] = 6; } else { - indices[0] = 0; + indices[0] = 0; indices[1] = 1; indices[2] = 3; indices[3] = 2; } if (ray[1] < 0) { - indices[4] = 2; + indices[4] = 2; indices[5] = 3; indices[6] = 7; indices[7] = 6; } else { - indices[4] = 0; + indices[4] = 0; indices[5] = 1; indices[6] = 5; indices[7] = 4; } if (ray[2] < 0) { - indices[8] = 1; + indices[8] = 1; indices[9] = 3; indices[10] = 7; indices[11] = 5; } else { - indices[8] = 0; + indices[8] = 0; indices[9] = 2; indices[10] = 6; indices[11] = 4; @@ -457,7 +458,7 @@ namespace MR depth_texture.bind(); depth_texture.set_interp (gl::NEAREST); } - else + else depth_texture.bind(); GL_CHECK_ERROR; @@ -525,12 +526,12 @@ namespace MR inline Tool::View* Volume::get_view_tool () const { Tool::Dock* dock = dynamic_cast(window().tools()->actions()[0])->dock; - if (!dock) + if (!dock) return NULL; return dynamic_cast (dock->tool); } - inline vector< std::pair > Volume::get_active_clip_planes () const + inline vector< std::pair > Volume::get_active_clip_planes () const { Tool::View* view = get_view_tool(); return view ? view->get_active_clip_planes() : vector< std::pair >(); @@ -541,7 +542,7 @@ namespace MR Tool::View* view = get_view_tool(); return view ? 
view->get_clip_planes_to_be_edited() : vector(); } - + inline bool Volume::get_cliphighlightstate () const { Tool::View* view = get_view_tool(); diff --git a/src/gui/mrview/tool/odf/odf.cpp b/src/gui/mrview/tool/odf/odf.cpp index 7f9398c93a..0ea1d3f168 100644 --- a/src/gui/mrview/tool/odf/odf.cpp +++ b/src/gui/mrview/tool/odf/odf.cpp @@ -289,28 +289,28 @@ namespace MR Eigen::Vector3f pos (window().target()); pos += projection.screen_normal() * (projection.screen_normal().dot (window().focus() - window().target())); if (lock_to_grid_box->isChecked()) { - Eigen::Vector3f p = image.transform().scanner2voxel.cast() * pos; + Eigen::Vector3f p = image.scanner2voxel() * pos; p[0] = std::round (p[0]); p[1] = std::round (p[1]); p[2] = std::round (p[2]); - pos = image.transform().voxel2scanner.cast() * p; + pos = image.voxel2scanner() * p; } Eigen::Vector3f x_dir = projection.screen_to_model_direction (1.0, 0.0, projection.depth_of (pos)); x_dir.normalize(); - x_dir = image.transform().scanner2image.rotation().cast() * x_dir; + x_dir = image.scanner2image().rotation() * x_dir; x_dir[0] *= image.header().spacing (0); x_dir[1] *= image.header().spacing (1); x_dir[2] *= image.header().spacing (2); - x_dir = image.transform().image2scanner.rotation().cast() * x_dir; + x_dir = image.image2scanner().rotation() * x_dir; Eigen::Vector3f y_dir = projection.screen_to_model_direction (0.0, 1.0, projection.depth_of (pos)); y_dir.normalize(); - y_dir = image.transform().scanner2image.rotation().cast() * y_dir; + y_dir = image.scanner2image().rotation() * y_dir; y_dir[0] *= image.header().spacing (0); y_dir[1] *= image.header().spacing (1); y_dir[2] *= image.header().spacing (2); - y_dir = image.transform().image2scanner.rotation().cast() * y_dir; + y_dir = image.image2scanner().rotation() * y_dir; const Eigen::Vector3f x_width = projection.screen_to_model_direction (projection.width()/2.0, 0.0, projection.depth_of (pos)); const int nx = std::ceil (x_width.norm() / x_dir.norm()); @@ -396,14 +396,16 @@ namespace MR MRView::Image& image (item.image); values.setZero(); if (interp) { - if (image.linear_interp.scanner (pos)) { - for (image.linear_interp.index(3) = 0; image.linear_interp.index(3) < std::min (ssize_t(values.size()), image.linear_interp.size(3)); ++image.linear_interp.index(3)) - values[image.linear_interp.index(3)] = image.linear_interp.value().real(); + auto linear_interp = Interp::make_linear (image.image); + if (linear_interp.scanner (pos)) { + for (linear_interp.index(3) = 0; linear_interp.index(3) < std::min (ssize_t(values.size()), linear_interp.size(3)); ++linear_interp.index(3)) + values[linear_interp.index(3)] = linear_interp.value().real(); } } else { - if (image.nearest_interp.scanner (pos)) { - for (image.nearest_interp.index(3) = 0; image.nearest_interp.index(3) < std::min (ssize_t(values.size()), image.nearest_interp.size(3)); ++image.nearest_interp.index(3)) - values[image.nearest_interp.index(3)] = image.nearest_interp.value().real(); + auto nearest_interp = Interp::make_nearest (image.image); + if (nearest_interp.scanner (pos)) { + for (nearest_interp.index(3) = 0; nearest_interp.index(3) < std::min (ssize_t(values.size()), nearest_interp.size(3)); ++nearest_interp.index(3)) + values[nearest_interp.index(3)] = nearest_interp.value().real(); } } if (item.odf_type == odf_type_t::DIXEL && item.dixel->dir_type == ODF_Item::DixelPlugin::dir_t::DW_SCHEME) { diff --git a/src/gui/mrview/tool/roi_editor/roi.cpp b/src/gui/mrview/tool/roi_editor/roi.cpp index 6f10482a47..922ee06324 100644 --- 
a/src/gui/mrview/tool/roi_editor/roi.cpp +++ b/src/gui/mrview/tool/roi_editor/roi.cpp @@ -365,11 +365,11 @@ namespace MR - int ROI::normal2axis (const Eigen::Vector3f& normal, const MR::Transform& transform) const + int ROI::normal2axis (const Eigen::Vector3f& normal, const ROI_Item& roi) const { - float x_dot_n = std::abs ((transform.image2scanner.rotation().cast() * Eigen::Vector3f { 1.0f, 0.0f, 0.0f }).dot (normal)); - float y_dot_n = std::abs ((transform.image2scanner.rotation().cast() * Eigen::Vector3f { 0.0f, 1.0f, 0.0f }).dot (normal)); - float z_dot_n = std::abs ((transform.image2scanner.rotation().cast() * Eigen::Vector3f { 0.0f, 0.0f, 1.0f }).dot (normal)); + float x_dot_n = std::abs ((roi.image2scanner().rotation() * Eigen::Vector3f { 1.0f, 0.0f, 0.0f }).dot (normal)); + float y_dot_n = std::abs ((roi.image2scanner().rotation() * Eigen::Vector3f { 0.0f, 1.0f, 0.0f }).dot (normal)); + float z_dot_n = std::abs ((roi.image2scanner().rotation() * Eigen::Vector3f { 0.0f, 0.0f, 1.0f }).dot (normal)); if (x_dot_n > y_dot_n) return x_dot_n > z_dot_n ? 0 : 2; else @@ -502,8 +502,8 @@ namespace MR const Projection* proj = window().get_current_mode()->get_current_projection(); if (!proj) return; const Eigen::Vector3f current_origin = proj->screen_to_model (window().mouse_position(), window().focus()); - current_axis = normal2axis (proj->screen_normal(), roi->transform()); - current_slice = std::lround ((roi->transform().scanner2voxel.cast() * current_origin)[current_axis]); + current_axis = normal2axis (proj->screen_normal(), *roi); + current_slice = std::lround ((roi->scanner2voxel() * current_origin)[current_axis]); roi->start (ROI_UndoEntry (*roi, current_axis, current_slice)); @@ -753,19 +753,19 @@ namespace MR // figure out the closest ROI axis, and lock to it: ROI_Item* roi = dynamic_cast (list_model->get (indices[0])); - current_axis = normal2axis (proj->screen_normal(), roi->transform()); + current_axis = normal2axis (proj->screen_normal(), *roi); // figure out current slice in ROI: - current_slice = std::lround ((roi->transform().scanner2voxel.cast() * current_origin)[current_axis]); + current_slice = std::lround ((roi->scanner2voxel() * current_origin)[current_axis]); // floating-point version of slice location to keep it consistent on // mouse move: Eigen::Vector3f slice_axis { 0.0, 0.0, 0.0 }; slice_axis[current_axis] = current_axis == 2 ? 1.0 : -1.0; - slice_axis = roi->transform().image2scanner.rotation().cast() * slice_axis; + slice_axis = roi->image2scanner().rotation() * slice_axis; current_slice_loc = current_origin.dot (slice_axis); - const Math::Versorf orient (roi->header().transform().rotation().cast()); + const Math::Versorf orient (roi->image2scanner().rotation()); window().set_snap_to_image (false); window().set_orientation (orient); window().set_plane (current_axis); @@ -814,7 +814,7 @@ namespace MR Eigen::Vector3f pos = proj->screen_to_model (window().mouse_position(), window().focus()); Eigen::Vector3f slice_axis (0.0, 0.0, 0.0); slice_axis[current_axis] = current_axis == 2 ? 
1.0 : -1.0; - slice_axis = roi->transform().image2scanner.rotation().cast() * slice_axis; + slice_axis = roi->image2scanner().rotation() * slice_axis; float l = (current_slice_loc - pos.dot (slice_axis)) / proj->screen_normal().dot (slice_axis); window().set_focus (window().focus() + l * proj->screen_normal()); const Eigen::Vector3f pos_adj = pos + l * proj->screen_normal(); diff --git a/src/gui/mrview/tool/roi_editor/roi.h b/src/gui/mrview/tool/roi_editor/roi.h index aaf386c175..2d1803ff5d 100644 --- a/src/gui/mrview/tool/roi_editor/roi.h +++ b/src/gui/mrview/tool/roi_editor/roi.h @@ -106,7 +106,7 @@ namespace MR void load (vector>& list); void save (ROI_Item*); - int normal2axis (const Eigen::Vector3f&, const MR::Transform&) const; + int normal2axis (const Eigen::Vector3f&, const ROI_Item&) const; void dropEvent (QDropEvent* event) override; }; diff --git a/src/gui/mrview/tool/roi_editor/undoentry.cpp b/src/gui/mrview/tool/roi_editor/undoentry.cpp index 8a7f55b6e6..0b8f077231 100644 --- a/src/gui/mrview/tool/roi_editor/undoentry.cpp +++ b/src/gui/mrview/tool/roi_editor/undoentry.cpp @@ -29,7 +29,7 @@ namespace MR std::unique_ptr ROI_UndoEntry::shared; - + ROI_UndoEntry::Shared::Shared() : count (1) @@ -205,8 +205,8 @@ namespace MR void ROI_UndoEntry::draw_line (ROI_Item& roi, const Eigen::Vector3f& prev_pos, const Eigen::Vector3f& pos, const bool insert_mode_value) { const GLubyte value = insert_mode_value ? 1 : 0; - Eigen::Vector3f p = roi.transform().scanner2voxel.cast() * prev_pos; - const Eigen::Vector3f final_pos = roi.transform().scanner2voxel.cast() * pos; + Eigen::Vector3f p = roi.scanner2voxel() * prev_pos; + const Eigen::Vector3f final_pos = roi.scanner2voxel() * pos; const Eigen::Vector3f dir ((final_pos - p).normalized()); Eigen::Array3i v (int(std::round (p[0])), int(std::round (p[1])), int(std::round (p[2]))); const Eigen::Array3i final_vox (int(std::round (final_pos[0])), int(std::round (final_pos[1])), int(std::round (final_pos[2]))); @@ -249,8 +249,8 @@ namespace MR const float radius = 0.5f * diameter; const float radius_sq = Math::pow2 (radius); const GLubyte value = insert_mode_value ? 1 : 0; - const Eigen::Vector3f start = roi.transform().scanner2voxel.cast() * prev_pos; - const Eigen::Vector3f end = roi.transform().scanner2voxel.cast() * pos; + const Eigen::Vector3f start = roi.scanner2voxel() * prev_pos; + const Eigen::Vector3f end = roi.scanner2voxel() * pos; const Eigen::Vector3f offset (end - start); const float offset_norm (offset.norm()); const Eigen::Vector3f dir (Eigen::Vector3f(offset).normalized()); @@ -291,7 +291,7 @@ namespace MR void ROI_UndoEntry::draw_circle (ROI_Item& roi, const Eigen::Vector3f& pos, const bool insert_mode_value, const float diameter) { - Eigen::Vector3f vox = roi.transform().scanner2voxel.cast() * pos; + Eigen::Vector3f vox = roi.scanner2voxel() * pos; roi.brush_size = diameter; const float radius = 0.5f * diameter; const float radius_sq = Math::pow2 (radius); @@ -326,10 +326,10 @@ namespace MR void ROI_UndoEntry::draw_rectangle (ROI_Item& roi, const Eigen::Vector3f& from_pos, const Eigen::Vector3f& to_pos, const bool insert_mode_value) { - Eigen::Vector3f vox = roi.transform().scanner2voxel.cast() * from_pos; + Eigen::Vector3f vox = roi.scanner2voxel() * from_pos; const GLubyte value = insert_mode_value ? 
1 : 0; std::array a = { { int(std::lround (vox[0])), int(std::lround (vox[1])), int(std::lround (vox[2])) } }; - vox = roi.transform().scanner2voxel.cast() * to_pos; + vox = roi.scanner2voxel() * to_pos; std::array b = { { int(std::lround (vox[0])), int(std::lround (vox[1])), int(std::lround (vox[2])) } }; if (a[0] > b[0]) std::swap (a[0], b[0]); @@ -358,7 +358,7 @@ namespace MR void ROI_UndoEntry::draw_fill (ROI_Item& roi, const Eigen::Vector3f& pos, const bool insert_mode_value) { - const Eigen::Vector3f vox = roi.transform().scanner2voxel.cast() * pos; + const Eigen::Vector3f vox = roi.scanner2voxel() * pos; const std::array seed_voxel = { { int(std::lround (vox[0])), int(std::lround (vox[1])), int(std::lround (vox[2])) } }; for (size_t axis = 0; axis != 3; ++axis) { if (seed_voxel[axis] < 0) return; @@ -402,7 +402,7 @@ namespace MR - void ROI_UndoEntry::undo (ROI_Item& roi) + void ROI_UndoEntry::undo (ROI_Item& roi) { MRView::GrabContext context; ASSERT_GL_MRVIEW_CONTEXT_IS_CURRENT; @@ -411,7 +411,7 @@ namespace MR ASSERT_GL_MRVIEW_CONTEXT_IS_CURRENT; } - void ROI_UndoEntry::redo (ROI_Item& roi) + void ROI_UndoEntry::redo (ROI_Item& roi) { MRView::GrabContext context; ASSERT_GL_MRVIEW_CONTEXT_IS_CURRENT; @@ -420,7 +420,7 @@ namespace MR ASSERT_GL_MRVIEW_CONTEXT_IS_CURRENT; } - void ROI_UndoEntry::copy (ROI_Item& roi, ROI_UndoEntry& source) + void ROI_UndoEntry::copy (ROI_Item& roi, ROI_UndoEntry& source) { MRView::GrabContext context; ASSERT_GL_MRVIEW_CONTEXT_IS_CURRENT; diff --git a/src/gui/mrview/tool/screen_capture.cpp b/src/gui/mrview/tool/screen_capture.cpp index c57962fe08..bdc538e922 100644 --- a/src/gui/mrview/tool/screen_capture.cpp +++ b/src/gui/mrview/tool/screen_capture.cpp @@ -374,7 +374,7 @@ namespace MR switch (translation_type) { case TranslationType::Voxel: - trans_vec = img->transform().voxel2scanner.rotation().cast() * trans_vec; + trans_vec = img->voxel2scanner().rotation() * trans_vec; break; case TranslationType::Camera: { @@ -420,7 +420,7 @@ namespace MR start_index->setValue (i + 1); this->window().updateGL(); qApp->processEvents(); - } + } is_playing = false; } @@ -455,8 +455,8 @@ namespace MR - void Capture::add_commandline_options (MR::App::OptionList& options) - { + void Capture::add_commandline_options (MR::App::OptionList& options) + { using namespace MR::App; options + OptionGroup ("Screen Capture tool options") @@ -470,7 +470,7 @@ namespace MR + Option ("capture.grab", "Start the screen capture process.").allow_multiple(); } - bool Capture::process_commandline_option (const MR::App::ParsedOption& opt) + bool Capture::process_commandline_option (const MR::App::ParsedOption& opt) { if (opt.opt->is ("capture.folder")) { directory->setPath (std::string(opt[0]).c_str()); diff --git a/src/gui/mrview/tool/view.cpp b/src/gui/mrview/tool/view.cpp index 31e4ce83ff..eebf98f7ca 100644 --- a/src/gui/mrview/tool/view.cpp +++ b/src/gui/mrview/tool/view.cpp @@ -129,7 +129,7 @@ namespace MR p.plane[1] = M (proj, 1); p.plane[2] = M (proj, 2); - const Eigen::Vector3f centre = image.transform().voxel2scanner.cast() * Eigen::Vector3f { image.header().size(0)/2.0f, image.header().size(1)/2.0f, image.header().size(2)/2.0f }; + const Eigen::Vector3f centre = image.voxel2scanner() * Eigen::Vector3f { image.header().size(0)/2.0f, image.header().size(1)/2.0f, image.header().size(2)/2.0f }; p.plane[3] = centre[0]*p.plane[0] + centre[1]*p.plane[1] + centre[2]*p.plane[2]; p.active = true; @@ -582,7 +582,7 @@ namespace MR return; auto focus (window().focus()); - focus = 
window().image()->transform().scanner2voxel.cast() * focus; + focus = window().image()->scanner2voxel() * focus; std::cout << str(focus[0]) << ", " << str(focus[1]) << ", " << str(focus[2]) << std::endl; QClipboard *clip = QApplication::clipboard(); @@ -602,7 +602,7 @@ namespace MR focus_y->setValue (focus[1]); focus_z->setValue (focus[2]); - focus = window().image()->transform().scanner2voxel.cast() * focus; + focus = window().image()->scanner2voxel() * focus; voxel_x->setValue (focus[0]); voxel_y->setValue (focus[1]); voxel_z->setValue (focus[2]); @@ -636,7 +636,7 @@ namespace MR { try { Eigen::Vector3f focus { voxel_x->value(), voxel_y->value(), voxel_z->value() }; - focus = window().image()->transform().voxel2scanner.cast() * focus; + focus = window().image()->voxel2scanner() * focus; window().set_focus (focus); window().updateGL(); } @@ -670,7 +670,7 @@ namespace MR transparency_box->setVisible (mode->features & Mode::ShaderTransparency); threshold_box->setVisible (mode->features & Mode::ShaderTransparency); clip_box->setVisible (mode->features & Mode::ShaderClipping); - if (mode->features & Mode::ShaderClipping) + if (mode->features & Mode::ShaderClipping) clip_planes_selection_changed_slot(); else window().register_camera_interactor(); @@ -1100,27 +1100,27 @@ namespace MR } - void View::deactivate () - { + void View::deactivate () + { clip_planes_list_view->selectionModel()->clear(); } - bool View::slice_move_event (float x) + bool View::slice_move_event (float x) { - + vector clip = get_clip_planes_to_be_edited(); if (clip.size()) { const auto &header = window().image()->header(); float increment = x * std::pow (header.spacing (0) * header.spacing (1) * header.spacing (2), 1.0f/3.0f); move_clip_planes_in_out (clip, increment); - } + } return true; } - bool View::pan_event () + bool View::pan_event () { vector clip = get_clip_planes_to_be_edited(); if (clip.size()) { @@ -1135,17 +1135,17 @@ namespace MR } - bool View::panthrough_event () + bool View::panthrough_event () { vector clip = get_clip_planes_to_be_edited(); - if (clip.size()) + if (clip.size()) move_clip_planes_in_out (clip, MOVE_IN_OUT_FOV_MULTIPLIER * window().mouse_displacement().y() * window().FOV()); return true; } - bool View::tilt_event () + bool View::tilt_event () { vector clip = get_clip_planes_to_be_edited(); if (clip.size()) { @@ -1159,7 +1159,7 @@ namespace MR - bool View::rotate_event () + bool View::rotate_event () { vector clip = get_clip_planes_to_be_edited(); if (clip.size()) { diff --git a/src/gui/mrview/volume.cpp b/src/gui/mrview/volume.cpp index 38804ddfd6..42d5f61e48 100644 --- a/src/gui/mrview/volume.cpp +++ b/src/gui/mrview/volume.cpp @@ -56,15 +56,6 @@ namespace MR - - void Volume::set_tranform (const transform_type& new_transform) - { - _header.transform() = new_transform; - _transform = new MR::Transform (_header); - } - - - } } } diff --git a/src/gui/mrview/volume.h b/src/gui/mrview/volume.h index 90af2b68f0..50e0c18964 100644 --- a/src/gui/mrview/volume.h +++ b/src/gui/mrview/volume.h @@ -40,7 +40,6 @@ namespace MR Volume (MR::Header&& header) : Displayable (header.name()), _header (std::move (header)), - _transform (_header), //CONF option: ImageInterpolation //CONF default: true //CONF Define default interplation setting for image and image overlay. 
@@ -53,8 +52,8 @@ namespace MR bool interpolate () const { return interpolation == gl::LINEAR; } void set_colourmap (size_t index) { - if (ColourMap::maps[index].special || ColourMap::maps[colourmap].special) - if (index != colourmap) + if (ColourMap::maps[index].special || ColourMap::maps[colourmap].special) + if (index != colourmap) texture_mode_changed = true; Displayable::colourmap = index; } @@ -73,15 +72,34 @@ namespace MR _texture.gen (gl::TEXTURE_3D); _texture.bind(); } - else + else _texture.bind(); _texture.set_interp (interpolation); } + + Eigen::Transform image2scanner () const { + return _header.transform().cast(); + } + + Eigen::Transform scanner2image () const { + return _header.transform().inverse().cast(); + } + + Eigen::Transform voxel2scanner () const { + auto T = _header.transform(); + return T.scale (Eigen::Vector3d (_header.spacing(0), _header.spacing(1), _header.spacing(2))).cast(); + } + + Eigen::Transform scanner2voxel () const { + auto T = _header.transform().inverse(); + return T.prescale (Eigen::Vector3d (1.0/_header.spacing(0), 1.0/_header.spacing(1), 1.0/_header.spacing(2))).cast(); + } + void allocate(); float focus_rate () const { - return 1.0e-3 * (std::pow ( + return 1.0e-3 * (std::pow ( _header.size(0) * _header.spacing(0) * _header.size(1) * _header.spacing(1) * _header.size(2) * _header.spacing(2), @@ -92,8 +110,6 @@ namespace MR const GL::Texture& texture () const { return _texture; } const MR::Header& header () const { return _header; } MR::Header& header () { return _header; } - const MR::Transform& transform () const { return _transform; } - void set_tranform (const transform_type& new_transform); void min_max_set() { update_levels(); @@ -111,7 +127,6 @@ namespace MR protected: MR::Header _header; - MR::Transform _transform; int interpolation; GL::Texture _texture; GL::VertexBuffer vertex_buffer; @@ -128,7 +143,7 @@ namespace MR return Eigen::Vector3f (a[0]/b[0], a[1]/b[1], a[2]/b[2]); } - void set_vertices_for_slice_render (const Projection& projection, float depth) + void set_vertices_for_slice_render (const Projection& projection, float depth) { vertices[0] = projection.screen_to_model (projection.x_position(), projection.y_position()+projection.height(), depth); vertices[2] = projection.screen_to_model (projection.x_position(), projection.y_position(), depth); @@ -136,10 +151,11 @@ namespace MR vertices[6] = projection.screen_to_model (projection.x_position()+projection.width(), projection.y_position()+projection.height(), depth); const Eigen::Vector3f sizes (_header.size (0), _header.size (1), _header.size (2)); - vertices[1] = div ((_transform.scanner2voxel.cast() * vertices[0]) + Eigen::Vector3f { 0.5, 0.5, 0.5 }, sizes); - vertices[3] = div ((_transform.scanner2voxel.cast() * vertices[2]) + Eigen::Vector3f { 0.5, 0.5, 0.5 }, sizes); - vertices[5] = div ((_transform.scanner2voxel.cast() * vertices[4]) + Eigen::Vector3f { 0.5, 0.5, 0.5 }, sizes); - vertices[7] = div ((_transform.scanner2voxel.cast() * vertices[6]) + Eigen::Vector3f { 0.5, 0.5, 0.5 }, sizes); + const auto S2V = scanner2voxel(); + vertices[1] = div ((S2V * vertices[0]) + Eigen::Vector3f { 0.5, 0.5, 0.5 }, sizes); + vertices[3] = div ((S2V * vertices[2]) + Eigen::Vector3f { 0.5, 0.5, 0.5 }, sizes); + vertices[5] = div ((S2V * vertices[4]) + Eigen::Vector3f { 0.5, 0.5, 0.5 }, sizes); + vertices[7] = div ((S2V * vertices[6]) + Eigen::Vector3f { 0.5, 0.5, 0.5 }, sizes); } void draw_vertices () diff --git a/src/gui/mrview/window.cpp b/src/gui/mrview/window.cpp index 
afcb55b379..124bdb8b3c 100644 --- a/src/gui/mrview/window.cpp +++ b/src/gui/mrview/window.cpp @@ -1759,7 +1759,7 @@ namespace MR - void Window::register_camera_interactor (Tool::CameraInteractor* agent) + void Window::register_camera_interactor (Tool::CameraInteractor* agent) { if (camera_interactor) camera_interactor->deactivate(); @@ -1844,7 +1844,7 @@ namespace MR vector pos = parse_floats (opt[0]); if (pos.size() != 3) throw Exception ("-voxel option expects a comma-separated list of 3 floating-point values"); - set_focus (image()->transform().voxel2scanner.cast() * Eigen::Vector3f { float(pos[0]), float(pos[1]), float(pos[2]) }); + set_focus (image()->voxel2scanner() * Eigen::Vector3f { float(pos[0]), float(pos[1]), float(pos[2]) }); glarea->update(); } return; diff --git a/src/gui/mrview/window.h b/src/gui/mrview/window.h index e9ba0acc9b..fd1bc72e8b 100644 --- a/src/gui/mrview/window.h +++ b/src/gui/mrview/window.h @@ -104,7 +104,7 @@ namespace MR if (!image()) return -1; else - return std::round ((image()->transform().scanner2voxel.cast() * focus())[anatomical_plane]); + return std::round ((image()->image.transform().inverse().cast() * focus()) (anatomical_plane) / image()->image.spacing (anatomical_plane)); } Mode::Base* get_current_mode () const { return mode.get(); } From e8bafea9a45e05a37664e805aefa0aae32ef1ddd Mon Sep 17 00:00:00 2001 From: J-Donald Tournier Date: Sat, 9 Dec 2017 00:54:40 +0000 Subject: [PATCH 0084/1471] mrview transform tool: can now modify transform for live image --- src/gui/mrview/tool/transform.cpp | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/src/gui/mrview/tool/transform.cpp b/src/gui/mrview/tool/transform.cpp index 229807f8fc..f012b5deb1 100644 --- a/src/gui/mrview/tool/transform.cpp +++ b/src/gui/mrview/tool/transform.cpp @@ -46,7 +46,7 @@ namespace MR } - void Transform::closeEvent (QCloseEvent*) + void Transform::closeEvent (QCloseEvent*) { if (window().active_camera_interactor() == this) window().register_camera_interactor(); @@ -59,16 +59,17 @@ namespace MR } - void Transform::deactivate () - { + void Transform::deactivate () + { activate_button->setChecked (false); } - bool Transform::slice_move_event (float x) + bool Transform::slice_move_event (float x) { const Projection* proj = window().get_current_mode()->get_current_projection(); - if (!proj) + if (!proj) return true; + const auto &header = window().image()->header(); float increment = window().snap_to_image() ? 
x * header.spacing (window().plane()) : @@ -79,6 +80,9 @@ namespace MR VAR (M.matrix()); M.translate (move.cast()); VAR(M.matrix()); + + window().image()->header().transform() = M; + window().image()->image.buffer->transform() = M; window().updateGL(); return true; } @@ -114,7 +118,7 @@ namespace MR bool Transform::tilt_event () { - /*if (snap_to_image()) + /*if (snap_to_image()) window().set_snap_to_image (false); const Math::Versorf rot = get_tilt_rotation(); @@ -133,7 +137,7 @@ namespace MR bool Transform::rotate_event () { - /*if (snap_to_image()) + /*if (snap_to_image()) window().set_snap_to_image (false); const Math::Versorf rot = get_rotate_rotation(); From b742298abe66a6f30f75bd84b7a9744bb72e8b3f Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Mon, 11 Dec 2017 13:33:39 +1100 Subject: [PATCH 0085/1471] Stats commands: Fix outputs due to matrix transposing --- cmd/connectomestats.cpp | 16 ++++++++-------- cmd/fixelcfestats.cpp | 17 +++++++++-------- cmd/mrclusterstats.cpp | 17 +++++++++-------- 3 files changed, 26 insertions(+), 24 deletions(-) diff --git a/cmd/connectomestats.cpp b/cmd/connectomestats.cpp index bbb61e4134..18d86e4e6b 100644 --- a/cmd/connectomestats.cpp +++ b/cmd/connectomestats.cpp @@ -272,8 +272,8 @@ void run() } for (size_t i = 0; i != num_contrasts; ++i) { if (!contrasts[i].is_F()) { - save_matrix (mat2vec.V2M (abs_effect_size.row(i)), "abs_effect" + postfix(i) + ".csv"); ++progress; - save_matrix (mat2vec.V2M (std_effect_size.row(i)), "std_effect" + postfix(i) + ".csv"); ++progress; + save_matrix (mat2vec.V2M (abs_effect_size.col(i)), "abs_effect" + postfix(i) + ".csv"); ++progress; + save_matrix (mat2vec.V2M (std_effect_size.col(i)), "std_effect" + postfix(i) + ".csv"); ++progress; } } save_matrix (mat2vec.V2M (stdev), "std_dev.csv"); @@ -292,7 +292,7 @@ void run() if (do_nonstationary_adjustment) { Stats::PermTest::precompute_empirical_stat (glm_test, enhancer, empirical_statistic); for (size_t i = 0; i != num_contrasts; ++i) - save_matrix (mat2vec.V2M (empirical_statistic.row(i)), output_prefix + "_empirical" + postfix(i) + ".csv"); + save_matrix (mat2vec.V2M (empirical_statistic.col(i)), output_prefix + "_empirical" + postfix(i) + ".csv"); } // Precompute default statistic and enhanced statistic @@ -302,8 +302,8 @@ void run() Stats::PermTest::precompute_default_permutation (glm_test, enhancer, empirical_statistic, enhanced_output, tvalue_output); for (size_t i = 0; i != num_contrasts; ++i) { - save_matrix (mat2vec.V2M (tvalue_output.row(i)), output_prefix + "_" + (contrasts[i].is_F() ? "F" : "t") + "value" + postfix(i) + ".csv"); - save_matrix (mat2vec.V2M (enhanced_output.row(i)), output_prefix + "_enhanced" + postfix(i) + ".csv"); + save_matrix (mat2vec.V2M (tvalue_output.col(i)), output_prefix + "_" + (contrasts[i].is_F() ? 
"F" : "t") + "value" + postfix(i) + ".csv"); + save_matrix (mat2vec.V2M (enhanced_output.col(i)), output_prefix + "_enhanced" + postfix(i) + ".csv"); } // Perform permutation testing @@ -315,13 +315,13 @@ void run() enhanced_output, null_distribution, uncorrected_pvalues); for (size_t i = 0; i != num_contrasts; ++i) - save_vector (null_distribution.row(i), output_prefix + "_null_dist" + postfix(i) + ".txt"); + save_vector (null_distribution.col(i), output_prefix + "_null_dist" + postfix(i) + ".txt"); matrix_type pvalue_output (num_contrasts, num_edges); Math::Stats::statistic2pvalue (null_distribution, enhanced_output, pvalue_output); for (size_t i = 0; i != num_contrasts; ++i) { - save_matrix (mat2vec.V2M (pvalue_output.row(i)), output_prefix + "_fwe_pvalue" + postfix(i) + ".csv"); - save_matrix (mat2vec.V2M (uncorrected_pvalues.row(i)), output_prefix + "_uncorrected_pvalue" + postfix(i) + ".csv"); + save_matrix (mat2vec.V2M (pvalue_output.col(i)), output_prefix + "_fwe_pvalue" + postfix(i) + ".csv"); + save_matrix (mat2vec.V2M (uncorrected_pvalues.col(i)), output_prefix + "_uncorrected_pvalue" + postfix(i) + ".csv"); } } diff --git a/cmd/fixelcfestats.cpp b/cmd/fixelcfestats.cpp index 6b1a22ba0a..c8a57f6fbf 100644 --- a/cmd/fixelcfestats.cpp +++ b/cmd/fixelcfestats.cpp @@ -124,6 +124,7 @@ void write_fixel_output (const std::string& filename, const VectorType& data, const Header& header) { + assert (data.size() == header.size (0)); auto output = Image::create (filename, header); for (uint32_t i = 0; i < data.size(); ++i) { output.index(0) = i; @@ -410,8 +411,8 @@ void run() } for (size_t i = 0; i != num_contrasts; ++i) { if (!contrasts[i].is_F()) { - write_fixel_output (Path::join (output_fixel_directory, "abs_effect" + postfix(i) + ".mif"), abs_effect_size.row(i), output_header); ++progress; - write_fixel_output (Path::join (output_fixel_directory, "std_effect" + postfix(i) + ".mif"), std_effect_size.row(i), output_header); ++progress; + write_fixel_output (Path::join (output_fixel_directory, "abs_effect" + postfix(i) + ".mif"), abs_effect_size.col(i), output_header); ++progress; + write_fixel_output (Path::join (output_fixel_directory, "std_effect" + postfix(i) + ".mif"), std_effect_size.col(i), output_header); ++progress; } } write_fixel_output (Path::join (output_fixel_directory, "std_dev.mif"), stdev, output_header); @@ -434,7 +435,7 @@ void run() Stats::PermTest::precompute_empirical_stat (glm_test, cfe_integrator, empirical_cfe_statistic); output_header.keyval()["nonstationary adjustment"] = str(true); for (size_t i = 0; i != num_contrasts; ++i) - write_fixel_output (Path::join (output_fixel_directory, "cfe_empirical" + postfix(i) + ".mif"), empirical_cfe_statistic.row(i), output_header); + write_fixel_output (Path::join (output_fixel_directory, "cfe_empirical" + postfix(i) + ".mif"), empirical_cfe_statistic.col(i), output_header); } else { output_header.keyval()["nonstationary adjustment"] = str(false); } @@ -446,8 +447,8 @@ void run() Stats::PermTest::precompute_default_permutation (glm_test, cfe_integrator, empirical_cfe_statistic, cfe_output, tvalue_output); for (size_t i = 0; i != num_contrasts; ++i) { - write_fixel_output (Path::join (output_fixel_directory, "cfe" + postfix(i) + ".mif"), cfe_output.row(i), output_header); - write_fixel_output (Path::join (output_fixel_directory, std::string(contrasts[i].is_F() ? 
"F" : "t") + "value" + postfix(i) + ".mif"), tvalue_output.row(i), output_header); + write_fixel_output (Path::join (output_fixel_directory, "cfe" + postfix(i) + ".mif"), cfe_output.col(i), output_header); + write_fixel_output (Path::join (output_fixel_directory, std::string(contrasts[i].is_F() ? "F" : "t") + "value" + postfix(i) + ".mif"), tvalue_output.col(i), output_header); } // Perform permutation testing @@ -460,7 +461,7 @@ void run() ProgressBar progress ("Outputting final results"); for (size_t i = 0; i != num_contrasts; ++i) { - save_vector (perm_distribution.row(i), Path::join (output_fixel_directory, "perm_dist" + postfix(i) + ".txt")); + save_vector (perm_distribution.col(i), Path::join (output_fixel_directory, "perm_dist" + postfix(i) + ".txt")); ++progress; } @@ -468,9 +469,9 @@ void run() Math::Stats::statistic2pvalue (perm_distribution, cfe_output, pvalue_output); ++progress; for (size_t i = 0; i != num_contrasts; ++i) { - write_fixel_output (Path::join (output_fixel_directory, "fwe_pvalue" + postfix(i) + ".mif"), pvalue_output.row(i), output_header); + write_fixel_output (Path::join (output_fixel_directory, "fwe_pvalue" + postfix(i) + ".mif"), pvalue_output.col(i), output_header); ++progress; - write_fixel_output (Path::join (output_fixel_directory, "uncorrected_pvalue" + postfix(i) + ".mif"), uncorrected_pvalues.row(i), output_header); + write_fixel_output (Path::join (output_fixel_directory, "uncorrected_pvalue" + postfix(i) + ".mif"), uncorrected_pvalues.col(i), output_header); ++progress; } diff --git a/cmd/mrclusterstats.cpp b/cmd/mrclusterstats.cpp index fb49586032..300f074792 100644 --- a/cmd/mrclusterstats.cpp +++ b/cmd/mrclusterstats.cpp @@ -283,8 +283,8 @@ void run() { } for (size_t i = 0; i != num_contrasts; ++i) { if (!contrasts[i].is_F()) { - write_output (abs_effect_size.row(i), v2v, prefix + "abs_effect" + postfix(i) + ".mif", output_header); ++progress; - write_output (std_effect_size.row(i), v2v, prefix + "std_effect" + postfix(i) + ".mif", output_header); ++progress; + write_output (abs_effect_size.col(i), v2v, prefix + "abs_effect" + postfix(i) + ".mif", output_header); ++progress; + write_output (std_effect_size.col(i), v2v, prefix + "std_effect" + postfix(i) + ".mif", output_header); ++progress; } } write_output (stdev, v2v, prefix + "std_dev.mif", output_header); @@ -312,30 +312,31 @@ void run() { throw Exception ("Nonstationary adjustment is not currently implemented for threshold-based cluster analysis"); Stats::PermTest::precompute_empirical_stat (glm_test, enhancer, empirical_enhanced_statistic); for (size_t i = 0; i != num_contrasts; ++i) - save_vector (empirical_enhanced_statistic.row(i), prefix + "empirical" + postfix(i) + ".txt"); + save_vector (empirical_enhanced_statistic.col(i), prefix + "empirical" + postfix(i) + ".txt"); } if (!get_options ("notest").size()) { matrix_type perm_distribution, uncorrected_pvalue; - matrix_type default_cluster_output (num_contrasts, num_voxels); + // FIXME This shouldn't be empty... 
+ matrix_type default_cluster_output (num_voxels, num_contrasts); Stats::PermTest::run_permutations (glm_test, enhancer, empirical_enhanced_statistic, default_cluster_output, perm_distribution, uncorrected_pvalue); for (size_t i = 0; i != num_contrasts; ++i) - save_vector (perm_distribution.row(i), prefix + "perm_dist" + postfix(i) + ".txt"); + save_vector (perm_distribution.col(i), prefix + "perm_dist" + postfix(i) + ".txt"); ProgressBar progress ("Generating output images", 1 + (2 * num_contrasts)); for (size_t i = 0; i != num_contrasts; ++i) { - write_output (uncorrected_pvalue.row(i), v2v, prefix + "uncorrected_pvalue" + postfix(i) + ".mif", output_header); + write_output (uncorrected_pvalue.col(i), v2v, prefix + "uncorrected_pvalue" + postfix(i) + ".mif", output_header); ++progress; } - matrix_type fwe_pvalue_output (num_contrasts, num_voxels); + matrix_type fwe_pvalue_output (num_voxels, num_contrasts); Math::Stats::statistic2pvalue (perm_distribution, default_cluster_output, fwe_pvalue_output); ++progress; for (size_t i = 0; i != num_contrasts; ++i) { - write_output (fwe_pvalue_output.row(i), v2v, prefix + "fwe_pvalue" + postfix(i) + ".mif", output_header); + write_output (fwe_pvalue_output.col(i), v2v, prefix + "fwe_pvalue" + postfix(i) + ".mif", output_header); ++progress; } From 43f7cde68abae007d8b535f915fa5957c6dbf155 Mon Sep 17 00:00:00 2001 From: J-Donald Tournier Date: Mon, 11 Dec 2017 13:51:43 +0000 Subject: [PATCH 0086/1471] MRView transform tool: get remaining operations working --- src/gui/mrview/tool/transform.cpp | 71 ++++++++++++++++++++++--------- 1 file changed, 52 insertions(+), 19 deletions(-) diff --git a/src/gui/mrview/tool/transform.cpp b/src/gui/mrview/tool/transform.cpp index f012b5deb1..3341fd6734 100644 --- a/src/gui/mrview/tool/transform.cpp +++ b/src/gui/mrview/tool/transform.cpp @@ -40,12 +40,19 @@ namespace MR } + + + + void Transform::showEvent (QShowEvent*) { activate_button->setChecked (false); } + + + void Transform::closeEvent (QCloseEvent*) { if (window().active_camera_interactor() == this) @@ -53,17 +60,25 @@ namespace MR } + + + void Transform::onActivate (bool onoff) { window().register_camera_interactor (onoff ? 
this : nullptr); } + + void Transform::deactivate () { activate_button->setChecked (false); } + + + bool Transform::slice_move_event (float x) { const Projection* proj = window().get_current_mode()->get_current_projection(); @@ -77,9 +92,7 @@ namespace MR auto move = window().get_current_mode()->get_through_plane_translation (increment, *proj); transform_type M = header.transform(); - VAR (M.matrix()); - M.translate (move.cast()); - VAR(M.matrix()); + M.translate (-move.cast()); window().image()->header().transform() = M; window().image()->image.buffer->transform() = M; @@ -92,43 +105,63 @@ namespace MR bool Transform::pan_event () { - /*const Projection* proj = get_current_projection(); - if (!proj) return; + const Projection* proj = window().get_current_mode()->get_current_projection(); + if (!proj) + return true; - auto move = -proj->screen_to_model_direction (window().mouse_displacement(), target()); - set_target (target() + move); - updateGL();*/ - return false; + auto move = proj->screen_to_model_direction (window().mouse_displacement(), window().target()); + + transform_type M = window().image()->header().transform(); + M.translate (move.cast()); + + window().image()->header().transform() = M; + window().image()->image.buffer->transform() = M; + window().updateGL(); + + return true; } bool Transform::panthrough_event () { - /*const Projection* proj = get_current_projection(); - if (!proj) return; - auto move = get_through_plane_translation_FOV (window().mouse_displacement().y(), *proj); + const Projection* proj = window().get_current_mode()->get_current_projection(); + if (!proj) + return true; - set_focus (focus() + move); - move_target_to_focus_plane (*proj); - updateGL();*/ - return false; + auto move = window().get_current_mode()->get_through_plane_translation_FOV (window().mouse_displacement().y(), *proj); + + transform_type M = window().image()->header().transform(); + M.translate (move.cast()); + + window().image()->header().transform() = M; + window().image()->image.buffer->transform() = M; + window().updateGL(); + + return true; } + + + bool Transform::tilt_event () { - /*if (snap_to_image()) + if (window().snap_to_image()) window().set_snap_to_image (false); + transform_type M = window().image()->header().transform(); + + //M = const Math::Versorf rot = get_tilt_rotation(); if (!rot) return; Math::Versorf orient = rot * orientation(); set_orientation (orient); - updateGL();*/ - return false; + window().updateGL(); + + return true; } From bd32c718e49d3b740cc02fbe1ae4f32f8a1cfe27 Mon Sep 17 00:00:00 2001 From: J-Donald Tournier Date: Mon, 11 Dec 2017 23:20:53 +0000 Subject: [PATCH 0087/1471] remove Math::Versor in favour of Eigen::Quaternion --- core/math/versor.h | 96 ------------------------- src/gui/dwi/render_frame.cpp | 14 ++-- src/gui/dwi/render_frame.h | 7 +- src/gui/mrview/mode/base.cpp | 32 ++++----- src/gui/mrview/mode/base.h | 24 +++---- src/gui/mrview/mode/volume.h | 3 +- src/gui/mrview/tool/connectome/edge.cpp | 3 +- src/gui/mrview/tool/overlay.cpp | 38 +++++----- src/gui/mrview/tool/roi_editor/roi.cpp | 15 ++-- src/gui/mrview/tool/screen_capture.cpp | 4 +- src/gui/mrview/tool/screen_capture.h | 6 +- src/gui/mrview/tool/transform.cpp | 10 +-- src/gui/mrview/tool/transform.h | 2 +- src/gui/mrview/tool/view.cpp | 14 ++-- src/gui/mrview/tool/view.h | 2 +- src/gui/mrview/window.cpp | 2 +- src/gui/mrview/window.h | 7 +- src/gui/opengl/transformation.h | 30 ++++---- 18 files changed, 102 insertions(+), 207 deletions(-) delete mode 100644 core/math/versor.h diff --git 
a/core/math/versor.h b/core/math/versor.h deleted file mode 100644 index e7a47c53e6..0000000000 --- a/core/math/versor.h +++ /dev/null @@ -1,96 +0,0 @@ -/* Copyright (c) 2008-2017 the MRtrix3 contributors. - * - * This Source Code Form is subject to the terms of the Mozilla Public - * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at http://mozilla.org/MPL/2.0/. - * - * MRtrix is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - * - * For more details, see http://www.mrtrix.org/. - */ - - -#ifndef __math_versor_h__ -#define __math_versor_h__ - -#include - -#include "debug.h" -#include "math/math.h" - -namespace MR { - namespace Math { - - - - template - class Versor : public Eigen::Quaternion - { MEMALIGN(Versor) - - using value_type = ValueType; - - public: - Versor () : Eigen::Quaternion (NAN, NAN, NAN, NAN) { } - Versor (const value_type& w, const value_type& x, const value_type& y, const value_type& z) : - Eigen::Quaternion (w, x, y, z) { Eigen::Quaternion::normalize(); } - - // This may be used erroneously if trying to convert from a matrix representation to a versor - // (this constructor in fact just reads 4 value_type's from an array directly into the quaternion) - Versor (const value_type*) = delete; - - template - Versor (const Eigen::QuaternionBase& other) : - Eigen::Quaternion (other) { Eigen::Quaternion::normalize(); } - Versor (const Eigen::AngleAxis& aa): - Eigen::Quaternion (aa) { Eigen::Quaternion::normalize(); } - template - Versor (const Eigen::MatrixBase& other) : - Eigen::Quaternion (other) { Eigen::Quaternion::normalize(); } - template - Versor (const Eigen::Quaternion& other) : - Eigen::Quaternion (other) { Eigen::Quaternion::normalize(); } - - // This functionality was provided explicitly in earlier MRtrix versions, but should - // now be instead handled using the constructor that takes an Eigen::AngleAxis - Versor (const value_type, const Eigen::Matrix&) = delete; - - bool valid() const { return std::isfinite (w()); } - bool operator! 
() const { return !valid(); } - - // Don't give reference access to individual elements; need to maintain unit norm - value_type w() const { return Eigen::Quaternion::w(); } - value_type& w() = delete; - value_type x() const { return Eigen::Quaternion::x(); } - value_type& x() = delete; - value_type y() const { return Eigen::Quaternion::y(); } - value_type& y() = delete; - value_type z() const { return Eigen::Quaternion::z(); } - value_type& z() = delete; - - // Shouldn't be any need to perform explicit normalization - void normalize() = delete; - Versor normalized() = delete; - - - static Versor unit() { return Versor (value_type(1.0), value_type(0.0), value_type(0.0), value_type(0.0)); } - - }; - - using Versorf = Versor; - using Versord = Versor; - - template - inline std::ostream& operator<< (std::ostream& stream, const Versor& v) - { - stream << "[ " << v.w() << " " << v.x() << "i " << v.y() << "j " << v.z() << "k ]"; - return stream; - } - - - - } -} - -#endif diff --git a/src/gui/dwi/render_frame.cpp b/src/gui/dwi/render_frame.cpp index 2dd6c18402..c92cd87957 100644 --- a/src/gui/dwi/render_frame.cpp +++ b/src/gui/dwi/render_frame.cpp @@ -42,7 +42,7 @@ namespace MR constexpr float AngleMax = 90.0f; - const Math::Versorf DefaultOrientation = Eigen::AngleAxisf (Math::pi_4, Eigen::Vector3f (0.0f, 0.0f, 1.0f)) * + const Eigen::Quaternionf DefaultOrientation = Eigen::AngleAxisf (Math::pi_4, Eigen::Vector3f (0.0f, 0.0f, 1.0f)) * Eigen::AngleAxisf (Math::pi/3.0f, Eigen::Vector3f (1.0f, 0.0f, 0.0f)); QFont get_font (QWidget* parent) { QFont f = parent->font(); @@ -53,7 +53,7 @@ namespace MR RenderFrame::RenderFrame (QWidget* parent) : GL::Area (parent), - view_angle (AngleDefault), distance (DistDefault), scale (NaN), + view_angle (AngleDefault), distance (DistDefault), scale (NaN), lmax_computed (0), lod_computed (0), mode (mode_t::SH), recompute_mesh (true), recompute_amplitudes (true), show_axes (true), hide_neg_values (true), color_by_dir (true), use_lighting (true), glfont (get_font (parent)), projection (this, glfont), @@ -85,7 +85,7 @@ namespace MR for (size_t j = 0; j != 3; ++j) M(i,j) = rotation(j,i); } - orientation = Math::Versorf (M); + orientation = Eigen::Quaternionf (M); update(); } @@ -156,7 +156,7 @@ namespace MR void RenderFrame::paintGL () { - gl::ColorMask (true, true, true, true); + gl::ColorMask (true, true, true, true); gl::ClearColor (lighting->background_color[0], lighting->background_color[1], lighting->background_color[2], 0.0); gl::Clear (gl::COLOR_BUFFER_BIT | gl::DEPTH_BUFFER_BIT); @@ -190,7 +190,7 @@ namespace MR if (std::isfinite (values[0])) { gl::Disable (gl::BLEND); - if (!std::isfinite (scale)) + if (!std::isfinite (scale)) scale = 2.0f / values.norm(); renderer.set_mode (mode); @@ -261,7 +261,7 @@ namespace MR // otherwise we get transparent windows... 
#if QT_VERSION >= 0x050400 gl::ClearColor (0.0, 0.0, 0.0, 1.0); - gl::ColorMask (false, false, false, true); + gl::ColorMask (false, false, false, true); gl::Clear (gl::COLOR_BUFFER_BIT); #endif @@ -302,7 +302,7 @@ namespace MR const Eigen::Vector3f v = x.cross (z).normalized(); float angle = RotationInc * std::sqrt (float (Math::pow2 (dx) + Math::pow2 (dy))); if (angle > Math::pi_2) angle = Math::pi_2; - const Math::Versorf rot (Eigen::AngleAxisf (angle, v)); + const Eigen::Quaternionf rot (Eigen::AngleAxisf (angle, v)); orientation = rot * orientation; update(); } diff --git a/src/gui/dwi/render_frame.h b/src/gui/dwi/render_frame.h index bd098adde8..8eb0860e52 100644 --- a/src/gui/dwi/render_frame.h +++ b/src/gui/dwi/render_frame.h @@ -18,7 +18,6 @@ #include "memory.h" #include "types.h" #include "dwi/directions/set.h" -#include "math/versor.h" #include "gui/opengl/lighting.h" #include "gui/dwi/renderer.h" #include "gui/projection.h" @@ -88,14 +87,14 @@ namespace MR void reset_view (); void set_lmax (int lmax) { assert (mode == mode_t::SH); - if (lmax != lmax_computed) + if (lmax != lmax_computed) recompute_mesh = recompute_amplitudes = true; lmax_computed = lmax; update(); } void set_LOD (int lod) { assert (mode == mode_t::SH || mode == mode_t::TENSOR); - if (lod != lod_computed) + if (lod != lod_computed) recompute_mesh = recompute_amplitudes = true; lod_computed = lod; update(); @@ -142,7 +141,7 @@ namespace MR QPoint last_pos; GL::Font glfont; Projection projection; - Math::Versorf orientation; + Eigen::Quaternionf orientation; Eigen::Vector3f focus; std::string screenshot_name; diff --git a/src/gui/mrview/mode/base.cpp b/src/gui/mrview/mode/base.cpp index 466b82bdc7..4c345757ed 100644 --- a/src/gui/mrview/mode/base.cpp +++ b/src/gui/mrview/mode/base.cpp @@ -228,7 +228,7 @@ namespace MR setup_projection (adjust_projection_matrix (GL::transpose (M), axis), with_projection); } - void Base::setup_projection (const Math::Versorf& V, Projection& with_projection) const + void Base::setup_projection (const Eigen::Quaternionf& V, Projection& with_projection) const { setup_projection (adjust_projection_matrix (GL::transpose (GL::mat4 (V))), with_projection); } @@ -252,15 +252,15 @@ namespace MR - Math::Versorf Base::get_tilt_rotation () const + Eigen::Quaternionf Base::get_tilt_rotation () const { const Projection* proj = get_current_projection(); if (!proj) - return Math::Versorf(); + return Eigen::Quaternionf(); QPoint dpos = window().mouse_displacement(); if (dpos.x() == 0 && dpos.y() == 0) - return Math::Versorf(); + return Eigen::Quaternionf(); const Eigen::Vector3f x = proj->screen_to_model_direction (dpos, target()); const Eigen::Vector3f z = proj->screen_normal(); @@ -268,7 +268,7 @@ namespace MR float angle = -ROTATION_INC * std::sqrt (float (Math::pow2 (dpos.x()) + Math::pow2 (dpos.y()))); if (angle > Math::pi_2) angle = Math::pi_2; - return Math::Versorf (Eigen::AngleAxisf (angle, v)); + return Eigen::Quaternionf (Eigen::AngleAxisf (angle, v)); } @@ -276,22 +276,22 @@ namespace MR - Math::Versorf Base::get_rotate_rotation () const + Eigen::Quaternionf Base::get_rotate_rotation () const { const Projection* proj = get_current_projection(); if (!proj) - return Math::Versorf(); + return Eigen::Quaternionf(); QPoint dpos = window().mouse_displacement(); if (dpos.x() == 0 && dpos.y() == 0) - return Math::Versorf(); + return Eigen::Quaternionf(); Eigen::Vector3f x1 (window().mouse_position().x() - proj->x_position() - proj->width()/2, window().mouse_position().y() - proj->y_position() - 
proj->height()/2, 0.0); if (x1.norm() < 16.0f) - return Math::Versorf(); + return Eigen::Quaternionf(); Eigen::Vector3f x0 (dpos.x() - x1[0], dpos.y() - x1[1], 0.0); @@ -301,7 +301,7 @@ namespace MR const Eigen::Vector3f n = x1.cross (x0); const float angle = n[2]; Eigen::Vector3f v = (proj->screen_normal()).normalized(); - return Math::Versorf (Eigen::AngleAxisf (angle, v)); + return Eigen::Quaternionf (Eigen::AngleAxisf (angle, v)); } @@ -316,11 +316,11 @@ namespace MR if (snap_to_image()) window().set_snap_to_image (false); - const Math::Versorf rot = get_tilt_rotation(); - if (!rot) + const Eigen::Quaternionf rot = get_tilt_rotation(); + if (!rot.coeffs().allFinite()) return; - Math::Versorf orient = rot * orientation(); + Eigen::Quaternionf orient = rot * orientation(); set_orientation (orient); updateGL(); } @@ -337,11 +337,11 @@ namespace MR if (snap_to_image()) window().set_snap_to_image (false); - const Math::Versorf rot = get_rotate_rotation(); - if (!rot) + const Eigen::Quaternionf rot = get_rotate_rotation(); + if (!rot.coeffs().allFinite()) return; - Math::Versorf orient = rot * orientation(); + Eigen::Quaternionf orient = rot * orientation(); set_orientation (orient); updateGL(); } diff --git a/src/gui/mrview/mode/base.h b/src/gui/mrview/mode/base.h index ae8bdc77f8..2ec42321e1 100644 --- a/src/gui/mrview/mode/base.h +++ b/src/gui/mrview/mode/base.h @@ -15,8 +15,6 @@ #ifndef __gui_mrview_mode_base_h__ #define __gui_mrview_mode_base_h__ -#include "math/versor.h" - #include "gui/opengl/gl.h" #include "gui/opengl/transformation.h" #include "gui/projection.h" @@ -103,12 +101,12 @@ namespace MR const Eigen::Vector3f& target () const { return window().target(); } float FOV () const { return window().FOV(); } int plane () const { return window().plane(); } - Math::Versorf orientation () const { + Eigen::Quaternionf orientation () const { if (snap_to_image()) { if (image()) - return Math::Versorf (image()->header().transform().rotation().cast()); + return Eigen::Quaternionf (image()->header().transform().rotation().cast()); else - return Math::Versorf::unit(); + return Eigen::Quaternionf::Identity(); } return window().orientation(); } @@ -129,12 +127,12 @@ namespace MR void set_target (const Eigen::Vector3f& p) { window().set_target (p); } void set_FOV (float value) { window().set_FOV (value); } void set_plane (int p) { window().set_plane (p); } - void set_orientation (const Math::Versorf& V) { window().set_orientation (V); } + void set_orientation (const Eigen::Quaternionf& V) { window().set_orientation (V); } void reset_orientation () { - Math::Versorf orient (Math::Versorf::unit()); if (image()) - orient = Math::Versorf (image()->header().transform().rotation().cast()); - set_orientation (orient); + set_orientation (Eigen::Quaternionf (image()->header().transform().rotation().cast())); + else + set_orientation (Eigen::Quaternionf::Identity()); } GL::Area* glarea () const { @@ -165,15 +163,15 @@ namespace MR } void setup_projection (const int, Projection&) const; - void setup_projection (const Math::Versorf&, Projection&) const; + void setup_projection (const Eigen::Quaternionf&, Projection&) const; void setup_projection (const GL::mat4&, Projection&) const; - Math::Versorf get_tilt_rotation () const; - Math::Versorf get_rotate_rotation () const; + Eigen::Quaternionf get_tilt_rotation () const; + Eigen::Quaternionf get_rotate_rotation () const; Eigen::Vector3f voxel_at (const Eigen::Vector3f& pos) const { if (!image()) return Eigen::Vector3f { NAN, NAN, NAN }; - const 
Eigen::Vector3f result = image()->scanner2voxel() * pos; + const Eigen::Vector3f result = image()->scanner2voxel().cast() * pos; return result; } diff --git a/src/gui/mrview/mode/volume.h b/src/gui/mrview/mode/volume.h index 8d16a17589..b7d6a24b16 100644 --- a/src/gui/mrview/mode/volume.h +++ b/src/gui/mrview/mode/volume.h @@ -16,7 +16,6 @@ #define __gui_mrview_mode_volume_h__ #include "app.h" -#include "math/versor.h" #include "gui/mrview/mode/base.h" #include "gui/opengl/transformation.h" @@ -38,7 +37,7 @@ namespace MR public: Volume () : Base (FocusContrast | MoveTarget | TiltRotate | ShaderTransparency | ShaderThreshold | ShaderClipping), - volume_shader (*this) { + volume_shader (*this) { } virtual void paint (Projection& projection); diff --git a/src/gui/mrview/tool/connectome/edge.cpp b/src/gui/mrview/tool/connectome/edge.cpp index 068af7fd3b..6e792abf08 100644 --- a/src/gui/mrview/tool/connectome/edge.cpp +++ b/src/gui/mrview/tool/connectome/edge.cpp @@ -15,7 +15,6 @@ #include "gui/mrview/tool/connectome/edge.h" #include "math/rng.h" -#include "math/versor.h" #include "dwi/tractography/file.h" #include "dwi/tractography/properties.h" @@ -62,7 +61,7 @@ namespace MR // Now, a rotation angle const float angle = std::acos (z_axis.dot (dir)); // Convert to rotation matrix representation - const Eigen::Matrix matrix (Math::Versorf (Eigen::AngleAxisf (angle, v)).matrix()); + const Eigen::Matrix matrix (Eigen::Quaternionf (Eigen::AngleAxisf (angle, v)).matrix()); // Put into the GLfloat array rot_matrix[0] = matrix(0,0); rot_matrix[1] = matrix(0,1); rot_matrix[2] = matrix(0,2); rot_matrix[3] = matrix(1,0); rot_matrix[4] = matrix(1,1); rot_matrix[5] = matrix(1,2); diff --git a/src/gui/mrview/tool/overlay.cpp b/src/gui/mrview/tool/overlay.cpp index 15a85ca2ec..bf373058f0 100644 --- a/src/gui/mrview/tool/overlay.cpp +++ b/src/gui/mrview/tool/overlay.cpp @@ -35,14 +35,14 @@ namespace MR class Overlay::Item : public Image { MEMALIGN(Overlay::Item) public: Item (MR::Header&& H) : Image (std::move (H)) { } - Mode::Slice::Shader slice_shader; + Mode::Slice::Shader slice_shader; }; - class Overlay::Model : public ListModelBase + class Overlay::Model : public ListModelBase { MEMALIGN(Overlay::Model) public: - Model (QObject* parent) : + Model (QObject* parent) : ListModelBase (parent) { } void add_items (vector>& list); @@ -59,7 +59,7 @@ namespace MR for (size_t i = 0; i < list.size(); ++i) { Item* overlay = new Item (std::move (*list[i])); overlay->set_allowed_features (true, true, false); - if (!overlay->colourmap) + if (!overlay->colourmap) overlay->colourmap = 1; overlay->alpha = 1.0f; overlay->set_use_transparency (true); @@ -72,7 +72,7 @@ namespace MR Overlay::Overlay (Dock* parent) : - Base (parent) { + Base (parent) { VBoxLayout* main_box = new VBoxLayout (this); HBoxLayout* layout = new HBoxLayout; layout->setContentsMargins (0, 0, 0, 0); @@ -258,7 +258,7 @@ namespace MR } - void Overlay::hide_all_slot () + void Overlay::hide_all_slot () { updateGL(); } @@ -283,7 +283,7 @@ namespace MR Overlay::Item* image = dynamic_cast(image_list_model->items[i].get()); need_to_update |= !std::isfinite (image->intensity_min()); image->transparent_intensity = image->opaque_intensity = image->intensity_min(); - if (is_3D) + if (is_3D) window().get_current_mode()->overlays_for_3D.push_back (image); else image->render3D (image->slice_shader, projection, projection.depth_of (window().focus())); @@ -574,7 +574,7 @@ namespace MR - void Overlay::update_selection () + void Overlay::update_selection () { 
QModelIndexList indices = image_list_view->selectionModel()->selectedIndexes(); volume_label->setEnabled (false); @@ -609,7 +609,7 @@ namespace MR if (colourmap_index != int(overlay->colourmap)) { if (colourmap_index == -2) colourmap_index = overlay->colourmap; - else + else colourmap_index = -1; } rate += overlay->scaling_rate(); @@ -618,11 +618,11 @@ namespace MR num_lower_threshold += overlay->use_discard_lower(); num_upper_threshold += overlay->use_discard_upper(); opacity += overlay->alpha; - if (overlay->interpolate()) + if (overlay->interpolate()) ++num_interp; if (!std::isfinite (overlay->lessthan)) overlay->lessthan = overlay->intensity_min(); - if (!std::isfinite (overlay->greaterthan)) + if (!std::isfinite (overlay->greaterthan)) overlay->greaterthan = overlay->intensity_max(); lower_threshold_val += overlay->lessthan; upper_threshold_val += overlay->greaterthan; @@ -654,7 +654,7 @@ namespace MR interpolate_check_box->setCheckState (Qt::Unchecked); else if (num_interp == indices.size()) interpolate_check_box->setCheckState (Qt::Checked); - else + else interpolate_check_box->setCheckState (Qt::PartiallyChecked); min_value->setRate (rate); @@ -666,7 +666,7 @@ namespace MR lower_threshold_check_box->setCheckState (num_lower_threshold ? ( num_lower_threshold == indices.size() ? Qt::Checked : - Qt::PartiallyChecked ) : + Qt::PartiallyChecked ) : Qt::Unchecked); lower_threshold->setRate (rate); @@ -674,7 +674,7 @@ namespace MR upper_threshold_check_box->setCheckState (num_upper_threshold ? ( num_upper_threshold == indices.size() ? Qt::Checked : - Qt::PartiallyChecked ) : + Qt::PartiallyChecked ) : Qt::Unchecked); upper_threshold->setRate (rate); } @@ -682,8 +682,8 @@ namespace MR - void Overlay::add_commandline_options (MR::App::OptionList& options) - { + void Overlay::add_commandline_options (MR::App::OptionList& options) + { using namespace MR::App; options + OptionGroup ("Overlay tool options") @@ -700,10 +700,10 @@ namespace MR + Option ("overlay.colourmap", "Sets the colourmap of the overlay as indexed in the colourmap dropdown menu.").allow_multiple() + Argument ("index").type_integer(); - + } - bool Overlay::process_commandline_option (const MR::App::ParsedOption& opt) + bool Overlay::process_commandline_option (const MR::App::ParsedOption& opt) { if (opt.opt->is ("overlay.load")) { vector> list; @@ -742,7 +742,7 @@ namespace MR catch (Exception& e) { e.display(); } return true; } - + return false; } diff --git a/src/gui/mrview/tool/roi_editor/roi.cpp b/src/gui/mrview/tool/roi_editor/roi.cpp index 922ee06324..8289011327 100644 --- a/src/gui/mrview/tool/roi_editor/roi.cpp +++ b/src/gui/mrview/tool/roi_editor/roi.cpp @@ -17,7 +17,6 @@ #include "gui/mrview/tool/roi_editor/roi.h" #include "header.h" -#include "math/versor.h" #include "gui/cursor.h" #include "gui/projection.h" #include "gui/dialog/file.h" @@ -367,9 +366,9 @@ namespace MR int ROI::normal2axis (const Eigen::Vector3f& normal, const ROI_Item& roi) const { - float x_dot_n = std::abs ((roi.image2scanner().rotation() * Eigen::Vector3f { 1.0f, 0.0f, 0.0f }).dot (normal)); - float y_dot_n = std::abs ((roi.image2scanner().rotation() * Eigen::Vector3f { 0.0f, 1.0f, 0.0f }).dot (normal)); - float z_dot_n = std::abs ((roi.image2scanner().rotation() * Eigen::Vector3f { 0.0f, 0.0f, 1.0f }).dot (normal)); + float x_dot_n = std::abs ((roi.image2scanner().rotation().cast() * Eigen::Vector3f { 1.0f, 0.0f, 0.0f }).dot (normal)); + float y_dot_n = std::abs ((roi.image2scanner().rotation().cast() * Eigen::Vector3f { 0.0f, 1.0f, 0.0f 
}).dot (normal)); + float z_dot_n = std::abs ((roi.image2scanner().rotation().cast() * Eigen::Vector3f { 0.0f, 0.0f, 1.0f }).dot (normal)); if (x_dot_n > y_dot_n) return x_dot_n > z_dot_n ? 0 : 2; else @@ -503,7 +502,7 @@ namespace MR if (!proj) return; const Eigen::Vector3f current_origin = proj->screen_to_model (window().mouse_position(), window().focus()); current_axis = normal2axis (proj->screen_normal(), *roi); - current_slice = std::lround ((roi->scanner2voxel() * current_origin)[current_axis]); + current_slice = std::lround ((roi->scanner2voxel().cast() * current_origin)[current_axis]); roi->start (ROI_UndoEntry (*roi, current_axis, current_slice)); @@ -762,10 +761,10 @@ namespace MR // mouse move: Eigen::Vector3f slice_axis { 0.0, 0.0, 0.0 }; slice_axis[current_axis] = current_axis == 2 ? 1.0 : -1.0; - slice_axis = roi->image2scanner().rotation() * slice_axis; + slice_axis = roi->image2scanner().rotation().cast() * slice_axis; current_slice_loc = current_origin.dot (slice_axis); - const Math::Versorf orient (roi->image2scanner().rotation()); + const Eigen::Quaternionf orient (roi->image2scanner().rotation()); window().set_snap_to_image (false); window().set_orientation (orient); window().set_plane (current_axis); @@ -814,7 +813,7 @@ namespace MR Eigen::Vector3f pos = proj->screen_to_model (window().mouse_position(), window().focus()); Eigen::Vector3f slice_axis (0.0, 0.0, 0.0); slice_axis[current_axis] = current_axis == 2 ? 1.0 : -1.0; - slice_axis = roi->image2scanner().rotation() * slice_axis; + slice_axis = roi->image2scanner().rotation().cast() * slice_axis; float l = (current_slice_loc - pos.dot (slice_axis)) / proj->screen_normal().dot (slice_axis); window().set_focus (window().focus() + l * proj->screen_normal()); const Eigen::Vector3f pos_adj = pos + l * proj->screen_normal(); diff --git a/src/gui/mrview/tool/screen_capture.cpp b/src/gui/mrview/tool/screen_capture.cpp index bdc538e922..31f1d27c11 100644 --- a/src/gui/mrview/tool/screen_capture.cpp +++ b/src/gui/mrview/tool/screen_capture.cpp @@ -346,10 +346,10 @@ namespace MR win.captureGL (Path::join (folder, prefix + printf ("%04d.png", i))); // Rotation - Math::Versorf orientation (win.orientation()); + Eigen::Quaternionf orientation (win.orientation()); Eigen::Vector3f axis { rotation_axis_x->value(), rotation_axis_y->value(), rotation_axis_z->value() }; axis.normalize(); - const Math::Versorf rotation (Eigen::AngleAxisf (radians, axis)); + const Eigen::Quaternionf rotation (Eigen::AngleAxisf (radians, axis)); switch (rotation_type) { case RotationType::World: diff --git a/src/gui/mrview/tool/screen_capture.h b/src/gui/mrview/tool/screen_capture.h index 7277630c89..f796f983c5 100644 --- a/src/gui/mrview/tool/screen_capture.h +++ b/src/gui/mrview/tool/screen_capture.h @@ -17,8 +17,6 @@ #include -#include "math/versor.h" - #include "gui/mrview/tool/base.h" #include "gui/mrview/adjust_button.h" #include "gui/mrview/spin_box.h" @@ -87,13 +85,13 @@ namespace MR class CaptureState { MEMALIGN(CaptureState) public: - Math::Versorf orientation; + Eigen::Quaternionf orientation; Eigen::Vector3f focus, target; float fov; size_t volume, volume_axis; size_t frame_index; int plane; - CaptureState(const Math::Versorf& orientation, + CaptureState(const Eigen::Quaternionf& orientation, const Eigen::Vector3f& focus, const Eigen::Vector3f& target, float fov, size_t volume, size_t volume_axis, size_t frame_index, int plane) diff --git a/src/gui/mrview/tool/transform.cpp b/src/gui/mrview/tool/transform.cpp index 3341fd6734..8834a03d2a 
100644 --- a/src/gui/mrview/tool/transform.cpp +++ b/src/gui/mrview/tool/transform.cpp @@ -151,16 +151,18 @@ namespace MR window().set_snap_to_image (false); transform_type M = window().image()->header().transform(); - +/* //M = - const Math::Versorf rot = get_tilt_rotation(); + const auto rot = window().get_current_mode()->get_tilt_rotation().cast(); if (!rot) - return; + return true; + + M = transform_type(rot); Math::Versorf orient = rot * orientation(); set_orientation (orient); window().updateGL(); - +*/ return true; } diff --git a/src/gui/mrview/tool/transform.h b/src/gui/mrview/tool/transform.h index 9404b69bb1..8abe0360d2 100644 --- a/src/gui/mrview/tool/transform.h +++ b/src/gui/mrview/tool/transform.h @@ -45,7 +45,7 @@ namespace MR protected: virtual void showEvent (QShowEvent* event) override; virtual void closeEvent (QCloseEvent* event) override; - + private slots: void onActivate (bool); diff --git a/src/gui/mrview/tool/view.cpp b/src/gui/mrview/tool/view.cpp index eebf98f7ca..ac91bba81c 100644 --- a/src/gui/mrview/tool/view.cpp +++ b/src/gui/mrview/tool/view.cpp @@ -1083,14 +1083,14 @@ namespace MR } - void View::rotate_clip_planes (vector& clip, const Math::Versorf& rot) + void View::rotate_clip_planes (vector& clip, const Eigen::Quaternionf& rot) { const auto& focus (window().focus()); for (size_t n = 0; n < clip.size(); ++n) { GL::vec4& p (*clip[n]); float distance_to_focus = p[0]*focus[0] + p[1]*focus[1] + p[2]*focus[2] - p[3]; - const Math::Versorf norm (0.0f, p[0], p[1], p[2]); - const Math::Versorf rotated = norm * rot; + const Eigen::Quaternionf norm (0.0f, p[0], p[1], p[2]); + const Eigen::Quaternionf rotated = norm * rot; p[0] = rotated.x(); p[1] = rotated.y(); p[2] = rotated.z(); @@ -1149,8 +1149,8 @@ namespace MR { vector clip = get_clip_planes_to_be_edited(); if (clip.size()) { - const Math::Versorf rot = window().get_current_mode()->get_tilt_rotation(); - if (!rot) + const Eigen::Quaternionf rot = window().get_current_mode()->get_tilt_rotation(); + if (!rot.coeffs().allFinite()) return true; rotate_clip_planes (clip, rot); } @@ -1163,8 +1163,8 @@ namespace MR { vector clip = get_clip_planes_to_be_edited(); if (clip.size()) { - const Math::Versorf rot = window().get_current_mode()->get_rotate_rotation(); - if (!rot) + const Eigen::Quaternionf rot = window().get_current_mode()->get_rotate_rotation(); + if (!rot.coeffs().allFinite()) return true; rotate_clip_planes (clip, rot); } diff --git a/src/gui/mrview/tool/view.h b/src/gui/mrview/tool/view.h index 25b9d001ac..4b71227a1a 100644 --- a/src/gui/mrview/tool/view.h +++ b/src/gui/mrview/tool/view.h @@ -144,7 +144,7 @@ namespace MR void set_transparency_from_image (); void move_clip_planes_in_out (vector& clip, float distance); - void rotate_clip_planes (vector& clip, const Math::Versorf& rot); + void rotate_clip_planes (vector& clip, const Eigen::Quaternionf& rot); }; } diff --git a/src/gui/mrview/window.cpp b/src/gui/mrview/window.cpp index 124bdb8b3c..d4d343e26b 100644 --- a/src/gui/mrview/window.cpp +++ b/src/gui/mrview/window.cpp @@ -228,7 +228,7 @@ namespace MR mouse_action (NoAction), focal_point { NAN, NAN, NAN }, camera_target { NAN, NAN, NAN }, - orient (), + orient (NaN, NaN, NaN, NaN), field_of_view (100.0), anatomical_plane (2), colourbar_position (ColourMap::Position::BottomRight), diff --git a/src/gui/mrview/window.h b/src/gui/mrview/window.h index fd1bc72e8b..803a50ebce 100644 --- a/src/gui/mrview/window.h +++ b/src/gui/mrview/window.h @@ -17,7 +17,6 @@ #include "image.h" #include "memory.h" -#include 
"math/versor.h" #include "gui/cursor.h" #include "gui/gui.h" #include "gui/mrview/gui_image.h" @@ -112,7 +111,7 @@ namespace MR const Eigen::Vector3f& target () const { return camera_target; } float FOV () const { return field_of_view; } int plane () const { return anatomical_plane; } - const Math::Versorf& orientation () const { return orient; } + const Eigen::Quaternionf& orientation () const { return orient; } bool snap_to_image () const { return snap_to_image_axes_and_voxel; } Image* image () { return static_cast (image_group->checkedAction()); } @@ -120,7 +119,7 @@ namespace MR void set_target (const Eigen::Vector3f& p) { camera_target = p; emit targetChanged(); } void set_FOV (float value) { field_of_view = value; emit fieldOfViewChanged(); } void set_plane (int p) { anatomical_plane = p; emit planeChanged(); } - void set_orientation (const Math::Versorf& V) { orient = V; emit orientationChanged(); } + void set_orientation (const Eigen::Quaternionf& V) { orient = V; emit orientationChanged(); } void set_scaling (float min, float max) { if (!image()) return; image()->set_windowing (min, max); } void set_snap_to_image (bool onoff) { snap_to_image_axes_and_voxel = onoff; snap_to_image_action->setChecked(onoff); emit focusChanged(); } @@ -247,7 +246,7 @@ namespace MR MouseAction mouse_action; Eigen::Vector3f focal_point, camera_target; - Math::Versorf orient; + Eigen::Quaternionf orient; float field_of_view; int anatomical_plane, annotations; ColourMap::Position colourbar_position, tools_colourbar_position; diff --git a/src/gui/opengl/transformation.h b/src/gui/opengl/transformation.h index a4742012dd..d0afe5744b 100644 --- a/src/gui/opengl/transformation.h +++ b/src/gui/opengl/transformation.h @@ -18,8 +18,6 @@ #include #include "math/least_squares.h" -#include "math/versor.h" - #include "gui/opengl/gl.h" namespace MR @@ -36,7 +34,7 @@ namespace MR public: vec4 () { } vec4 (float x, float y, float z, float w) { v[0] = x; v[1] = y; v[2] = z; v[3] = w; } - vec4 (const Math::Versorf& V) { v[0] = V.x(); v[1] = V.y(); v[2] = V.z(); v[3] = V.w(); } + vec4 (const Eigen::Quaternionf& V) { v[0] = V.x(); v[1] = V.y(); v[2] = V.z(); v[3] = V.w(); } template vec4 (const Cont& p, float w) { v[0] = p[0]; v[1] = p[1]; v[2] = p[2]; v[3] = w; } vec4 (const float* p) { memcpy (v, p, sizeof(v)); } @@ -49,7 +47,7 @@ namespace MR operator GLfloat* () { return v; } friend std::ostream& operator<< (std::ostream& stream, const vec4& v) { - for (size_t i = 0; i < 4; ++i) + for (size_t i = 0; i < 4; ++i) stream << v[i] << " "; return stream; } @@ -64,12 +62,12 @@ namespace MR class mat4 { MEMALIGN(mat4) public: - mat4 () { } + mat4 () { } mat4 (const mat4& a) { memcpy (m, a.m, sizeof(m)); } mat4 (const float* p) { memcpy (m, p, sizeof(m)); } - mat4 (const Math::Versorf& v) + mat4 (const Eigen::Quaternionf& v) { - const Math::Versorf::Matrix3 R = v.matrix(); + const auto R = v.matrix(); zero(); for (size_t i = 0; i != 3; ++i) { for (size_t j = 0; j != 3; ++j) @@ -116,8 +114,8 @@ namespace MR vec4 operator* (const vec4& v) const { vec4 r; r.zero(); - for (size_t j = 0; j < 4; ++j) - for (size_t i = 0; i < 4; ++i) + for (size_t j = 0; j < 4; ++j) + for (size_t i = 0; i < 4; ++i) r[i] += (*this)(i,j) * v[j]; return r; } @@ -129,7 +127,7 @@ namespace MR friend std::ostream& operator<< (std::ostream& stream, const mat4& m) { for (size_t i = 0; i < 4; ++i) { - for (size_t j = 0; j < 4; ++j) + for (size_t j = 0; j < 4; ++j) stream << m(i,j) << " "; stream << "\n"; } @@ -153,7 +151,7 @@ namespace MR - inline mat4 
transpose (const mat4& a) + inline mat4 transpose (const mat4& a) { mat4 b; for (size_t j = 0; j < 4; ++j) @@ -166,7 +164,7 @@ namespace MR - inline mat4 inv (const mat4& a) + inline mat4 inv (const mat4& a) { Eigen::Matrix A; for (size_t i = 0; i != 4; ++i) { @@ -178,7 +176,7 @@ namespace MR - inline mat4 ortho (float L, float R, float B, float T, float N, float F) + inline mat4 ortho (float L, float R, float B, float T, float N, float F) { mat4 m; m.zero(); @@ -196,7 +194,7 @@ namespace MR - inline mat4 frustum (float L, float R, float B, float T, float N, float F) + inline mat4 frustum (float L, float R, float B, float T, float N, float F) { mat4 m; m.zero(); @@ -213,7 +211,7 @@ namespace MR } - inline mat4 translate (float x, float y, float z) + inline mat4 translate (float x, float y, float z) { mat4 m = identity(); m(0,3) = x; @@ -230,7 +228,7 @@ namespace MR } - inline mat4 scale (float x, float y, float z) + inline mat4 scale (float x, float y, float z) { mat4 m; m.zero(); From d84af72dae8795b385da7a5d1100e482da64f672 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Tue, 12 Dec 2017 18:52:57 +1100 Subject: [PATCH 0088/1471] Stats: Various fixes - More fixes to stats commands to reflect chosen ordering of matrix data for different variables. Generally data for a single contrast will be stored contiguously in columns; otherwise, data for each individual element (e.g. regression coefficients) will be stored contiguously in columns. - Move Math::Stats::statistic2pvalue() to new files core/math/stats/fwe.h / .cpp, and rename to Math::Stats::fwe_pvalue(). - fixelcfestats: Allow subject filename paths to be defined relative to the input fixel directory, relative to the working directory, or as absolute paths. - Change matrix_type Contrast::operator() to Contrast::matrix() to keep GCC happy. - Use model partitioning approach described in Winkler et al., NeuroImage 2014, Appendix A. This results in a simple scalar per element in the case of t-tests, and vector per element in the case of multi-line F-tests, which simplifies some of the GLM::Test* code. - GLM::TestVariable: Pre-define many variables to avoid memory reallocation. 
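- For reference, a rough sketch of that partitioning (this is only a restatement of the Contrast::partition() and GLM::Test* code introduced in this patch, not extra functionality; M denotes the design matrix, c the contrast matrix, S the shuffling matrix, ' the transpose, and pinv() the Moore-Penrose pseudoinverse):

      D  = pinv(M' * M)
      Cu = kernel(c)                          (contrasts of no interest)
      Cv = Cu - c' * pinv(c * D * c') * c * D * Cu
      X  = M * D * c'  * pinv(c * D * c')     (regressors of interest)
      Z  = M * D * Cv  * pinv(Cv' * D * Cv)   (nuisance regressors)

  Freedman-Lane shuffling is then applied to the nuisance residuals in a single step (Sy = S * Rz * y), the shuffled data are regressed against the full model (b = c * pinv(M) * Sy), and F = (b' * (X'X) * b / rank(c)) / (sse / dof) with sse = ||Rm * Sy||^2 and dof = n - rank(X) - rank(Z); for a t-test the reported statistic is sign(b) * sqrt(F).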
--- cmd/connectomestats.cpp | 8 +-- cmd/fixelcfestats.cpp | 39 +++++++++-- cmd/mrclusterstats.cpp | 4 +- cmd/vectorstats.cpp | 7 +- core/math/stats/fwe.cpp | 63 +++++++++++++++++ core/math/stats/fwe.h | 38 ++++++++++ core/math/stats/glm.cpp | 136 +++++++++++++++--------------------- core/math/stats/glm.h | 16 ++--- core/math/stats/import.h | 2 + core/math/stats/shuffle.cpp | 35 ---------- core/math/stats/shuffle.h | 7 -- 11 files changed, 207 insertions(+), 148 deletions(-) create mode 100644 core/math/stats/fwe.cpp create mode 100644 core/math/stats/fwe.h diff --git a/cmd/connectomestats.cpp b/cmd/connectomestats.cpp index 18d86e4e6b..e39b2e79d8 100644 --- a/cmd/connectomestats.cpp +++ b/cmd/connectomestats.cpp @@ -18,6 +18,7 @@ #include "progressbar.h" #include "file/path.h" +#include "math/stats/fwe.h" #include "math/stats/glm.h" #include "math/stats/import.h" #include "math/stats/shuffle.h" @@ -296,8 +297,8 @@ void run() } // Precompute default statistic and enhanced statistic - matrix_type tvalue_output (num_contrasts, num_edges); - matrix_type enhanced_output (num_contrasts, num_edges); + matrix_type tvalue_output (num_edges, num_contrasts); + matrix_type enhanced_output (num_edges, num_contrasts); Stats::PermTest::precompute_default_permutation (glm_test, enhancer, empirical_statistic, enhanced_output, tvalue_output); @@ -317,8 +318,7 @@ void run() for (size_t i = 0; i != num_contrasts; ++i) save_vector (null_distribution.col(i), output_prefix + "_null_dist" + postfix(i) + ".txt"); - matrix_type pvalue_output (num_contrasts, num_edges); - Math::Stats::statistic2pvalue (null_distribution, enhanced_output, pvalue_output); + const matrix_type pvalue_output = MR::Math::Stats::fwe_pvalue (null_distribution, enhanced_output); for (size_t i = 0; i != num_contrasts; ++i) { save_matrix (mat2vec.V2M (pvalue_output.col(i)), output_prefix + "_fwe_pvalue" + postfix(i) + ".csv"); save_matrix (mat2vec.V2M (uncorrected_pvalues.col(i)), output_prefix + "_uncorrected_pvalue" + postfix(i) + ".csv"); diff --git a/cmd/fixelcfestats.cpp b/cmd/fixelcfestats.cpp index c8a57f6fbf..ac5cd3306e 100644 --- a/cmd/fixelcfestats.cpp +++ b/cmd/fixelcfestats.cpp @@ -21,6 +21,7 @@ #include "fixel/helpers.h" #include "fixel/keys.h" #include "fixel/loop.h" +#include "math/stats/fwe.h" #include "math/stats/glm.h" #include "math/stats/import.h" #include "math/stats/shuffle.h" @@ -142,7 +143,7 @@ class SubjectFixelImport : public SubjectDataImportBase public: SubjectFixelImport (const std::string& path) : SubjectDataImportBase (path), - H (Header::open (path)), + H (Header::open (find_image (path))), data (H.get_image()) { for (size_t axis = 1; axis < data.ndim(); ++axis) { @@ -155,7 +156,10 @@ class SubjectFixelImport : public SubjectDataImportBase { assert (row.size() == size()); Image temp (data); // For thread-safety - row = temp.row(0); + // Doesn't work + //row = temp.row(0); + for (temp.index(0) = 0; temp.index(0) != temp.size(0); ++temp.index(0)) + row (temp.index(0)) = temp.value(); } default_type operator[] (const size_t index) const override @@ -170,12 +174,35 @@ class SubjectFixelImport : public SubjectDataImportBase const Header& header() const { return H; } + + static void set_fixel_directory (const std::string& s) { fixel_directory = s; } + + private: Header H; const Image data; + // Enable input image paths to be either absolute, relative to CWD, or + // relative to input fixel template directory + std::string find_image (const std::string& path) const + { + const std::string cat_path = Path::join 
(fixel_directory, path); + if (Path::is_file (cat_path)) + return cat_path; + if (Path::is_file (path)) + return path; + throw Exception ("Unable to find subject image \"" + path + + "\" either in input fixel directory \"" + fixel_directory + + "\" or in current working directory"); + return ""; + } + + static std::string fixel_directory; + }; +std::string SubjectFixelImport::fixel_directory; void run() @@ -192,6 +219,7 @@ void run() const std::string input_fixel_directory = argument[0]; + SubjectFixelImport::set_fixel_directory (input_fixel_directory); Header index_header = Fixel::find_index_header (input_fixel_directory); auto index_image = index_header.get_image(); @@ -441,8 +469,8 @@ void run() } // Precompute default statistic and CFE statistic - matrix_type cfe_output (num_contrasts, num_fixels); - matrix_type tvalue_output (num_contrasts, num_fixels); + matrix_type cfe_output (num_fixels, num_contrasts); + matrix_type tvalue_output (num_fixels, num_contrasts); Stats::PermTest::precompute_default_permutation (glm_test, cfe_integrator, empirical_cfe_statistic, cfe_output, tvalue_output); @@ -465,8 +493,7 @@ void run() ++progress; } - matrix_type pvalue_output (num_contrasts, num_fixels); - Math::Stats::statistic2pvalue (perm_distribution, cfe_output, pvalue_output); + const matrix_type pvalue_output = MR::Math::Stats::fwe_pvalue (perm_distribution, cfe_output); ++progress; for (size_t i = 0; i != num_contrasts; ++i) { write_fixel_output (Path::join (output_fixel_directory, "fwe_pvalue" + postfix(i) + ".mif"), pvalue_output.col(i), output_header); diff --git a/cmd/mrclusterstats.cpp b/cmd/mrclusterstats.cpp index 300f074792..1cdd6d76e6 100644 --- a/cmd/mrclusterstats.cpp +++ b/cmd/mrclusterstats.cpp @@ -18,6 +18,7 @@ #include "algo/loop.h" #include "file/path.h" +#include "math/stats/fwe.h" #include "math/stats/glm.h" #include "math/stats/import.h" #include "math/stats/shuffle.h" @@ -332,8 +333,7 @@ void run() { write_output (uncorrected_pvalue.col(i), v2v, prefix + "uncorrected_pvalue" + postfix(i) + ".mif", output_header); ++progress; } - matrix_type fwe_pvalue_output (num_voxels, num_contrasts); - Math::Stats::statistic2pvalue (perm_distribution, default_cluster_output, fwe_pvalue_output); + const matrix_type fwe_pvalue_output = MR::Math::Stats::fwe_pvalue (perm_distribution, default_cluster_output); ++progress; for (size_t i = 0; i != num_contrasts; ++i) { write_output (fwe_pvalue_output.col(i), v2v, prefix + "fwe_pvalue" + postfix(i) + ".mif", output_header); diff --git a/cmd/vectorstats.cpp b/cmd/vectorstats.cpp index b16d5140b5..3ec5402bbb 100644 --- a/cmd/vectorstats.cpp +++ b/cmd/vectorstats.cpp @@ -18,6 +18,7 @@ #include "progressbar.h" #include "file/path.h" +#include "math/stats/fwe.h" #include "math/stats/glm.h" #include "math/stats/import.h" #include "math/stats/shuffle.h" @@ -205,6 +206,7 @@ void run() // Precompute default statistic // Don't use convenience function: No enhancer!
// Manually construct default shuffling matrix + // TODO Change to use convenience function; we make an empty enhancer later anyway const matrix_type default_shuffle (matrix_type::Identity (num_subjects, num_subjects)); matrix_type default_tvalues; (*glm_test) (default_shuffle, default_tvalues); @@ -221,10 +223,9 @@ void run() Stats::PermTest::run_permutations (glm_test, enhancer, empirical_distribution, default_tvalues, null_distribution, uncorrected_pvalues); - matrix_type default_pvalues (num_elements, num_contrasts); - Math::Stats::statistic2pvalue (null_distribution, default_tvalues, default_pvalues); + const matrix_type fwe_pvalues = MR::Math::Stats::fwe_pvalue (null_distribution, default_tvalues); for (size_t i = 0; i != num_contrasts; ++i) { - save_vector (default_pvalues.col(i), output_prefix + "fwe_pvalue" + postfix(i) + ".csv"); + save_vector (fwe_pvalues.col(i), output_prefix + "fwe_pvalue" + postfix(i) + ".csv"); save_vector (uncorrected_pvalues.col(i), output_prefix + "uncorrected_pvalue" + postfix(i) + ".csv"); } diff --git a/core/math/stats/fwe.cpp b/core/math/stats/fwe.cpp new file mode 100644 index 0000000000..35fb0cf4df --- /dev/null +++ b/core/math/stats/fwe.cpp @@ -0,0 +1,63 @@ +/* Copyright (c) 2008-2017 the MRtrix3 contributors + * + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, you can obtain one at http://mozilla.org/MPL/2.0/. + * + * MRtrix is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * For more details, see http://www.mrtrix.org/. + */ + + +#include "math/stats/fwe.h" + +#include +#include + +namespace MR +{ + namespace Math + { + namespace Stats + { + + + + // FIXME Jump based on non-initialised value in the sort + // Pre-fill the null distribution / stats matrices with NaNs, detect when it's not overwritten + matrix_type fwe_pvalue (const matrix_type& null_distributions, const matrix_type& statistics) + { + matrix_type pvalues (statistics.rows(), statistics.cols()); + for (ssize_t contrast = 0; contrast != statistics.cols(); ++contrast) { + vector sorted_null_dist; + sorted_null_dist.reserve (null_distributions.rows()); + for (ssize_t perm = 0; perm != null_distributions.rows(); ++perm) + sorted_null_dist.push_back (null_distributions (perm, contrast)); + std::sort (sorted_null_dist.begin(), sorted_null_dist.end()); + for (ssize_t element = 0; element != statistics.rows(); ++element) { + if (statistics (element, contrast) > 0.0) { + value_type pvalue = 1.0; + for (size_t j = 0; j < size_t(sorted_null_dist.size()); ++j) { + if (statistics(element, contrast) < sorted_null_dist[j]) { + pvalue = value_type(j) / value_type(sorted_null_dist.size()); + break; + } + } + pvalues(element, contrast) = pvalue; + } else { + pvalues(element, contrast) = 0.0; + } + } + } + return pvalues; + } + + + + + } + } +} diff --git a/core/math/stats/fwe.h b/core/math/stats/fwe.h new file mode 100644 index 0000000000..0b3bdbda29 --- /dev/null +++ b/core/math/stats/fwe.h @@ -0,0 +1,38 @@ +/* Copyright (c) 2008-2017 the MRtrix3 contributors + * + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, you can obtain one at http://mozilla.org/MPL/2.0/. 
+ * + * MRtrix is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * For more details, see http://www.mrtrix.org/. + */ + + +#ifndef __math_stats_fwe_h__ +#define __math_stats_fwe_h__ + +#include "math/stats/typedefs.h" + + +namespace MR +{ + namespace Math + { + namespace Stats + { + + + + matrix_type fwe_pvalue (const matrix_type& null_dist, const matrix_type& stats); + + + + } + } +} + +#endif diff --git a/core/math/stats/glm.cpp b/core/math/stats/glm.cpp index f35bf92aa5..495f4d6f44 100644 --- a/core/math/stats/glm.cpp +++ b/core/math/stats/glm.cpp @@ -77,7 +77,7 @@ namespace MR if (opt.size()) { const matrix_type ftest_matrix = load_matrix (opt[0][0]); if (ftest_matrix.cols() != contrast_matrix.rows()) - throw Exception ("Number of columns in F-test matrix (" + str(ftest_matrix.rows()) + ") does not match number of rows in contrast matrix (" + str(contrast_matrix.rows()) + ")"); + throw Exception ("Number of columns in F-test matrix (" + str(ftest_matrix.cols()) + ") does not match number of rows in contrast matrix (" + str(contrast_matrix.rows()) + ")"); if (!((ftest_matrix.array() == 0.0) + (ftest_matrix.array() == 1.0)).all()) throw Exception ("F-test array must contain ones and zeros only"); for (ssize_t ftest_index = 0; ftest_index != ftest_matrix.rows(); ++ftest_index) { @@ -120,7 +120,7 @@ namespace MR if (contrast.is_F()) return vector_type::Constant (measurements.rows(), NaN); else - return matrix_type(contrast) * solve_betas (measurements, design); + return contrast.matrix() * solve_betas (measurements, design); } matrix_type abs_effect_size (const matrix_type& measurements, const matrix_type& design, const vector& contrasts) @@ -184,7 +184,7 @@ namespace MR if (contrasts[ic].is_F()) { abs_effect_size.col (ic).fill (NaN); } else { - abs_effect_size.col (ic) = (matrix_type (contrasts[ic]) * betas).row (0); + abs_effect_size.col (ic) = (contrasts[ic].matrix() * betas).row (0); } } #ifdef GLM_ALL_STATS_DEBUG @@ -307,26 +307,21 @@ namespace MR - Contrast::Partition Contrast::operator() (const matrix_type& design) const + + + + + // Same model partitioning as is used in FSL randomise + Contrast::Partition Contrast::partition (const matrix_type& design) const { - // For now, let's do the most basic partitioning possible: - // Split design matrix column-wise depending on whether entries in the contrast matrix are all zero - // TODO Later, may include config variables / compiler flags to change model partitioning technique - matrix_type X, Z; - //std::cerr << "Design:\n" << design << "\nContrast: " << c << "\n"; - const size_t nonzero_column_count = c.colwise().any().count(); - //VAR (nonzero_column_count); - X.resize (design.rows(), nonzero_column_count); - Z.resize (design.rows(), design.cols() - nonzero_column_count); - ssize_t ix = 0, iz = 0; - for (ssize_t ic = 0; ic != c.cols(); ++ic) { - if (c.col (ic).any()) - X.col (ix++) = design.col (ic); - else - Z.col (iz++) = design.col (ic); - } - //std::cerr << X << "\n"; - //std::cerr << Z << "\n"; + const matrix_type D = Math::pinv (design.transpose() * design); + // Note: Cu is transposed with respect to how contrast matrices are stored elsewhere + const matrix_type Cu = Eigen::FullPivLU (c).kernel(); + const auto inv_cDc = Math::pinv (c * D * c.transpose()); + // Note: Cv is transposed with respect to convention just as Cu is + const matrix_type Cv = Cu - c.transpose() * inv_cDc * c * D * Cu; + const 
matrix_type X = design * D * c.transpose() * inv_cDc; + const matrix_type Z = design * D * Cv * Math::pinv (Cv.transpose() * D * Cv); return Partition (X, Z); } @@ -336,7 +331,7 @@ namespace MR { // FullPivLU.image() provides column-space of matrix; // here we want the row-space (since it's degeneracy in contrast matrix rows - // that has led to the rank-deficiency, whereas we can't exclude factors). + // that has led to the rank-deficiency, whereas we can't exclude factor columns). // Hence the transposing. Eigen::FullPivLU decomp (in.transpose()); if (decomp.rank() == in.rows()) @@ -355,6 +350,10 @@ namespace MR + + + + TestFixed::TestFixed (const matrix_type& measurements, const matrix_type& design, const vector& contrasts) : TestBase (measurements, design, contrasts), pinvM (Math::pinv (M)), @@ -363,7 +362,7 @@ namespace MR assert (contrasts[0].cols() == design.cols()); // When the design matrix is fixed, we can pre-calculate the model partitioning for each contrast for (const auto c : contrasts) - partitions.emplace_back (c (design)); + partitions.emplace_back (c.partition (design)); } @@ -374,36 +373,31 @@ namespace MR if (!(size_t(output.rows()) == num_elements() && size_t(output.cols()) == num_outputs())) output.resize (num_elements(), num_outputs()); - matrix_type PRz, Sy, beta, c_lambda, XtX; + matrix_type Sy, lambdas, XtX, beta; vector_type sse; - // Implement Freedman-Lane for fixed design matrix case + // Freedman-Lane for fixed design matrix case // Each contrast needs to be handled explicitly on its own - // TESTME Need to see how an F-test goes here - // This may have an interaction with the model partitioning approach for (size_t ic = 0; ic != c.size(); ++ic) { // First, we perform permutation of the input data // In Freedman-Lane, the initial 'effective' regression against the nuisance // variables, and permutation of the data, are done in a single step - //VAR (perm_matrix.rows()); - //VAR (perm_matrix.cols()); + //VAR (shuffling_matrix.rows()); + //VAR (shuffling_matrix.cols()); //VAR (partitions[ic].Rz.rows()); //VAR (partitions[ic].Rz.cols()); //VAR (y.rows()); //VAR (y.cols()); - PRz.noalias() = shuffling_matrix * partitions[ic].Rz; - //VAR (PRz.rows()); - //VAR (PRz.cols()); - Sy.noalias() = PRz * y; + Sy.noalias() = shuffling_matrix * partitions[ic].Rz * y; //VAR (Sy.rows()); //VAR (Sy.cols()); // Now, we regress this shuffled data against the full model //VAR (pinvM.rows()); //VAR (pinvM.cols()); - beta.noalias() = pinvM * Sy; - //VAR (beta.rows()); - //VAR (beta.cols()); + lambdas.noalias() = pinvM * Sy; + //VAR (lambda.rows()); + //VAR (lambda.cols()); //VAR (matrix_type(c[ic]).rows()); //VAR (matrix_type(c[ic]).cols()); //VAR (Rm.rows()); @@ -414,18 +408,11 @@ namespace MR const default_type one_over_dof = 1.0 / (num_subjects() - partitions[ic].rank_x - partitions[ic].rank_z); sse = (Rm*Sy).colwise().squaredNorm(); //VAR (sse.size()); - // FIXME This should be giving a vector, not a matrix - //auto temp4 = c_lambda.transpose() * (XtX * c_lambda); - //VAR (temp4.rows()); - //VAR (temp4.cols()); - //std::cerr << temp4 << "\n"; for (ssize_t ie = 0; ie != num_elements(); ++ie) { - c_lambda.noalias() = matrix_type(c[ic]) * beta.col (ie); - //VAR (c_lambda.rows()); - //VAR (c_lambda.cols()); - //VAR (partitions[ic].X.rows()); - //VAR (partitions[ic].X.cols()); - const auto numerator = (c_lambda.transpose() * XtX * c_lambda) / c[ic].rank(); + beta.noalias() = c[ic].matrix() * lambdas.col (ie); + //VAR (beta.rows()); + //VAR (beta.cols()); + const auto numerator = 
(beta.transpose() * XtX * beta) / c[ic].rank(); assert (numerator.rows() == 1); assert (numerator.cols() == 1); const value_type F = numerator (0, 0) / (one_over_dof * sse[ie]); @@ -434,8 +421,8 @@ namespace MR } else if (c[ic].is_F()) { output (ie, ic) = F; } else { - assert (c_lambda.rows() == 1); - output (ie, ic) = std::sqrt (F) * (c_lambda.row(0).sum() > 0.0 ? 1.0 : -1.0); + assert (beta.rows() == 1); + output (ie, ic) = std::sqrt (F) * (beta.sum() > 0.0 ? 1.0 : -1.0); } } @@ -475,6 +462,12 @@ namespace MR if (!(size_t(output.rows()) == num_elements() && size_t(output.cols()) == num_outputs())) output.resize (num_elements(), num_outputs()); + matrix_type extra_data (num_subjects(), importers.size()); + BitSet element_mask (num_subjects()), perm_matrix_mask (num_subjects()); + matrix_type perm_matrix_masked, Mfull_masked, pinvMfull_masked, Rm; + vector_type y_masked, Sy, lambda; + matrix_type XtX, beta; + // Let's loop over elements first, then contrasts in the inner loop for (ssize_t ie = 0; ie != y.cols(); ++ie) { @@ -484,7 +477,6 @@ namespace MR // rather than re-generating them each time? (More RAM, less CPU) // No, most of the time that subject data will be memory-mapped, so pre-loading (in // addition to the duplication of the fixed design matrix contents) would hurt bad - matrix_type extra_data (num_subjects(), importers.size()); for (ssize_t col = 0; col != ssize_t(importers.size()); ++col) extra_data.col (col) = importers[col] (ie); @@ -503,7 +495,7 @@ namespace MR // No, don't think it's removal of columns; think it's removal of any rows // that contain non-zero values in those columns // - BitSet element_mask (M.rows(), true); + element_mask.clear (true); if (nans_in_data) { for (ssize_t row = 0; row != y.rows(); ++row) { if (!std::isfinite (y (row, ie))) @@ -520,9 +512,6 @@ namespace MR // Do we need to reduce the size of our matrices / vectors // based on the presence of non-finite values? 
- matrix_type Mfull_masked; - matrix_type perm_matrix_masked; - vector_type y_masked; if (finite_count == num_subjects()) { Mfull_masked.resize (num_subjects(), num_factors()); @@ -535,7 +524,7 @@ namespace MR Mfull_masked.resize (finite_count, num_factors()); y_masked.resize (finite_count); - BitSet perm_matrix_mask (num_subjects(), true); + perm_matrix_mask.clear (true); ssize_t out_index = 0; for (size_t in_index = 0; in_index != num_subjects(); ++in_index) { if (element_mask[in_index]) { @@ -566,28 +555,26 @@ namespace MR } assert (Mfull_masked.allFinite()); - const matrix_type pinvMfull_masked = Math::pinv (Mfull_masked); + pinvMfull_masked = Math::pinv (Mfull_masked); - const matrix_type Rm = matrix_type::Identity (finite_count, finite_count) - (Mfull_masked*pinvMfull_masked); - - matrix_type beta, c_lambda; + Rm.noalias() = matrix_type::Identity (finite_count, finite_count) - (Mfull_masked*pinvMfull_masked); // We now have our permutation (shuffling) matrix and design matrix prepared, // and can commence regressing the partitioned model of each contrast for (size_t ic = 0; ic != c.size(); ++ic) { - const auto partition = c[ic] (Mfull_masked); - const matrix_type XtX = partition.X.transpose()*partition.X; + const auto partition = c[ic].partition (Mfull_masked); + XtX.noalias() = partition.X.transpose()*partition.X; // Now that we have the individual contrast model partition for these data, // the rest of this function should proceed similarly to the fixed // design matrix case - const matrix_type Sy = perm_matrix_masked * partition.Rz * y_masked.matrix(); - beta.noalias() = pinvMfull_masked * Sy; - c_lambda.noalias() = matrix_type(c[ic]) * beta; - const default_type sse = (Rm*Sy).squaredNorm(); + Sy = perm_matrix_masked * partition.Rz * y_masked.matrix(); + lambda = pinvMfull_masked * Sy.matrix(); + beta.noalias() = c[ic].matrix() * lambda.matrix(); + const default_type sse = (Rm*Sy.matrix()).squaredNorm(); - const default_type F = ((c_lambda.transpose() * XtX * c_lambda) / c[ic].rank()) (0, 0) / + const default_type F = ((beta.transpose() * XtX * beta) (0, 0) / c[ic].rank()) / (sse / value_type (finite_count - partition.rank_x - partition.rank_z)); if (!std::isfinite (F)) { @@ -595,8 +582,8 @@ namespace MR } else if (c[ic].is_F()) { output (ie, ic) = F; } else { - assert (c_lambda.rows() == 1); - output (ie, ic) = std::sqrt (F) * (c_lambda.sum() > 0 ? 1.0 : -1.0); + assert (beta.rows() == 1); + output (ie, ic) = std::sqrt (F) * (beta.sum() > 0 ? 
1.0 : -1.0); } } // End looping over contrasts @@ -606,17 +593,6 @@ namespace MR - /*matrix_type TestVariable::default_design (const size_t index) const - { - matrix_type output (M.rows(), M.cols() + importers.size()); - output.block (0, 0, M.rows(), M.cols()) = M; - for (size_t i = 0; i != importers.size(); ++i) - output.col (M.cols() + i) = importers[i] (index); - return output; - }*/ - - - } } } diff --git a/core/math/stats/glm.h b/core/math/stats/glm.h index d23a4fa179..dcc07d3f43 100644 --- a/core/math/stats/glm.h +++ b/core/math/stats/glm.h @@ -22,6 +22,8 @@ #include "math/stats/import.h" #include "math/stats/typedefs.h" +#include "misc/bitset.h" + namespace MR { namespace Math @@ -69,12 +71,11 @@ namespace MR // X = Component of design matrix related to effect of interest // Z = Component of design matrix related to nuisance regressors const matrix_type X, Z; - // We would also like to automatically calculate, on creation of a partition: // Hz: Projection matrix of nuisance regressors only // Rz: Residual-forming matrix due to nuisance regressors only + const matrix_type Hz, Rz; // rank_x: Rank of X // rank_z: Rank of Z - const matrix_type Hz, Rz; const size_t rank_x, rank_z; }; @@ -90,9 +91,9 @@ namespace MR F (true), i (index) { } - Partition operator() (const matrix_type&) const; + Partition partition (const matrix_type&) const; - operator const matrix_type& () const { return c; } + const matrix_type& matrix() const { return c; } ssize_t cols() const { return c.cols(); } size_t rank() const { return r; } bool is_F() const { return F; } @@ -294,13 +295,6 @@ namespace MR */ void operator() (const matrix_type& shuffling_matrix, matrix_type& output) const override; - /*! Acquire the design matrix for the default permutation - * (note that this needs to be re-run for each element being tested) - * @param index the index of the element for which the design matrix is requested - * @return the design matrix for that element, including imported data for extra columns - */ - //matrix_type default_design (const size_t index) const; - size_t num_factors() const override { return M.cols() + importers.size(); } protected: diff --git a/core/math/stats/import.h b/core/math/stats/import.h index 4286122489..09306849cc 100644 --- a/core/math/stats/import.h +++ b/core/math/stats/import.h @@ -123,6 +123,8 @@ namespace MR ProgressBar progress ("Importing data from files listed in \"" + Path::basename (path) + "\""); const std::string directory = Path::dirname (path); std::ifstream ifs (path.c_str()); + if (!ifs) + throw Exception ("Unable to open subject file list \"" + path + "\""); std::string line; while (getline (ifs, line)) { size_t p = line.find_last_not_of(" \t"); diff --git a/core/math/stats/shuffle.cpp b/core/math/stats/shuffle.cpp index b247b11cc3..ce55e69a0c 100644 --- a/core/math/stats/shuffle.cpp +++ b/core/math/stats/shuffle.cpp @@ -300,41 +300,6 @@ namespace MR - - - - - - - void statistic2pvalue (const matrix_type& null_dist, const matrix_type& stats, matrix_type& pvalues) - { - pvalues.resize (stats.rows(), stats.cols()); - for (ssize_t contrast = 0; contrast != stats.cols(); ++contrast) { - vector sorted_null_dist; - sorted_null_dist.reserve (null_dist.rows()); - for (ssize_t perm = 0; perm != null_dist.rows(); ++perm) - sorted_null_dist.push_back (null_dist (perm, contrast)); - std::sort (sorted_null_dist.begin(), sorted_null_dist.end()); - for (ssize_t element = 0; element != stats.rows(); ++element) { - if (stats (element, contrast) > 0.0) { - value_type pvalue = 1.0; - for 
(size_t j = 0; j < size_t(sorted_null_dist.size()); ++j) { - if (stats(element, contrast) < sorted_null_dist[j]) { - pvalue = value_type(j) / value_type(sorted_null_dist.size()); - break; - } - } - pvalues(element, contrast) = pvalue; - } else { - pvalues(element, contrast) = 0.0; - } - } - } - } - - - - } } } diff --git a/core/math/stats/shuffle.h b/core/math/stats/shuffle.h index b7190ae0f2..6af6eee2d0 100644 --- a/core/math/stats/shuffle.h +++ b/core/math/stats/shuffle.h @@ -107,13 +107,6 @@ namespace MR - // TODO Some of these should be vector_type's? - // - No, represent multiple contrasts - // TODO Should live elsewhere? - void statistic2pvalue (const matrix_type& null_dist, const matrix_type& stats, matrix_type& pvalues); - - - } } } From 25a3ab32b7e677235e1c3b24f97217681393b137 Mon Sep 17 00:00:00 2001 From: J-Donald Tournier Date: Tue, 12 Dec 2017 23:19:15 +0000 Subject: [PATCH 0089/1471] MRView transform tool: aimplify interface and get all operations working --- src/gui/mrview/mode/base.cpp | 4 +- src/gui/mrview/tool/transform.cpp | 67 +++++++++++++++---------------- src/gui/mrview/tool/transform.h | 8 +--- 3 files changed, 36 insertions(+), 43 deletions(-) diff --git a/src/gui/mrview/mode/base.cpp b/src/gui/mrview/mode/base.cpp index 4c345757ed..c8d502a448 100644 --- a/src/gui/mrview/mode/base.cpp +++ b/src/gui/mrview/mode/base.cpp @@ -198,8 +198,8 @@ namespace MR const Projection* proj = get_current_projection(); if (!proj) return; - auto move = -proj->screen_to_model_direction (window().mouse_displacement(), target()); - set_target (target() + move); + auto move = proj->screen_to_model_direction (window().mouse_displacement(), target()); + set_target (target() - move); updateGL(); } diff --git a/src/gui/mrview/tool/transform.cpp b/src/gui/mrview/tool/transform.cpp index 8834a03d2a..e5a5a34f83 100644 --- a/src/gui/mrview/tool/transform.cpp +++ b/src/gui/mrview/tool/transform.cpp @@ -27,14 +27,15 @@ namespace MR Base (parent) { VBoxLayout* main_box = new VBoxLayout (this); + QLabel* label = new QLabel ( + "The transform tool is currently active
" + "Close this tool to deactivate.
" + "All camera view manipulations will now apply " + "to the main image, rather than to the camera"); + label->setWordWrap (true); + label->setAlignment (Qt::AlignHCenter); - activate_button = new QPushButton ("Activate",this); - activate_button->setToolTip (tr ("Activate transform manipulation mode\nAll camera move operations will now apply to the main image")); - activate_button->setIcon (QIcon (":/rotate.svg")); - activate_button->setCheckable (true); - activate_button->setChecked (!window().get_image_visibility()); - connect (activate_button, SIGNAL (clicked(bool)), this, SLOT (onActivate (bool))); - main_box->addWidget (activate_button, 0); + main_box->addWidget (label, 0); main_box->addStretch (); } @@ -46,7 +47,8 @@ namespace MR void Transform::showEvent (QShowEvent*) { - activate_button->setChecked (false); + if (isVisible()) + window().register_camera_interactor (this); } @@ -62,20 +64,15 @@ namespace MR - - void Transform::onActivate (bool onoff) + void Transform::hideEvent (QHideEvent*) { - window().register_camera_interactor (onoff ? this : nullptr); + if (window().active_camera_interactor() == this) + window().register_camera_interactor(); } - void Transform::deactivate () - { - activate_button->setChecked (false); - } - @@ -112,7 +109,7 @@ namespace MR auto move = proj->screen_to_model_direction (window().mouse_displacement(), window().target()); transform_type M = window().image()->header().transform(); - M.translate (move.cast()); + M.pretranslate (move.cast()); window().image()->header().transform() = M; window().image()->image.buffer->transform() = M; @@ -132,7 +129,7 @@ namespace MR auto move = window().get_current_mode()->get_through_plane_translation_FOV (window().mouse_displacement().y(), *proj); transform_type M = window().image()->header().transform(); - M.translate (move.cast()); + M.pretranslate (-move.cast()); window().image()->header().transform() = M; window().image()->image.buffer->transform() = M; @@ -150,19 +147,17 @@ namespace MR if (window().snap_to_image()) window().set_snap_to_image (false); - transform_type M = window().image()->header().transform(); -/* - //M = const auto rot = window().get_current_mode()->get_tilt_rotation().cast(); - if (!rot) + if (!rot.coeffs().allFinite()) return true; - M = transform_type(rot); + const Eigen::Vector3d origin = window().focus().cast(); + transform_type M = transform_type (rot).pretranslate (origin).translate (-origin) * window().image()->header().transform(); - Math::Versorf orient = rot * orientation(); - set_orientation (orient); + window().image()->header().transform() = M; + window().image()->image.buffer->transform() = M; window().updateGL(); -*/ + return true; } @@ -172,17 +167,21 @@ namespace MR bool Transform::rotate_event () { - /*if (snap_to_image()) + if (window().snap_to_image()) window().set_snap_to_image (false); - const Math::Versorf rot = get_rotate_rotation(); - if (!rot) - return; + const auto rot = window().get_current_mode()->get_rotate_rotation().cast(); + if (!rot.coeffs().allFinite()) + return true; - Math::Versorf orient = rot * orientation(); - set_orientation (orient); - updateGL();*/ - return false; + const Eigen::Vector3d origin = window().target().cast(); + transform_type M = transform_type (rot).inverse().pretranslate (origin).translate (-origin) * window().image()->header().transform(); + + window().image()->header().transform() = M; + window().image()->image.buffer->transform() = M; + window().updateGL(); + + return true; } diff --git a/src/gui/mrview/tool/transform.h 
b/src/gui/mrview/tool/transform.h index 8abe0360d2..10677280b3 100644 --- a/src/gui/mrview/tool/transform.h +++ b/src/gui/mrview/tool/transform.h @@ -35,7 +35,6 @@ namespace MR public: Transform (Dock* parent); - void deactivate () override; bool slice_move_event (float inc) override; bool pan_event () override; bool panthrough_event () override; @@ -45,12 +44,7 @@ namespace MR protected: virtual void showEvent (QShowEvent* event) override; virtual void closeEvent (QCloseEvent* event) override; - - private slots: - void onActivate (bool); - - private: - QPushButton *activate_button; + virtual void hideEvent (QHideEvent* event) override; }; } From b20c6b01cfd8fb2c23d0460944d1643219392f77 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Wed, 13 Dec 2017 12:04:51 +1100 Subject: [PATCH 0090/1471] Stats: Minor fixes - Allocate matrices as part of Stats::Permtest::precompute_default_permutation() rather than requiring pre-allocation. - Fix various signed - unsigned integer comparison warnings. - Use .eval() calls to evaluate Eigen matrix results prior to invoking Math::pinv(); this appears to be required for older versions of either Eigen or GCC. - Fix final permutation not being assessed. --- cmd/connectomestats.cpp | 6 +----- cmd/fixelcfestats.cpp | 12 ++++-------- cmd/mrclusterstats.cpp | 2 +- cmd/vectorstats.cpp | 1 - core/math/stats/glm.cpp | 21 +++++++++++---------- core/math/stats/shuffle.cpp | 3 +-- src/stats/permtest.cpp | 4 ++++ 7 files changed, 22 insertions(+), 27 deletions(-) diff --git a/cmd/connectomestats.cpp b/cmd/connectomestats.cpp index e39b2e79d8..0d533f77cc 100644 --- a/cmd/connectomestats.cpp +++ b/cmd/connectomestats.cpp @@ -297,11 +297,8 @@ void run() } // Precompute default statistic and enhanced statistic - matrix_type tvalue_output (num_edges, num_contrasts); - matrix_type enhanced_output (num_edges, num_contrasts); - + matrix_type tvalue_output, enhanced_output; Stats::PermTest::precompute_default_permutation (glm_test, enhancer, empirical_statistic, enhanced_output, tvalue_output); - for (size_t i = 0; i != num_contrasts; ++i) { save_matrix (mat2vec.V2M (tvalue_output.col(i)), output_prefix + "_" + (contrasts[i].is_F() ? 
"F" : "t") + "value" + postfix(i) + ".csv"); save_matrix (mat2vec.V2M (enhanced_output.col(i)), output_prefix + "_enhanced" + postfix(i) + ".csv"); @@ -311,7 +308,6 @@ void run() if (!get_options ("notest").size()) { matrix_type null_distribution, uncorrected_pvalues; - Stats::PermTest::run_permutations (glm_test, enhancer, empirical_statistic, enhanced_output, null_distribution, uncorrected_pvalues); diff --git a/cmd/fixelcfestats.cpp b/cmd/fixelcfestats.cpp index ac5cd3306e..14bb81bc41 100644 --- a/cmd/fixelcfestats.cpp +++ b/cmd/fixelcfestats.cpp @@ -154,7 +154,7 @@ class SubjectFixelImport : public SubjectDataImportBase void operator() (matrix_type::RowXpr row) const override { - assert (row.size() == size()); + assert (size_t(row.size()) == size()); Image temp (data); // For thread-safety // Doesn't work //row = temp.row(0); @@ -224,7 +224,7 @@ void run() auto index_image = index_header.get_image(); const uint32_t num_fixels = Fixel::get_number_of_fixels (index_header); - CONSOLE ("number of fixels: " + str(num_fixels)); + CONSOLE ("Number of fixels: " + str(num_fixels)); vector positions (num_fixels); vector directions (num_fixels); @@ -259,7 +259,6 @@ void run() // Load design matrix: const matrix_type design = load_matrix (argument[2]); - CONSOLE ("Design matrix dimensions: " + str(design.rows()) + " x " + str(design.cols())); if (design.rows() != (ssize_t)importer.size()) throw Exception ("Number of input files does not match number of rows in design matrix"); @@ -286,6 +285,7 @@ void run() } const ssize_t num_factors = design.cols() + extra_columns.size(); + CONSOLE ("Number of factors: " + str(num_factors)); if (contrasts[0].cols() != num_factors) throw Exception ("The number of columns per contrast (" + str(contrasts[0].cols()) + ")" + (extra_columns.size() ? " (in addition to the " + str(extra_columns.size()) + " uses of -column)" : "") @@ -469,11 +469,8 @@ void run() } // Precompute default statistic and CFE statistic - matrix_type cfe_output (num_fixels, num_contrasts); - matrix_type tvalue_output (num_fixels, num_contrasts); - + matrix_type cfe_output, tvalue_output; Stats::PermTest::precompute_default_permutation (glm_test, cfe_integrator, empirical_cfe_statistic, cfe_output, tvalue_output); - for (size_t i = 0; i != num_contrasts; ++i) { write_fixel_output (Path::join (output_fixel_directory, "cfe" + postfix(i) + ".mif"), cfe_output.col(i), output_header); write_fixel_output (Path::join (output_fixel_directory, std::string(contrasts[i].is_F() ? 
"F" : "t") + "value" + postfix(i) + ".mif"), tvalue_output.col(i), output_header); @@ -483,7 +480,6 @@ void run() if (!get_options ("notest").size()) { matrix_type perm_distribution, uncorrected_pvalues; - Stats::PermTest::run_permutations (glm_test, cfe_integrator, empirical_cfe_statistic, cfe_output, perm_distribution, uncorrected_pvalues); diff --git a/cmd/mrclusterstats.cpp b/cmd/mrclusterstats.cpp index 1cdd6d76e6..ffbe28bd4b 100644 --- a/cmd/mrclusterstats.cpp +++ b/cmd/mrclusterstats.cpp @@ -134,7 +134,7 @@ class SubjectVoxelImport : public SubjectDataImportBase void operator() (matrix_type::RowXpr row) const override { assert (v2v); - assert (row.size() == size()); + assert (size_t(row.size()) == size()); Image temp (data); // For thread-safety for (size_t i = 0; i != size(); ++i) { assign_pos_of ((*v2v)[i]).to (temp); diff --git a/cmd/vectorstats.cpp b/cmd/vectorstats.cpp index 3ec5402bbb..331c49fb7f 100644 --- a/cmd/vectorstats.cpp +++ b/cmd/vectorstats.cpp @@ -219,7 +219,6 @@ void run() std::shared_ptr enhancer; matrix_type null_distribution, uncorrected_pvalues; matrix_type empirical_distribution; // unused - Stats::PermTest::run_permutations (glm_test, enhancer, empirical_distribution, default_tvalues, null_distribution, uncorrected_pvalues); diff --git a/core/math/stats/glm.cpp b/core/math/stats/glm.cpp index 495f4d6f44..545d2380d7 100644 --- a/core/math/stats/glm.cpp +++ b/core/math/stats/glm.cpp @@ -18,8 +18,6 @@ #include "misc/bitset.h" #include "thread_queue.h" -#define GLM_BATCH_SIZE 1024 - namespace MR { namespace Math @@ -264,7 +262,7 @@ namespace MR global_std_effect_size (std_effect_size), global_stdev (stdev) { - assert (design_fixed.cols() + extra_columns.size() == contrasts[0].cols()); + assert (size_t(design_fixed.cols()) + extra_columns.size() == size_t(contrasts[0].cols())); } bool operator() (const size_t& element_index) { @@ -314,14 +312,17 @@ namespace MR // Same model partitioning as is used in FSL randomise Contrast::Partition Contrast::partition (const matrix_type& design) const { - const matrix_type D = Math::pinv (design.transpose() * design); + // eval() calls necessary for older versions of Eigen / compiler to work: + // can't seem to map Eigen template result to const matrix_type& as the Math::pinv() input + // TODO See if some better template trickery can be done + const matrix_type D = Math::pinv ((design.transpose() * design).eval()); // Note: Cu is transposed with respect to how contrast matrices are stored elsewhere const matrix_type Cu = Eigen::FullPivLU (c).kernel(); - const auto inv_cDc = Math::pinv (c * D * c.transpose()); + const matrix_type inv_cDc = Math::pinv ((c * D * c.transpose()).eval()); // Note: Cv is transposed with respect to convention just as Cu is const matrix_type Cv = Cu - c.transpose() * inv_cDc * c * D * Cu; const matrix_type X = design * D * c.transpose() * inv_cDc; - const matrix_type Z = design * D * Cv * Math::pinv (Cv.transpose() * D * Cv); + const matrix_type Z = design * D * Cv * Math::pinv ((Cv.transpose() * D * Cv).eval()); return Partition (X, Z); } @@ -369,7 +370,7 @@ namespace MR void TestFixed::operator() (const matrix_type& shuffling_matrix, matrix_type& output) const { - assert (shuffling_matrix.rows() == num_subjects()); + assert (size_t(shuffling_matrix.rows()) == num_subjects()); if (!(size_t(output.rows()) == num_elements() && size_t(output.cols()) == num_outputs())) output.resize (num_elements(), num_outputs()); @@ -408,7 +409,7 @@ namespace MR const default_type one_over_dof = 1.0 / (num_subjects() - 
partitions[ic].rank_x - partitions[ic].rank_z); sse = (Rm*Sy).colwise().squaredNorm(); //VAR (sse.size()); - for (ssize_t ie = 0; ie != num_elements(); ++ie) { + for (size_t ie = 0; ie != num_elements(); ++ie) { beta.noalias() = c[ic].matrix() * lambdas.col (ie); //VAR (beta.rows()); //VAR (beta.cols()); @@ -525,7 +526,7 @@ namespace MR Mfull_masked.resize (finite_count, num_factors()); y_masked.resize (finite_count); perm_matrix_mask.clear (true); - ssize_t out_index = 0; + size_t out_index = 0; for (size_t in_index = 0; in_index != num_subjects(); ++in_index) { if (element_mask[in_index]) { Mfull_masked.block (out_index, 0, 1, M.cols()) = M.row (in_index); @@ -542,7 +543,7 @@ namespace MR } } assert (out_index == ssize_t(finite_count)); - assert (perm_matrix_mask.count() == ssize_t(finite_count)); + assert (perm_matrix_mask.count() == finite_count); // Only after we've reduced the design matrix do we now reduce the permutation matrix perm_matrix_masked.resize (finite_count, num_subjects()); out_index = 0; diff --git a/core/math/stats/shuffle.cpp b/core/math/stats/shuffle.cpp index ce55e69a0c..b65a9bdfdc 100644 --- a/core/math/stats/shuffle.cpp +++ b/core/math/stats/shuffle.cpp @@ -149,10 +149,9 @@ namespace MR bool Shuffler::operator() (Shuffle& output) { output.index = counter; - if (counter + 1 >= nshuffles) { + if (counter >= nshuffles) { if (progress) progress.reset (nullptr); - counter = nshuffles; output.data.resize (0, 0); return false; } diff --git a/src/stats/permtest.cpp b/src/stats/permtest.cpp index e164409183..31ed021623 100644 --- a/src/stats/permtest.cpp +++ b/src/stats/permtest.cpp @@ -170,6 +170,9 @@ namespace MR matrix_type& default_statistics) { assert (stats_calculator); + default_statistics.resize (stats_calculator->num_elements(), stats_calculator->num_outputs()); + default_enhanced_statistics.resize (stats_calculator->num_elements(), stats_calculator->num_outputs()); + const matrix_type default_shuffle (matrix_type::Identity (stats_calculator->num_subjects(), stats_calculator->num_subjects())); (*stats_calculator) (default_shuffle, default_statistics); @@ -196,6 +199,7 @@ namespace MR Math::Stats::Shuffler shuffler (stats_calculator->num_subjects(), false, "Running permutations"); perm_dist.resize (shuffler.size(), stats_calculator->num_outputs()); uncorrected_pvalues.resize (stats_calculator->num_elements(), stats_calculator->num_outputs()); + vector> global_uncorrected_pvalue_count (stats_calculator->num_outputs(), vector (stats_calculator->num_elements(), 0)); { Processor processor (stats_calculator, enhancer, From 7f896aea068cf686b0e41160ad18802bf8f32954 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Wed, 13 Dec 2017 16:10:37 +1100 Subject: [PATCH 0091/1471] GLM: Fixes for compatibility with Eigen 3.2.0 --- cmd/vectorstats.cpp | 2 +- core/math/stats/glm.cpp | 19 +++++++++++++------ 2 files changed, 14 insertions(+), 7 deletions(-) diff --git a/cmd/vectorstats.cpp b/cmd/vectorstats.cpp index 331c49fb7f..be8e48465d 100644 --- a/cmd/vectorstats.cpp +++ b/cmd/vectorstats.cpp @@ -91,7 +91,7 @@ class SubjectVectorImport : public SubjectDataImportBase void operator() (matrix_type::RowXpr row) const override { - assert (row.size() == size()); + assert (size_t(row.size()) == size()); row = data; } diff --git a/core/math/stats/glm.cpp b/core/math/stats/glm.cpp index 545d2380d7..ae88c353dd 100644 --- a/core/math/stats/glm.cpp +++ b/core/math/stats/glm.cpp @@ -190,7 +190,16 @@ namespace MR #else ++progress; #endif - vector_type sse = (measurements - design * 
betas).colwise().squaredNorm(); + // Explicit calculation of residuals before SSE, rather than in a single + // step, appears to be necessary for compatibility with Eigen 3.2.0 + const matrix_type residuals = (measurements - design * betas); +#ifdef GLM_ALL_STATS_DEBUG + std::cerr << "Residuals: " << residuals.rows() << " x " << residuals.cols() << ", max " << residuals.array().maxCoeff() << "\n"; +#else + ++progress; +#endif + vector_type sse (residuals.cols()); + sse = residuals.colwise().squaredNorm(); #ifdef GLM_ALL_STATS_DEBUG std::cerr << "sse: " << sse.size() << ", max " << sse.maxCoeff() << "\n"; #else @@ -413,10 +422,8 @@ namespace MR beta.noalias() = c[ic].matrix() * lambdas.col (ie); //VAR (beta.rows()); //VAR (beta.cols()); - const auto numerator = (beta.transpose() * XtX * beta) / c[ic].rank(); - assert (numerator.rows() == 1); - assert (numerator.cols() == 1); - const value_type F = numerator (0, 0) / (one_over_dof * sse[ie]); + const value_type F = ((beta.transpose() * XtX * beta) (0,0) / c[ic].rank()) / + (one_over_dof * sse[ie]); if (!std::isfinite (F)) { output (ie, ic) = value_type(0); } else if (c[ic].is_F()) { @@ -542,7 +549,7 @@ namespace MR } } } - assert (out_index == ssize_t(finite_count)); + assert (out_index == finite_count); assert (perm_matrix_mask.count() == finite_count); // Only after we've reduced the design matrix do we now reduce the permutation matrix perm_matrix_masked.resize (finite_count, num_subjects()); From 40aa52ea7928b0c40584da6566f258de94b9f777 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Wed, 13 Dec 2017 17:44:03 +1100 Subject: [PATCH 0092/1471] BitSet class: Fix and cleanup - Fix stream output operator displaying BitSet contents as hexadecimal data. - Standardise some of the code regarding handling of BitSet sizes that are not a factor of 8. --- core/misc/bitset.cpp | 69 ++++++++++++++++++++++---------------------- core/misc/bitset.h | 13 +++++---- 2 files changed, 43 insertions(+), 39 deletions(-) diff --git a/core/misc/bitset.cpp b/core/misc/bitset.cpp index 767fe86fb5..48d78c89f2 100644 --- a/core/misc/bitset.cpp +++ b/core/misc/bitset.cpp @@ -11,7 +11,6 @@ * For more details, see http://www.mrtrix.org/. */ - #include "misc/bitset.h" @@ -25,6 +24,7 @@ namespace MR { const char BitSet::dbyte_to_hex[16] = {'0','1','2','3','4','5','6','7','8','9','A','B','C','D','E','F'}; + BitSet::BitSet (const size_t b, const bool allocator) : bits (b), bytes ((bits + 7) / 8), @@ -34,6 +34,7 @@ namespace MR { } + BitSet::BitSet (const BitSet& that) : bits (that.bits), bytes (that.bytes), @@ -43,6 +44,7 @@ namespace MR { } + BitSet::~BitSet() { delete[] data; data = nullptr; } @@ -50,10 +52,8 @@ namespace MR { - void BitSet::resize (const size_t new_size, const bool allocator) { - size_t new_bits; new_bits = new_size; const size_t new_bytes = (new_bits + 7) / 8; @@ -62,8 +62,9 @@ namespace MR { if (new_bytes > bytes) { memcpy (new_data, data, bytes); memset (new_data + bytes, (allocator ? 0xFF : 0x00), new_bytes - bytes); - const uint8_t mask = 0xFF << excess_bits(); - data[bytes - 1] = allocator ? (data[bytes - 1] | mask) : (data[bytes - 1] & ~mask); + data[bytes - 1] = allocator ? + (data[bytes - 1] | excess_bit_mask()) : + (data[bytes - 1] & ~excess_bit_mask()); } else { memcpy (new_data, data, new_bytes); } @@ -75,55 +76,49 @@ namespace MR { bytes = new_bytes; data = new_data; new_data = nullptr; - } + void BitSet::clear (const bool allocator) { memset(data, (allocator ? 
0xFF : 0x00), bytes); } + bool BitSet::full() const { - - const size_t bytes_to_test = (bits % 8) ? bytes - 1 : bytes; + const size_t bytes_to_test = have_excess_bits() ? bytes - 1 : bytes; for (size_t i = 0; i != bytes_to_test; ++i) { if (data[i] != 0xFF) return false; } - if (!(bits % 8)) + if (!have_excess_bits()) return true; - - const uint8_t mask = 0xFF << excess_bits(); - if ((data[bytes - 1] | mask) != 0xFF) + if ((data[bytes - 1] | excess_bit_mask()) != 0xFF) return false; return true; - } + bool BitSet::empty() const { - - const size_t bytes_to_test = (bits % 8) ? bytes - 1 : bytes; + const size_t bytes_to_test = have_excess_bits() ? bytes - 1 : bytes; for (size_t i = 0; i != bytes_to_test; ++i) { if (data[i]) return false; } - if (!(bits % 8)) + if (!have_excess_bits()) return true; - - const size_t excess_bits = bits - (8 * (bytes - 1)); - const uint8_t mask = ~(0xFF << excess_bits); - if (data[bytes - 1] & mask) + if (data[bytes - 1] & ~excess_bit_mask()) return false; return true; - } + size_t BitSet::count () const { size_t count = 0; @@ -138,16 +133,18 @@ namespace MR { + std::ostream& operator<< (std::ostream& stream, BitSet& d) { + if (!d.bytes) + return stream; stream << "0x"; - if (d.excess_bits()) { - const uint8_t mask = 0xFF << d.excess_bits(); - stream << d.byte_to_hex (d.data[d.bytes - 1] & mask); - for (size_t i = d.bytes - 2; i--;) + if (d.have_excess_bits()) { + stream << d.byte_to_hex (d.data[d.bytes - 1] & (0xFF >> d.excess_bits())); + for (ssize_t i = d.bytes - 2; i >= 0; --i) stream << d.byte_to_hex (d.data[i]); } else { - for (size_t i = d.bytes - 1; i--;) + for (ssize_t i = d.bytes - 1; i >= 0; --i) stream << d.byte_to_hex (d.data[i]); } return stream; @@ -158,7 +155,6 @@ namespace MR { - BitSet& BitSet::operator= (const BitSet& that) { delete[] data; @@ -170,15 +166,15 @@ namespace MR { } + bool BitSet::operator== (const BitSet& that) const { if (bits != that.bits) return false; - if (bits % bytes) { - if (memcmp(data, that.data, bytes - 1)) + if (have_excess_bits()) { + if (memcmp (data, that.data, bytes - 1)) return false; - const uint8_t mask = ~(0xFF << excess_bits()); - if ((data[bytes - 1] & mask) != (that.data[bytes - 1] & mask)) + if ((data[bytes - 1] & ~excess_bit_mask()) != (that.data[bytes - 1] & ~excess_bit_mask())) return false; return true; } else { @@ -187,12 +183,14 @@ namespace MR { } + bool BitSet::operator!= (const BitSet& that) const { return (!(*this == that)); } + BitSet& BitSet::operator|= (const BitSet& that) { assert (bits == that.bits); @@ -202,6 +200,7 @@ namespace MR { } + BitSet& BitSet::operator&= (const BitSet& that) { assert (bits == that.bits); @@ -211,6 +210,7 @@ namespace MR { } + BitSet& BitSet::operator^= (const BitSet& that) { assert (bits == that.bits); @@ -220,6 +220,7 @@ namespace MR { } + BitSet BitSet::operator| (const BitSet& that) const { BitSet result (*this); @@ -228,6 +229,7 @@ namespace MR { } + BitSet BitSet::operator& (const BitSet& that) const { BitSet result (*this); @@ -236,6 +238,7 @@ namespace MR { } + BitSet BitSet::operator^ (const BitSet& that) const { BitSet result (*this); @@ -244,6 +247,7 @@ namespace MR { } + BitSet BitSet::operator~() const { BitSet result (*this); @@ -254,7 +258,4 @@ namespace MR { - - - } diff --git a/core/misc/bitset.h b/core/misc/bitset.h index 47a408ba69..543264a719 100644 --- a/core/misc/bitset.h +++ b/core/misc/bitset.h @@ -18,6 +18,7 @@ #include #include +#include "debug.h" #include "mrtrix.h" @@ -107,8 +108,8 @@ namespace MR { * data. 
* \returns a Value or ConstValue class used to manipulate the bit data at * the specified index */ - ConstValue operator[] (const size_t i) const { return ConstValue (*this, i); } - Value operator[] (const size_t i) { return Value (*this, i); } + ConstValue operator[] (const size_t i) const { assert (i < bits); return ConstValue (*this, i); } + Value operator[] (const size_t i) { assert (i < bits); return Value (*this, i); } //! the number of boolean elements in the set /*! The size of the BitSet. Note that this is the number of boolean values @@ -190,10 +191,12 @@ namespace MR { protected: - size_t bits; - size_t bytes; + size_t bits; + size_t bytes; - size_t excess_bits() const { return (bits - (8 * (bytes - 1))); } + bool have_excess_bits() const { return (bits & size_t(0x03)); } + size_t excess_bits() const { return (8*bytes - bits); } + uint8_t excess_bit_mask() const { assert (have_excess_bits()); return 0xFF << excess_bits(); } bool test (const size_t index) const { From a4f773b1639da92d18a03e02203dd8bd151fcc95 Mon Sep 17 00:00:00 2001 From: J-Donald Tournier Date: Wed, 13 Dec 2017 15:50:00 +0000 Subject: [PATCH 0093/1471] MRView: fix bugs introduced by switch to Eigen::Quaternion --- src/gui/mrview/mode/volume.cpp | 8 ++++---- src/gui/mrview/window.h | 2 +- src/gui/projection.h | 26 +++++++++++++------------- 3 files changed, 18 insertions(+), 18 deletions(-) diff --git a/src/gui/mrview/mode/volume.cpp b/src/gui/mrview/mode/volume.cpp index 8ccb594fd0..531e596c66 100644 --- a/src/gui/mrview/mode/volume.cpp +++ b/src/gui/mrview/mode/volume.cpp @@ -289,9 +289,9 @@ namespace MR { const auto V2S = image.voxel2scanner(); const Eigen::Vector3f pos = V2S * Eigen::Vector3f { -0.5f, -0.5f, -0.5f }; - const Eigen::Vector3f vec_X = V2S.rotation() * Eigen::Vector3f { float(image.header().size(0)), 0.0f, 0.0f }; - const Eigen::Vector3f vec_Y = V2S.rotation() * Eigen::Vector3f { 0.0f, float(image.header().size(1)), 0.0f }; - const Eigen::Vector3f vec_Z = V2S.rotation() * Eigen::Vector3f { 0.0f, 0.0f, float(image.header().size(2)) }; + const Eigen::Vector3f vec_X = V2S.linear() * Eigen::Vector3f { float(image.header().size(0)), 0.0f, 0.0f }; + const Eigen::Vector3f vec_Y = V2S.linear() * Eigen::Vector3f { 0.0f, float(image.header().size(1)), 0.0f }; + const Eigen::Vector3f vec_Z = V2S.linear() * Eigen::Vector3f { 0.0f, 0.0f, float(image.header().size(2)) }; GL::mat4 T2S; T2S(0,0) = vec_X[0]; T2S(1,0) = vec_X[1]; @@ -354,7 +354,7 @@ namespace MR GL::mat4 S2T = GL::inv (T2S); float step_size = 0.5f * std::min ( { float(image()->header().spacing (0)), float(image()->header().spacing (1)), float(image()->header().spacing (2)) } ); - Eigen::Vector3f ray = image()->scanner2voxel().rotation() * projection.screen_normal(); + Eigen::Vector3f ray = image()->scanner2voxel().linear() * projection.screen_normal(); Eigen::Vector3f ray_real_space = ray; ray *= step_size; ray[0] /= image()->header().size(0); diff --git a/src/gui/mrview/window.h b/src/gui/mrview/window.h index 803a50ebce..7513613786 100644 --- a/src/gui/mrview/window.h +++ b/src/gui/mrview/window.h @@ -119,7 +119,7 @@ namespace MR void set_target (const Eigen::Vector3f& p) { camera_target = p; emit targetChanged(); } void set_FOV (float value) { field_of_view = value; emit fieldOfViewChanged(); } void set_plane (int p) { anatomical_plane = p; emit planeChanged(); } - void set_orientation (const Eigen::Quaternionf& V) { orient = V; emit orientationChanged(); } + void set_orientation (const Eigen::Quaternionf& V) { orient = V; orient.normalize(); 
emit orientationChanged(); } void set_scaling (float min, float max) { if (!image()) return; image()->set_windowing (min, max); } void set_snap_to_image (bool onoff) { snap_to_image_axes_and_voxel = onoff; snap_to_image_action->setChecked(onoff); emit focusChanged(); } diff --git a/src/gui/projection.h b/src/gui/projection.h index d4a95f5e31..2b275a4482 100644 --- a/src/gui/projection.h +++ b/src/gui/projection.h @@ -33,9 +33,9 @@ namespace MR class Projection { MEMALIGN(Projection) public: - Projection (GL::Area* parent, const GL::Font& font) : - glarea (parent), - font (font) { } + Projection (GL::Area* parent, const GL::Font& font) : + glarea (parent), + font (font) { } void set_viewport (const QWidget& frame, int x, int y, int w, int h) { viewport[0] = x; @@ -95,8 +95,8 @@ namespace MR MVP(2,0)*x[0] + MVP(2,1)*x[1] + MVP(2,2)*x[2] + MVP(2,3)); if (MVP(3,2)) S /= MVP(3,0)*x[0] + MVP(3,1)*x[1] + MVP(3,2)*x[2] + MVP(3,3); - S[0] = viewport[0] + 0.5f*viewport[2]*(1.0f+S[0]); - S[1] = viewport[1] + 0.5f*viewport[3]*(1.0f+S[1]); + S[0] = viewport[0] + 0.5f*viewport[2]*(1.0f+S[0]); + S[1] = viewport[1] + 0.5f*viewport[3]*(1.0f+S[1]); return S; } @@ -117,7 +117,7 @@ namespace MR iMVP(0,0)*x + iMVP(0,1)*y + iMVP(0,2)*depth + iMVP(0,3), iMVP(1,0)*x + iMVP(1,1)*y + iMVP(1,2)*depth + iMVP(1,3), iMVP(2,0)*x + iMVP(2,1)*y + iMVP(2,2)*depth + iMVP(2,3)); - if (MVP(3,2)) + if (MVP(3,2)) S /= iMVP(3,0)*x + iMVP(3,1)*y + iMVP(3,2)*depth + iMVP(3,3); return S; } @@ -150,7 +150,7 @@ namespace MR x *= 2.0f/viewport[2]; y *= 2.0f/viewport[3]; Eigen::Vector3f S (iMVP(0,0)*x + iMVP(0,1)*y, iMVP(1,0)*x + iMVP(1,1)*y, iMVP(2,0)*x + iMVP(2,1)*y); - if (MVP(3,2)) + if (MVP(3,2)) S /= iMVP(3,2)*depth + iMVP(3,3); return S; } @@ -174,8 +174,8 @@ namespace MR void render_crosshairs (const Eigen::Vector3f& focus) const; - void setup_render_text (float red = 1.0, float green = 1.0, float blue = 0.0) const { - font.start (width(), height(), red, green, blue); + void setup_render_text (float red = 1.0, float green = 1.0, float blue = 0.0) const { + font.start (width(), height(), red, green, blue); } void done_render_text () const { font.stop(); } @@ -196,13 +196,13 @@ namespace MR void render_text_inset (int x, int y, const std::string& text, int inset = -1) const { QString s (text.c_str()); - if (inset < 0) + if (inset < 0) inset = font.metric.height() / 2; - if (x < inset) + if (x < inset) x = inset; - if (x + font.metric.width (s) + inset > width()) + if (x + font.metric.width (s) + inset > width()) x = width() - font.metric.width (s) - inset; - if (y < inset) + if (y < inset) y = inset; if (y + font.metric.height() + inset > height()) y = height() - font.metric.height() - inset; From df01d9ec38e6a432682ab108cf5afdcf720f00ce Mon Sep 17 00:00:00 2001 From: J-Donald Tournier Date: Wed, 13 Dec 2017 15:50:36 +0000 Subject: [PATCH 0094/1471] MRView transform tool: revert to activation via push button --- src/gui/mrview/tool/transform.cpp | 32 +++++++++++++++++++++---------- src/gui/mrview/tool/transform.h | 6 ++++++ 2 files changed, 28 insertions(+), 10 deletions(-) diff --git a/src/gui/mrview/tool/transform.cpp b/src/gui/mrview/tool/transform.cpp index e5a5a34f83..fc0983ae4b 100644 --- a/src/gui/mrview/tool/transform.cpp +++ b/src/gui/mrview/tool/transform.cpp @@ -28,27 +28,41 @@ namespace MR { VBoxLayout* main_box = new VBoxLayout (this); QLabel* label = new QLabel ( - "The transform tool is currently active
" - "Close this tool to deactivate.
" - "All camera view manipulations will now apply " + "When active, all camera view manipulations will apply " "to the main image, rather than to the camera"); label->setWordWrap (true); label->setAlignment (Qt::AlignHCenter); + main_box->addWidget (label); - main_box->addWidget (label, 0); + activate_button = new QPushButton ("Activate",this); + activate_button->setToolTip (tr ("Activate transform manipulation mode")); + activate_button->setIcon (QIcon (":/rotate.svg")); + activate_button->setCheckable (true); + connect (activate_button, SIGNAL (clicked(bool)), this, SLOT (onActivate (bool))); + main_box->addWidget (activate_button); main_box->addStretch (); + show(); } + void Transform::setActive (bool onoff) + { + activate_button->setChecked (onoff); + window().register_camera_interactor ( (isVisible() && onoff) ? this : nullptr ); + } + + void Transform::onActivate (bool onoff) + { + setActive (onoff); + } void Transform::showEvent (QShowEvent*) { - if (isVisible()) - window().register_camera_interactor (this); + setActive (false); } @@ -57,8 +71,7 @@ namespace MR void Transform::closeEvent (QCloseEvent*) { - if (window().active_camera_interactor() == this) - window().register_camera_interactor(); + setActive (false); } @@ -66,8 +79,7 @@ namespace MR void Transform::hideEvent (QHideEvent*) { - if (window().active_camera_interactor() == this) - window().register_camera_interactor(); + setActive (false); } diff --git a/src/gui/mrview/tool/transform.h b/src/gui/mrview/tool/transform.h index 10677280b3..35a3ba10fa 100644 --- a/src/gui/mrview/tool/transform.h +++ b/src/gui/mrview/tool/transform.h @@ -42,9 +42,15 @@ namespace MR bool rotate_event () override; protected: + QPushButton *activate_button; virtual void showEvent (QShowEvent* event) override; virtual void closeEvent (QCloseEvent* event) override; virtual void hideEvent (QHideEvent* event) override; + + void setActive (bool onoff); + + protected slots: + void onActivate (bool); }; } From 7f536df687f256271a4493d9a7246443e9c3e983 Mon Sep 17 00:00:00 2001 From: J-Donald Tournier Date: Wed, 13 Dec 2017 16:52:00 +0000 Subject: [PATCH 0095/1471] MRView: fix volume tool focus depth --- src/gui/mrview/mode/volume.cpp | 13 ++++++++++++- src/gui/mrview/mode/volume.h | 1 + 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/src/gui/mrview/mode/volume.cpp b/src/gui/mrview/mode/volume.cpp index 531e596c66..0e708d1a97 100644 --- a/src/gui/mrview/mode/volume.cpp +++ b/src/gui/mrview/mode/volume.cpp @@ -321,6 +321,18 @@ namespace MR + void Volume::tilt_event() + { + Base::tilt_event(); + + auto MV = adjust_projection_matrix (GL::transpose (GL::mat4 (orientation()))); + Eigen::Vector3f screen_normal (MV(2,0), MV(2,1), MV(2,2)); + screen_normal.normalize(); + + window().set_target (window().target() + screen_normal * screen_normal.dot (window().focus() - window().target())); + } + + @@ -332,7 +344,6 @@ namespace MR setup_projection (orientation(), projection); GL_CHECK_ERROR; - overlays_for_3D.clear(); render_tools (projection, true); gl::Disable (gl::BLEND); diff --git a/src/gui/mrview/mode/volume.h b/src/gui/mrview/mode/volume.h index b7d6a24b16..cbb1af125b 100644 --- a/src/gui/mrview/mode/volume.h +++ b/src/gui/mrview/mode/volume.h @@ -41,6 +41,7 @@ namespace MR } virtual void paint (Projection& projection); + virtual void tilt_event (); protected: GL::VertexBuffer volume_VB, volume_VI; From 4a177ae5d4dd3e76e2aca9dd43293204c02900c7 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Tue, 19 Dec 2017 10:56:50 +1100 Subject: [PATCH 
0096/1471] vectorstats: Fix test 2 --- testing/data | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testing/data b/testing/data index 0ed349d5aa..1a4e3267f3 160000 --- a/testing/data +++ b/testing/data @@ -1 +1 @@ -Subproject commit 0ed349d5aaf3123246d685483b4f18de0f455261 +Subproject commit 1a4e3267f36934893e615002e71c39bf497061ea From 99778ea197418c7fb5c8b9442ccb67c2d9f096f3 Mon Sep 17 00:00:00 2001 From: Lee Reid Date: Mon, 22 Jan 2018 08:06:28 +1000 Subject: [PATCH 0097/1471] Added -include_ordered option to tckgen and tckedit. This enforces include regions to be passed through in order --- cmd/tckedit.cpp | 2 + cmd/tckgen.cpp | 8 +- src/dwi/tractography/editing/worker.cpp | 16 +- src/dwi/tractography/editing/worker.h | 6 +- src/dwi/tractography/properties.cpp | 2 + src/dwi/tractography/roi.cpp | 12 + src/dwi/tractography/roi.h | 273 +++++++++- src/dwi/tractography/tracking/exec.h | 12 +- .../cmd/testing_unit_tests_tractography.cpp | 56 ++ .../unit_tests/tractography/roi_unit_tests.h | 505 ++++++++++++++++++ testing/src/unit_tests/unit_test.h | 63 +++ testing/tests/unittests | 1 + 12 files changed, 925 insertions(+), 31 deletions(-) create mode 100644 testing/cmd/testing_unit_tests_tractography.cpp create mode 100644 testing/src/unit_tests/tractography/roi_unit_tests.h create mode 100644 testing/src/unit_tests/unit_test.h create mode 100644 testing/tests/unittests diff --git a/cmd/tckedit.cpp b/cmd/tckedit.cpp index 37ada0e6f8..af11ab17a3 100644 --- a/cmd/tckedit.cpp +++ b/cmd/tckedit.cpp @@ -162,6 +162,8 @@ void run () erase_if_present (properties, "max_weight"); Editing::load_properties (properties); + + // Parameters that the worker threads need to be aware of, but do not appear in Properties const bool inverse = get_options ("inverse").size(); const bool ends_only = get_options ("ends_only").size(); diff --git a/cmd/tckgen.cpp b/cmd/tckgen.cpp index 38f03c6bee..3828fb08e5 100644 --- a/cmd/tckgen.cpp +++ b/cmd/tckgen.cpp @@ -227,7 +227,6 @@ void run () auto opt = get_options ("algorithm"); if (opt.size()) algorithm = opt[0][0]; - load_rois (properties); Tracking::load_streamline_properties (properties); @@ -251,8 +250,15 @@ void run () WARN ("Overriding -seeds option (maximum number of seeds that will be attempted to track from), as seeds can only provide a finite number"); properties["max_num_seeds"] = str (properties.seeds.get_total_count()); + } + if (get_options("include_ordered").size() && !get_options("seed_unidirectional").size()) + throw Exception("-include_ordered requires that -seed_unidirectional is set, but this is not so"); + + //Now we are certain options are valid, load ROIs + load_rois(properties); + switch (algorithm) { case 0: Exec ::run (argument[0], argument[1], properties); diff --git a/src/dwi/tractography/editing/worker.cpp b/src/dwi/tractography/editing/worker.cpp index 57d42e7921..3c90f100b2 100644 --- a/src/dwi/tractography/editing/worker.cpp +++ b/src/dwi/tractography/editing/worker.cpp @@ -41,8 +41,8 @@ namespace MR { // Assign to ROIs if (properties.include.size() || properties.exclude.size()) { - include_visited.assign (properties.include.size(), false); - + include_visited.reset(); + if (ends_only) { for (size_t i = 0; i != 2; ++i) { const Eigen::Vector3f& p (i ? 
in.back() : in.front()); @@ -63,15 +63,21 @@ namespace MR { } } } - + // Make sure all of the include regions were visited - for (const auto& i : include_visited) { + if (!include_visited.all_entered()) { + if (inverse) + in.swap(out); + return true; + } + + /*for (const auto& i : include_visited) { if (!i) { if (inverse) in.swap (out); return true; } - } + }*/ } diff --git a/src/dwi/tractography/editing/worker.h b/src/dwi/tractography/editing/worker.h index 29396f6f42..205071d20a 100644 --- a/src/dwi/tractography/editing/worker.h +++ b/src/dwi/tractography/editing/worker.h @@ -43,14 +43,14 @@ namespace MR { inverse (inv), ends_only (end), thresholds (p), - include_visited (properties.include.size(), false) { } + include_visited (properties.include.size_unordered(), properties.include.size_ordered()) { } Worker (const Worker& that) : properties (that.properties), inverse (that.inverse), ends_only (that.ends_only), thresholds (that.thresholds), - include_visited (properties.include.size(), false) { } + include_visited (properties.include.size_unordered(), properties.include.size_ordered()) { } bool operator() (Streamline<>&, Streamline<>&) const; @@ -72,7 +72,7 @@ namespace MR { float step_size; } thresholds; - mutable vector include_visited; + mutable ROISet_ContainsLoopState include_visited; }; diff --git a/src/dwi/tractography/properties.cpp b/src/dwi/tractography/properties.cpp index 10c26a30ea..89ef7c4790 100644 --- a/src/dwi/tractography/properties.cpp +++ b/src/dwi/tractography/properties.cpp @@ -26,6 +26,8 @@ namespace MR { std::pair range = roi.equal_range ("include"); for (iter it = range.first; it != range.second; ++it) include.add (it->second); + range = roi.equal_range("include_ordered"); + for (iter it = range.first; it != range.second; ++it) include.add_ordered(it->second); range = roi.equal_range ("exclude"); for (iter it = range.first; it != range.second; ++it) exclude.add (it->second); range = roi.equal_range ("mask"); diff --git a/src/dwi/tractography/roi.cpp b/src/dwi/tractography/roi.cpp index c604f23e04..0364592404 100644 --- a/src/dwi/tractography/roi.cpp +++ b/src/dwi/tractography/roi.cpp @@ -35,6 +35,14 @@ namespace MR { .allow_multiple() + Argument ("image").type_text() + + Option("include_ordered", + "specify an inclusion region of interest, as either a binary mask image, " + "or as a sphere using 4 comma-separared values (x,y,z,radius). Streamlines " + "must traverse ALL inclusion_ordered regions in the order they are " + "specified in order to be accepted.") + .allow_multiple() + + Argument("image").type_text() + + Option ("exclude", "specify an exclusion region of interest, as either a binary mask image, " "or as a sphere using 4 comma-separared values (x,y,z,radius). 
Streamlines " @@ -57,6 +65,10 @@ namespace MR { for (size_t i = 0; i < opt.size(); ++i) properties.include.add (ROI (opt[i][0])); + opt = get_options("include_ordered"); + for (size_t i = 0; i < opt.size(); ++i) + properties.include.add_ordered(ROI(opt[i][0])); + opt = get_options ("exclude"); for (size_t i = 0; i < opt.size(); ++i) properties.exclude.add (ROI (opt[i][0])); diff --git a/src/dwi/tractography/roi.h b/src/dwi/tractography/roi.h index fd89e759c4..81781ccdbc 100644 --- a/src/dwi/tractography/roi.h +++ b/src/dwi/tractography/roi.h @@ -119,39 +119,278 @@ namespace MR + /** + Contains a state that is passed into and out of ROISubSet::contains, allowing it to be used in an external loop for an array of coordinates + */ + class ROISubSet_ContainsLoopState { + MEMALIGN(ROISubSet_ContainsLoopState) + + + public: + ROISubSet_ContainsLoopState(size_t no_rois, bool ordered) { + ordered_mode = ordered; + entered_by_roi = vector(); + reset(no_rois); + + } + + /** + Resets for re-use. ordered_mode and size remain unchanged + */ + void reset() + { + reset(entered_by_roi.size()); + } + + /** + Resets for re-use. ordered_mode remains unchanged + */ + void reset(size_t no_rois) { + if (ordered_mode) { + last_entered_ROI_index = -1; + next_legal_ROI_index = 0; + } + valid = true; + + + entered_by_roi.assign(no_rois, false); + } + + /** + Returns true if all ROIs have been entered legally, or if there are no ROIs + */ + bool all_entered() { + if (ordered_mode) + return valid && (next_legal_ROI_index == entered_by_roi.size()); + else { + for (size_t i = 0; i < entered_by_roi.size(); i++) { + if (!entered_by_roi[i]) + return false; + } + return true; + } + } - class ROISet { MEMALIGN(ROISet) - public: - ROISet () { } + bool ordered_mode;//true if the parent ROISubSet is in ordered mode + bool valid;//true if the order at which ROIs have been entered is legal + size_t last_entered_ROI_index;//ordered only + size_t next_legal_ROI_index;//ordered only + vector entered_by_roi;//unordered only + }; - void clear () { R.clear(); } - size_t size () const { return (R.size()); } - const ROI& operator[] (size_t i) const { return (R[i]); } - void add (const ROI& roi) { R.push_back (roi); } + + + /** + Contains a state that is passed into and out of ROISet::contains, allowing it to be used in an external loop for an array of coordinates + */ + class ROISet_ContainsLoopState { + MEMALIGN(ROISet_ContainsLoopState) + + + public: + ROISet_ContainsLoopState(size_t no_rois_unordered, size_t no_rois_ordered): + unordered(ROISubSet_ContainsLoopState(no_rois_unordered, false)), + ordered(ROISubSet_ContainsLoopState(no_rois_ordered, true)) + { + + } + + /** + Resets for re-use, retaining the same size + */ + void reset() { + unordered.reset(); + ordered.reset(); + } + + /** + Resets for re-use. + */ + void reset(size_t no_rois_unordered, size_t no_rois_ordered){ + unordered.reset(no_rois_unordered); + ordered.reset(no_rois_ordered); + } + + /** + True if all ROIs are entered. True if there are no ROIs + */ + bool all_entered() { + return ordered.all_entered() && unordered.all_entered(); + } + + ROISubSet_ContainsLoopState unordered; + ROISubSet_ContainsLoopState ordered; + }; - bool contains (const Eigen::Vector3f& p) const { + + + /** + Collection of ROIs. Can respond as to whether a coordinate is within any of these ROIs. 
+ If set to ordered mode, contains(ROISubSetLoopState) only responds true if the ROIs are passed through in order + */ + class ROISubSet { + MEMALIGN(ROISubSet) + public: + ROISubSet(bool ordered_arg) { ordered = ordered_arg; } + + void clear() { R.clear(); } + size_t size() const { return (R.size()); } + const ROI& operator[] (size_t i) const { return (R[i]); } + void add(const ROI& roi) { R.push_back(roi); } + + /** + Returns true if any ROI contains the specified coordinate. + Ordering takes no effect + */ + bool contains(const Eigen::Vector3f& p) const { for (size_t n = 0; n < R.size(); ++n) - if (R[n].contains (p)) return (true); + if (R[n].contains(p)) return (true); return false; - } + } - void contains (const Eigen::Vector3f& p, vector& retval) const { + /** + Fills retval with true/false as to whether each ROI within this subset contains the specified coordinate. + Ordering takes no effect. + */ + void contains(const Eigen::Vector3f& p, vector& retval) const { for (size_t n = 0; n < R.size(); ++n) - if (R[n].contains (p)) retval[n] = true; - } + if (R[n].contains(p)) + retval[n] = true; + } + + /** + For use in an external loop. Provides a ROISubSet_ContainsLoopState which can specify whether each ROI within this subset has been entered. + Ordering takes effect if switched on + */ + void contains(const Eigen::Vector3f& p, ROISubSet_ContainsLoopState& loop_state) const { + + if (ordered) { + if (loop_state.valid)//do nothing if the series of cooredinates have already performed something illegal + for (size_t n = 0; n < R.size(); ++n) + if (R[n].contains(p)) { + if (n == loop_state.next_legal_ROI_index) { + //entered the next ROI in the list. Legal. + loop_state.last_entered_ROI_index = n; + loop_state.next_legal_ROI_index = n + 1; + } + else if (n != loop_state.last_entered_ROI_index) { + //entered an ROI in the wrong order + //this may be due: + //a) the series of coordinates entering ROI X, entering ROI Y, and then re-entering ROI X, or + //b) entering ROI[n], when ROI[n-1] has not yet been entered + loop_state.valid = false; + } + break; + } - friend inline std::ostream& operator<< (std::ostream& stream, const ROISet& R) { + } + else { + for (size_t n = 0; n < R.size(); ++n) + loop_state.entered_by_roi[n] = loop_state.entered_by_roi[n] || R[n].contains(p); + } + + } + + friend inline std::ostream& operator<< (std::ostream& stream, const ROISubSet& R) { if (R.R.empty()) return (stream); vector::const_iterator i = R.R.begin(); stream << *i; ++i; for (; i != R.R.end(); ++i) stream << ", " << *i; - return stream; + return stream; + } + + private: + vector R; + bool ordered; + }; + + + /** + Collection of ROIs. Can respond as to whether a coordinate is within any of these ROIs. + Internally, this contains two sub-collections of ROIs - one that is ordered and one that is not. 
+ */ + class ROISet { + MEMALIGN(ROISet) + + + public: + ROISet () : + unordered ( ROISubSet(false)), + ordered ( ROISubSet(true)) + { + } - private: - vector R; + void clear () { + unordered.clear(); + ordered.clear(); + } + size_t size () const { + return unordered.size() + ordered.size(); + } + size_t size_unordered() const { + return unordered.size(); + } + size_t size_ordered() const { + return ordered.size(); + } + const ROI& operator[] (size_t i) const { + if (i < unordered.size()) { + return unordered[i]; + } + else { + return ordered[i - unordered.size()]; + } + } + /** + Adds an ROI to the (internal) unordered list + */ + void add (const ROI& roi) { unordered.add(roi); } + /** + Adds an ROI to the next position in the (internal) ordered list + */ + void add_ordered (const ROI& roi) { ordered.add(roi); } + + /** + Returns true if any ROI contains the specified coordinate + */ + bool contains (const Eigen::Vector3f& p) const { + return unordered.contains(p) || ordered.contains(p); + } + + /** + For use in an external loop. + OrderedROIs are tested but their order is ignored + */ + void contains (const Eigen::Vector3f& p, vector& retval) const { + unordered.contains(p, retval); + ordered.contains(p, retval); + } + + /** + For use in an external loop. Provides a ROISubSet_ContainsLoopState which can specify whether each ROI within this subset has been entered. + Ordering takes effect for the ordered ROIs + */ + void contains(const Eigen::Vector3f& p, ROISet_ContainsLoopState& state) const { + unordered.contains(p, state.unordered); + ordered.contains(p, state.ordered); + } + + + friend inline std::ostream& operator<< (std::ostream& stream, const ROISet& R) { + stream << R.unordered; + stream << R.ordered; + return stream; + } + + + + private: + ROISubSet unordered; + ROISubSet ordered; + }; diff --git a/src/dwi/tractography/tracking/exec.h b/src/dwi/tractography/tracking/exec.h index df3ccc5be9..e4329edb80 100644 --- a/src/dwi/tractography/tracking/exec.h +++ b/src/dwi/tractography/tracking/exec.h @@ -112,7 +112,8 @@ namespace MR S (shared), method (shared), track_excluded (false), - track_included (S.properties.include.size(), false) { } + track_included(S.properties.include.size_unordered(), S.properties.include.size_ordered()) //(S.properties.include.size(), false) + { } bool operator() (GeneratedTrack& item) { @@ -142,7 +143,7 @@ namespace MR Math::RNG thread_local_RNG; Method method; bool track_excluded; - vector track_included; + ROISet_ContainsLoopState track_included; term_t iterate () @@ -183,7 +184,7 @@ namespace MR { tck.clear(); track_excluded = false; - track_included.assign (track_included.size(), false); + track_included.reset(); method.dir = { NaN, NaN, NaN }; if (S.properties.seeds.is_finite()) { @@ -424,10 +425,11 @@ namespace MR bool traversed_all_include_regions () { - for (size_t n = 0; n < track_included.size(); ++n) + return track_included.all_entered(); + /*for (size_t n = 0; n < track_included.size(); ++n) if (!track_included[n]) return false; - return true; + return true;*/ } diff --git a/testing/cmd/testing_unit_tests_tractography.cpp b/testing/cmd/testing_unit_tests_tractography.cpp new file mode 100644 index 0000000000..ed487f75e1 --- /dev/null +++ b/testing/cmd/testing_unit_tests_tractography.cpp @@ -0,0 +1,56 @@ +/* Copyright (c) 2008-2017 the MRtrix3 contributors. + * + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. 
If a copy of the MPL was not distributed with this + * file, you can obtain one at http://mozilla.org/MPL/2.0/. + * + * MRtrix is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * For more details, see http://www.mrtrix.org/. + */ + + +#include "command.h" +#include "progressbar.h" +#include "datatype.h" +#include "math/rng.h" + +#include "image.h" +#include "unit_tests/tractography/roi_unit_tests.h" + +using namespace MR; +using namespace App; + +void usage () +{ + AUTHOR = "Lee Reid (lee.reid@csiro.au)"; + + SYNOPSIS = "Runs units tests for tractography-related classes"; + + ARGUMENTS + + Argument("run", "run the tests (else just show this message)"); +} + + +void run () +{ + + bool allPassed = Testing::UnitTests::Tractography::ROIUnitTests::ROIUnitTests::run(); + //Add further tests here like so: + //allPassed &= + //OR + //allPassed = allPassed && + + + if (allPassed) + { + std::cout << "All Passed"; + } + else + { + std::cout << "Failed"; + throw 1;//Register an error - command.h doesn't let us return a variable - it only listens for something being thrown + } +} diff --git a/testing/src/unit_tests/tractography/roi_unit_tests.h b/testing/src/unit_tests/tractography/roi_unit_tests.h new file mode 100644 index 0000000000..fb23697b83 --- /dev/null +++ b/testing/src/unit_tests/tractography/roi_unit_tests.h @@ -0,0 +1,505 @@ +/* Copyright (c) 2008-2017 the MRtrix3 contributors. + * + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, you can obtain one at http://mozilla.org/MPL/2.0/. + * + * MRtrix is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * For more details, see http://www.mrtrix.org/. + */ + + +#ifndef __testing_unittests_tractography_roiunittests_h__ +#define __testing_unittests_tractography_roiunittests_h__ + +#include "dwi/tractography/roi.h" +#include "unit_tests/unit_test.h" +using namespace MR::DWI::Tractography; +using namespace Eigen; + +using namespace MR::Testing::UnitTests; +namespace MR +{ + namespace Testing + { + namespace UnitTests + { + namespace Tractography + { + /** + Runs unit tests for classes closely related with ROIs + */ + class ROIUnitTests : MR::Testing::UnitTests::UnitTest + { + MEMALIGN(ROIUnitTests) + + + + public: + ROIUnitTests(): + state(0, 0), + MR::Testing::UnitTests::UnitTest("ROIUnitTests") + + {} + + /** + Runs unit tests for classes closely associated with ROIs + */ + static bool run() { + try { + ROIUnitTests t = ROIUnitTests(); + t.run_ROISet(); + + //Add additional ROI-based tests here + + return true; + } + catch (...) 
+ { + //A test failed + return false; + } + } + + private: + /** + Unit testing for ROISet + */ + void run_ROISet() + { + std::cout << "ROISet...\n"; + run_ROISet_contains_all(); + //Add additional tests here + + std::cout << "passed\n"; + } + + /** + Unit testing for external looping with contains(pos,state) --> state.contains_all() + */ + void run_ROISet_contains_all() + { + //NO ROIS + { + ROISet_Initialise(0); + std::array tck = { + Vector3f(0,0,0), + Vector3f(3,0,0), + Vector3f(0,5,0), + Vector3f(0,0,7), + Vector3f(11,0,7), + Vector3f(11,13,7), + Vector3f(11,13,0), + Vector3f(0,13,7), + }; + + //All entered should be true when there are no ROIs - there are non NOT entered. + check(state.all_entered()); + + + for (size_t i = 0; i < tck.size(); i++) + { + rs.contains(tck[i], state); + //All entered should still say true + check(state.all_entered(), "No ROI"); + } + } + + //UNORDERED ONLY + + //---ONE ROI + { + ROISet_Initialise(1); + std::array tck = { + Vector3f(3,0,0), + Vector3f(0,5,0), + Vector3f(0,0,7), + Vector3f(11,0,7), + Vector3f(11,13,7), + Vector3f(11,13,0), + Vector3f(0,13,7), + }; + + //All entered should be false before anything is tested + check(!state.all_entered(), "One ROI - pretest"); + + + for (size_t i = 0; i < tck.size(); i++) + { + rs.contains(tck[i], state); + //All entered should still say false + check(!state.all_entered(), "One ROI"); + } + + //Test one inside + rs.contains(Vector3f(0.1, 0.2, 0.3), state); + check(state.all_entered(), "One ROI final A"); + + //Test another that is outside and ensure that the state still says true + rs.contains(Vector3f(11, 17, 310), state); + check(state.all_entered(), "One ROI final B"); + } + + //---THREE ROIS + { + ROISet_Initialise(3); + std::array tck = { + Vector3f(3,0,0), + Vector3f(0,5,0), + Vector3f(0,0,7), + Vector3f(11,0,7), + Vector3f(10,0,0),//inside roi[1] + Vector3f(11,13,7), + Vector3f(11,13,0), + Vector3f(0,10,0),//inside roi[2] + Vector3f(0,13,7), + Vector3f(0,0,0),//inside roi[0] + Vector3f(1000,100,70), + }; + + //All entered should be false before anything is tested + check(!state.all_entered(), "three ROIs - pretest"); + + + for (size_t i = 0; i < tck.size(); i++) + { + rs.contains(tck[i], state); + bool resultShouldBe = i >= 9;//we enter all of them on the [9]th + check(state.all_entered() == resultShouldBe, "Three ROIs: " + str(i)); + } + + } + + + + //ORDERED ONLY + //---ONE ROI + { + /*The rois are at + 0,0,-100 + */ + ROISet_Initialise(1, true); + std::array tck = { + Vector3f(0,0,-100),//inside [0] + }; + + //All entered should be false before anything is tested + check(!state.all_entered(), "one ROI ordered - pretest"); + + for (size_t i = 0; i < tck.size(); i++) + { + rs.contains(tck[i], state); + bool resultShouldBe = i >= 0; + check(state.all_entered() == resultShouldBe, "One ROI ordered: " + str(i)); + } + + } + //---THREE ROIS CORRECT ORDER SIMPLE + { + /*The rois are at + 0,0,-100 + 10,0,-100 + 0,10,-100 + */ + ROISet_Initialise(3, true); + std::array tck = { + //outside + + Vector3f(0,0,-100),//inside [0] + Vector3f(10,0,-100),//inside [1] + Vector3f(0,10,-100),//inside [2] + Vector3f(0,10,100),//outside + }; + + //All entered should be false before anything is tested + check(!state.all_entered(), "three ROIs ordered - pretest"); + + + for (size_t i = 0; i < tck.size(); i++) + { + rs.contains(tck[i], state); + bool resultShouldBe = i >= 2;//we enter all of them on the [12]th + check(state.all_entered() == resultShouldBe, "Three ROIs ordered (simple): " + str(i)); + } + + } + //---THREE ROIS 
CORRECT ORDER + { + /*The rois are at + 0,0,-100 + 10,0,-100 + 0,10,-100 + */ + ROISet_Initialise(3, true); + std::array tck = { + //outside + Vector3f(3,0,0), + Vector3f(0,5,0), + Vector3f(0,0,7), + Vector3f(0,0,-100),//enter [0] + Vector3f(11,0,7),//outside, [0] done + Vector3f(0,0,-100),//re-enter [0] (this is legal) + Vector3f(110,0,7),//outside, [0] done + Vector3f(0,0,-100),//re-enter [0] (this is legal) + Vector3f(110,0,7),//outside, [0] done + Vector3f(10,0,-100),//inside; [0],[1] done + Vector3f(-110,0,7),//outside, [0],[1] done + Vector3f(10,0,-100),//re-enter [1] (this is legal) + Vector3f(0,10,-100),//inside roi[2]; [0],[1],[2] done + Vector3f(11,13,7),//outside, [0],[1],[2] done + Vector3f(11,13,0),//outside, [0],[1],[2] done + }; + + //All entered should be false before anything is tested + check(!state.all_entered(), "three ROIs ordered - pretest"); + + + for (size_t i = 0; i < tck.size(); i++) + { + rs.contains(tck[i], state); + bool resultShouldBe = i >= 12;//we enter all of them on the [12]th + check(state.all_entered() == resultShouldBe, "Three ROIs ordered: " + str(i)); + } + + } + //---THREE ROIS INCORRECT ORDER A->B->A + { + /*The rois are at + 0,0,-100 + 10,0,-100 + 0,10,-100 + */ + ROISet_Initialise(3, true); + std::array tck = { + //outside + Vector3f(3,0,0), + Vector3f(0,5,0), + Vector3f(0,0,7), + Vector3f(0,0,-100),//enter first + Vector3f(11,0,7),//outside, [0] done + Vector3f(0,0,-100),//re-enter first (this is legal) + Vector3f(110,0,7),//outside, [0] done + Vector3f(0,0,-100),//re-enter first (this is legal) + Vector3f(110,0,7),//outside, [0] done + Vector3f(10,0,-100),//inside; [0],[1] done + Vector3f(0,0,-100),//re-enter first <--------- this is NOT legal as we have entered region [1] + Vector3f(-110,0,7),//outside, [0],[1] done + Vector3f(10,0,-100),//re-enter second + Vector3f(0,10,-100),//inside roi[2]; [0],[1],[2] done + Vector3f(11,13,7),//outside, [0],[1],[2] done + Vector3f(11,13,0),//outside, [0],[1],[2] done + }; + + //All entered should be false before anything is tested + check(!state.all_entered(), "three ROIs ordered - pretest"); + + + for (size_t i = 0; i < tck.size(); i++) + { + rs.contains(tck[i], state); + bool resultShouldBe = false; + check(state.all_entered() == resultShouldBe, "Three ROIs ordered illegal ABA"); + } + + } + //---FOUR ROIS INCORRECT ORDER A->B->C->A->D + { + /*The rois are at + 0,0,-100 + 10,0,-100 + 0,10,-100 + 10,10,-100 + */ + ROISet_Initialise(4, true); + std::array tck = { + //outside + Vector3f(3,0,0), + Vector3f(0,5,0), + Vector3f(0,0,7), + Vector3f(0,0,-100),//enter first + Vector3f(11,0,7),//outside, [0] done + Vector3f(0,0,-100),//re-enter first (this is legal) + Vector3f(110,0,7),//outside, [0] done + Vector3f(0,0,-100),//re-enter first (this is legal) + Vector3f(110,0,7),//outside, [0] done + Vector3f(10,0,-100),//inside; [0],[1] done + Vector3f(-110,0,7),//outside, [0],[1] done + Vector3f(10,0,-100),//re-enter second (this is legal) + Vector3f(0,10,-100),//inside roi[2]; [0],[1],[2] done + Vector3f(11,13,7),//outside, [0],[1],[2] done + Vector3f(11,13,0),//outside, [0],[1],[2] done + Vector3f(0,0,-100),//re-enter first <--------- this is NOT legal as we have entered region [1] and [2] + Vector3f(10,10,-100),//inside roi[2]; [0],[1],[2],[3] done + }; + + //All entered should be false before anything is tested + check(!state.all_entered(), "three ROIs ordered - pretest"); + + + for (size_t i = 0; i < tck.size(); i++) + { + rs.contains(tck[i], state); + bool resultShouldBe = false;//we enter all of them on the 
[12]th + check(state.all_entered() == resultShouldBe, "Three ROIs ordered - illegal ABCA"); + } + + } + + + //COMBINATION + //---FOUR ORDERED ROIS (A-D), and Two unordered ROIs (J,K) + //---A->B->J->C->D->K->D->J->K->B + { + //The rois are at + Vector3f A = Vector3f(0, 0, -100); + Vector3f B = Vector3f(10, 0, -100); + Vector3f C = Vector3f(0, 10, -100); + Vector3f D = Vector3f(10, 10, -100); + //J and K: + Vector3f J = Vector3f(0, 0, 0); + Vector3f K = Vector3f(10, 0, 0); + + ROISet_Initialise(2, 4, true); + std::array tck = { + //outside + Vector3f(3,0,0), + Vector3f(0,5,0), + Vector3f(0,0,7), + A,//enter A + Vector3f(11,0,7),//outside + B,//enter B; A->B + Vector3f(11,0,7),//outside + J,C,D,K,//all entered once we enter K + D,//Legal re-entry into D + J,K,//Legal re-entry into unordered rois + B,//<---- Illegal re-entry into B + Vector3f(110,0,7)//outside + }; + + //All entered should be false before anything is tested + check(!state.all_entered(), "three ROIs ordered - pretest"); + + + for (size_t i = 0; i < tck.size(); i++) + { + rs.contains(tck[i], state); + bool resultShouldBe = i >= 10 && i < 14; + check(state.all_entered() == resultShouldBe, "FOUR ORDERED ROIS (A-D), and Two unordered ROIs (J,K): " + str(i)); + } + } + + } + + /** + Sets up the ROI set ready for tests to be run + */ + void ROISet_Initialise(size_t no_rois_unordered, size_t no_rois_ordered, bool ordered = false) + { + rs = ROISet(); + for (size_t i = 0; i < no_rois_unordered; i++){ + rs.add(ROISet_GetROI(i)); + } + for (size_t i = 0; i < no_rois_ordered; i++){ + rs.add_ordered(ROISet_GetROI(i, -100)); + } + state.reset(rs.size_unordered(), rs.size_ordered()); + } + /** + Sets up the ROI set ready for tests to be run + */ + void ROISet_Initialise(size_t no_rois, bool ordered=false) + { + rs = ROISet(); + for (size_t i = 0; i < no_rois; i++) + { + + if (ordered) + { + rs.add_ordered(ROISet_GetROI(i,-100)); + } + else + { + rs.add(ROISet_GetROI(i)); + } + + + } + + state = ROISet_ContainsLoopState(rs.size_unordered(), rs.size_ordered()); + } + + /** + Returns a spherical ROI for the ROISet_Initialise method or similar + */ + ROI ROISet_GetROI(size_t i, float offset_z = 0) + { + Vector3f position; + //Method could be made much nicer with a modulo operator but time is of the essence + + if (i == 0) + { + position = Vector3f(0, 0, offset_z); + + } + else if (i == 1) + { + position = Vector3f(10, 0, offset_z); + } + else if (i == 2) + { + position = Vector3f(0, 10, offset_z); + } + else if (i == 3) + { + position = Vector3f(10, 10, offset_z); + } + else if (i == 4) + { + position = Vector3f(0, 0, 10+ offset_z); + } + else if (i == 5) + { + position = Vector3f(10, 0, 10 + offset_z); + } + else if (i == 6) + { + position = Vector3f(0, 10, 10 + offset_z); + } + else if (i == 7) + { + position = Vector3f(10, 10, 10 + offset_z); + } + else + { + throw Exception("Not implemented"); + } + + return ROI(position, 1); + } + + /** + For debugging + */ + void print_position(Vector3f p) + { + std::cout << str(p[0]) << " " << str(p[1]) << " " << str(p[2]) << "\n"; + } + + //Parameters: + ROISet rs; + ROISet_ContainsLoopState state; + + + + + + }; + } + } + } +} + +#endif diff --git a/testing/src/unit_tests/unit_test.h b/testing/src/unit_tests/unit_test.h new file mode 100644 index 0000000000..e9cbaaa8c6 --- /dev/null +++ b/testing/src/unit_tests/unit_test.h @@ -0,0 +1,63 @@ +/* Copyright (c) 2008-2017 the MRtrix3 contributors. + * + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. 
If a copy of the MPL was not distributed with this + * file, you can obtain one at http://mozilla.org/MPL/2.0/. + * + * MRtrix is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * For more details, see http://www.mrtrix.org/. + */ + + +#ifndef __testing_unittests_unittest_h__ +#define __testing_unittests_unittest_h__ + + + +using namespace std; +namespace MR +{ + namespace Testing + { + namespace UnitTests + { + + class UnitTest + { + MEMALIGN(UnitTest) + public: + UnitTest(string unit_name_arg) { + unit_name = unit_name_arg; + } + + + public: + /** + If the condition is FALSE, the message and class name are printed out and execution aborts + */ + void check(bool pass, std::string message = "(no message provided)") + { + if (!pass) + { + //We write info on the error here becasue C++ assert doesn't accept a string and other ways around this are horrible language hacks + message = "FAIL: " + unit_name + ":\t" + message + "\n"; + WARN(message); + throw Exception(message); + } + + } + + + string unit_name; + }; + + + } + } +} + + +#endif diff --git a/testing/tests/unittests b/testing/tests/unittests new file mode 100644 index 0000000000..1fc916838a --- /dev/null +++ b/testing/tests/unittests @@ -0,0 +1 @@ +testing_unit_tests_tractography run \ No newline at end of file From 757d3014046338667f4e584c3e5579ad6e0671b2 Mon Sep 17 00:00:00 2001 From: Lee Reid Date: Mon, 22 Jan 2018 10:10:43 +1000 Subject: [PATCH 0098/1471] Tidy up. Whitespace, commented out code etc --- cmd/tckedit.cpp | 2 -- src/dwi/tractography/editing/worker.cpp | 8 -------- src/dwi/tractography/tracking/exec.h | 19 +++---------------- 3 files changed, 3 insertions(+), 26 deletions(-) diff --git a/cmd/tckedit.cpp b/cmd/tckedit.cpp index af11ab17a3..37ada0e6f8 100644 --- a/cmd/tckedit.cpp +++ b/cmd/tckedit.cpp @@ -162,8 +162,6 @@ void run () erase_if_present (properties, "max_weight"); Editing::load_properties (properties); - - // Parameters that the worker threads need to be aware of, but do not appear in Properties const bool inverse = get_options ("inverse").size(); const bool ends_only = get_options ("ends_only").size(); diff --git a/src/dwi/tractography/editing/worker.cpp b/src/dwi/tractography/editing/worker.cpp index 3c90f100b2..0461c08077 100644 --- a/src/dwi/tractography/editing/worker.cpp +++ b/src/dwi/tractography/editing/worker.cpp @@ -70,14 +70,6 @@ namespace MR { in.swap(out); return true; } - - /*for (const auto& i : include_visited) { - if (!i) { - if (inverse) - in.swap (out); - return true; - } - }*/ } diff --git a/src/dwi/tractography/tracking/exec.h b/src/dwi/tractography/tracking/exec.h index e4329edb80..6088e409c6 100644 --- a/src/dwi/tractography/tracking/exec.h +++ b/src/dwi/tractography/tracking/exec.h @@ -112,7 +112,7 @@ namespace MR S (shared), method (shared), track_excluded (false), - track_included(S.properties.include.size_unordered(), S.properties.include.size_ordered()) //(S.properties.include.size(), false) + track_included(S.properties.include.size_unordered(), S.properties.include.size_ordered()) { } @@ -171,7 +171,7 @@ namespace MR if (!(S.is_act() && S.act().backtrack())) S.properties.include.contains (method.pos, track_included); - if (S.stop_on_all_include && traversed_all_include_regions()) + if (S.stop_on_all_include && track_included.all_entered()) return TRAVERSE_ALL_INCLUDE; return CONTINUE; @@ -412,7 +412,7 @@ namespace MR } - if 
(!traversed_all_include_regions()) { + if (!track_included.all_entered()) { S.add_rejection (MISSED_INCLUDE_REGION); return true; } @@ -421,19 +421,6 @@ namespace MR } - - - bool traversed_all_include_regions () - { - return track_included.all_entered(); - /*for (size_t n = 0; n < track_included.size(); ++n) - if (!track_included[n]) - return false; - return true;*/ - } - - - bool satisfy_wm_requirement (const vector& tck) { // If using the Seed_test algorithm (indicated by max_num_points == 2), don't want to execute this check From 5ddf3da95f3302f2276b66184563faafb1346a38 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Tue, 23 Jan 2018 14:55:14 +1100 Subject: [PATCH 0099/1471] GLM stats: Don't permit all-zero contrasts --- core/math/stats/glm.cpp | 8 ++++++++ core/math/stats/glm.h | 5 +++-- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/core/math/stats/glm.cpp b/core/math/stats/glm.cpp index 4ea41becf6..04392823b3 100644 --- a/core/math/stats/glm.cpp +++ b/core/math/stats/glm.cpp @@ -337,6 +337,14 @@ namespace MR + void Contrast::check_nonzero() const + { + if (c.isZero()) + throw Exception ("Cannot specify a contrast that consists entirely of zeroes"); + } + + + matrix_type Contrast::check_rank (const matrix_type& in, const size_t index) const { // FullPivLU.image() provides column-space of matrix; diff --git a/core/math/stats/glm.h b/core/math/stats/glm.h index 58e0fa73a6..c8e8db84c7 100644 --- a/core/math/stats/glm.h +++ b/core/math/stats/glm.h @@ -83,13 +83,13 @@ namespace MR c (in), r (Math::rank (c)), F (false), - i (index) { } + i (index) { check_nonzero(); } Contrast (const matrix_type& in, const size_t index) : c (check_rank (in, index)), r (Math::rank (c)), F (true), - i (index) { } + i (index) { check_nonzero(); } Partition partition (const matrix_type&) const; @@ -105,6 +105,7 @@ namespace MR const bool F; const size_t i; + void check_nonzero() const; matrix_type check_rank (const matrix_type&, const size_t) const; }; From a0e93a5d83be0554277e2aa1f50293801038606f Mon Sep 17 00:00:00 2001 From: Lee Reid Date: Tue, 23 Jan 2018 15:31:11 +1000 Subject: [PATCH 0100/1471] Fixes issue where -stop is ignored if all include regions are -include_ordered --- cmd/tckgen.cpp | 13 ++++++----- .../tractography/tracking/tractography.cpp | 23 ++++++++----------- src/dwi/tractography/tracking/tractography.h | 2 +- 3 files changed, 18 insertions(+), 20 deletions(-) diff --git a/cmd/tckgen.cpp b/cmd/tckgen.cpp index 3828fb08e5..3923676a69 100644 --- a/cmd/tckgen.cpp +++ b/cmd/tckgen.cpp @@ -228,7 +228,7 @@ void run () if (opt.size()) algorithm = opt[0][0]; - Tracking::load_streamline_properties (properties); + ACT::load_act_properties (properties); @@ -238,6 +238,11 @@ void run () if (algorithm == 2) Algorithms::load_iFOD2_options (properties); + + //load ROIs and tractography specific options + //NB must occur before seed check below due to -select option override + Tracking::load_streamline_properties_and_rois(properties); + // Check validity of options -select and -seeds; these are meaningless if seeds are number-limited // By over-riding the values in properties, the progress bar should still be valid if (properties.seeds.is_finite()) { @@ -253,11 +258,7 @@ void run () } - if (get_options("include_ordered").size() && !get_options("seed_unidirectional").size()) - throw Exception("-include_ordered requires that -seed_unidirectional is set, but this is not so"); - - //Now we are certain options are valid, load ROIs - load_rois(properties); + switch (algorithm) { case 0: diff 
--git a/src/dwi/tractography/tracking/tractography.cpp b/src/dwi/tractography/tracking/tractography.cpp index e04d1b8af2..93c56d7237 100644 --- a/src/dwi/tractography/tracking/tractography.cpp +++ b/src/dwi/tractography/tracking/tractography.cpp @@ -83,10 +83,17 @@ namespace MR + Argument ("factor").type_integer (2); - - void load_streamline_properties (Properties& properties) + /** + Loads properties related to streamlines AND loads include etc ROIs. + */ + void load_streamline_properties_and_rois (Properties& properties) { + //Validity check + if (get_options("include_ordered").size() && !get_options("seed_unidirectional").size()) + throw Exception("-include_ordered requires that -seed_unidirectional is set, but this is not so"); + + using namespace MR::App; auto opt = get_options ("select"); @@ -116,17 +123,7 @@ namespace MR opt = get_options ("rk4"); if (opt.size()) properties["rk4"] = "1"; - opt = get_options ("include"); - for (size_t i = 0; i < opt.size(); ++i) - properties.include.add (ROI (opt[i][0])); - - opt = get_options ("exclude"); - for (size_t i = 0; i < opt.size(); ++i) - properties.exclude.add (ROI (opt[i][0])); - - opt = get_options ("mask"); - for (size_t i = 0; i < opt.size(); ++i) - properties.mask.add (ROI (opt[i][0])); + load_rois(properties);//rois must be loaded before stop parameter in order to check its validity opt = get_options ("stop"); if (opt.size()) { diff --git a/src/dwi/tractography/tracking/tractography.h b/src/dwi/tractography/tracking/tractography.h index 0f8f2802bc..e0b13b1e33 100644 --- a/src/dwi/tractography/tracking/tractography.h +++ b/src/dwi/tractography/tracking/tractography.h @@ -44,7 +44,7 @@ namespace MR extern const App::OptionGroup TrackOption; - void load_streamline_properties (Properties&); + void load_streamline_properties_and_rois (Properties&); } } From 514e6c67fa331167cf61aeb6c862d20ffd22c3f9 Mon Sep 17 00:00:00 2001 From: Thijs Dhollander Date: Mon, 12 Feb 2018 15:01:33 +1100 Subject: [PATCH 0101/1471] setting up for dhollander algo improvement experiments --- bin/dwipreproc | 4 +- docs/reference/scripts/dwi2response.rst | 101 +++++++++++- lib/mrtrix3/dwi2response/dhollander.py | 2 +- lib/mrtrix3/dwi2response/dhollander_old.py | 182 +++++++++++++++++++++ lib/mrtrix3/dwi2response/fa.py | 2 +- 5 files changed, 285 insertions(+), 6 deletions(-) create mode 100644 lib/mrtrix3/dwi2response/dhollander_old.py diff --git a/bin/dwipreproc b/bin/dwipreproc index 2754727bc6..52aa8dd211 100755 --- a/bin/dwipreproc +++ b/bin/dwipreproc @@ -236,14 +236,14 @@ if eddy_mporder: # Need to know the mean b-value in each shell, and the asymmetry value of each shell # But don't bother testing / warning the user if they're already controlling for this if not app.args.eddy_options or not any(s.startswith('--slm=') for s in app.args.eddy_options.split()): - shell_bvalues = [ int(round(float(value))) for value in image.mrinfo('dwi.mif', 'shellvalues').split() ] + shell_bvalues = [ int(round(float(value))) for value in image.mrinfo('dwi.mif', 'shell_bvalues').split() ] shell_asymmetry = [ float(value) for value in run.command('dirstat dwi.mif -output asym')[0].splitlines() ] # dirstat will skip any b=0 shell by default; therefore for correspondence between # shell_bvalues and shell_symmetry, need to remove any b=0 from the former if len(shell_bvalues) == len(shell_asymmetry) + 1: shell_bvalues = shell_bvalues[1:] elif len(shell_bvalues) != len(shell_asymmetry): - app.error('Number of b-values reported by mrinfo (' + len(shell_bvalues) + ') does not match 
number of outputs provided by dirstat (' + len(shell_asymmetry) + ')') + app.error('Number of b-values reported by mrinfo (' + str(len(shell_bvalues)) + ') does not match number of outputs provided by dirstat (' + str(len(shell_asymmetry)) + ')') for b, s in zip(shell_bvalues, shell_asymmetry): if s >= 0.1: app.warn('sampling of b=' + str(b) + ' shell is ' + ('strongly' if s >= 0.4 else 'moderately') + \ diff --git a/docs/reference/scripts/dwi2response.rst b/docs/reference/scripts/dwi2response.rst index 7e6cf58620..74fe5b21a6 100644 --- a/docs/reference/scripts/dwi2response.rst +++ b/docs/reference/scripts/dwi2response.rst @@ -15,7 +15,7 @@ Usage dwi2response algorithm [ options ] ... -- *algorithm*: Select the algorithm to be used to complete the script operation; additional details and options become available once an algorithm is nominated. Options are: dhollander, fa, manual, msmt_5tt, tax, tournier +- *algorithm*: Select the algorithm to be used to complete the script operation; additional details and options become available once an algorithm is nominated. Options are: dhollander, dhollander_old, fa, manual, msmt_5tt, tax, tournier Description ----------- @@ -89,7 +89,7 @@ dwi2response dhollander Synopsis -------- -Unsupervised estimation of WM, GM and CSF response functions; does not require a T1 image (or segmentation thereof) +Unsupervised estimation of WM, GM and CSF response functions; does not require a T1 image (or segmentation thereof). This is an improved version of the algorithm. Usage -------- @@ -164,6 +164,103 @@ References +**Author:** Thijs Dhollander (thijs.dhollander@gmail.com) + +**Copyright:** Copyright (c) 2008-2018 the MRtrix3 contributors. + +This Source Code Form is subject to the terms of the Mozilla Public +License, v. 2.0. If a copy of the MPL was not distributed with this +file, you can obtain one at http://mozilla.org/MPL/2.0/ + +MRtrix3 is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty +of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + +For more details, see http://www.mrtrix.org/ + +.. _dwi2response_dhollander_old: + +dwi2response dhollander_old +=========================== + +Synopsis +-------- + +Unsupervised estimation of WM, GM and CSF response functions; does not require a T1 image (or segmentation thereof). This is the original version of the algorithm. + +Usage +-------- + +:: + + dwi2response dhollander_old input out_sfwm out_gm out_csf [ options ] + +- *input*: The input DWI +- *out_sfwm*: Output single-fibre WM response text file +- *out_gm*: Output GM response text file +- *out_csf*: Output CSF response text file + +Options +------- + +Options specific to the 'dhollander_old' algorithm +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +- **-erode** Number of erosion passes to apply to initial (whole brain) mask. (default: 3) + +- **-fa** FA threshold for crude WM versus GM-CSF separation. (default: 0.2) + +- **-sfwm** Number of single-fibre WM voxels to select, as a percentage of refined WM. (default: 0.5 per cent) + +- **-gm** Number of GM voxels to select, as a percentage of refined GM. (default: 2 per cent) + +- **-csf** Number of CSF voxels to select, as a percentage of refined CSF. 
(default: 10 per cent) + +Options common to all dwi2response algorithms +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +- **-shells** The b-value shell(s) to use in response function estimation (single value for single-shell response, comma-separated list for multi-shell response) + +- **-lmax** The maximum harmonic degree(s) of response function estimation (single value for single-shell response, comma-separated list for multi-shell response) + +- **-mask** Provide an initial mask for response voxel selection + +- **-voxels** Output an image showing the final voxel selection(s) + +- **-grad** Pass the diffusion gradient table in MRtrix format + +- **-fslgrad bvecs bvals** Pass the diffusion gradient table in FSL bvecs/bvals format + +Standard options +^^^^^^^^^^^^^^^^ + +- **-continue ** Continue the script from a previous execution; must provide the temporary directory path, and the name of the last successfully-generated file + +- **-force** Force overwrite of output files if pre-existing + +- **-help** Display help information for the script + +- **-nocleanup** Do not delete temporary files during script, or temporary directory at script completion + +- **-nthreads number** Use this number of threads in MRtrix multi-threaded applications (0 disables multi-threading) + +- **-tempdir /path/to/tmp/** Manually specify the path in which to generate the temporary directory + +- **-quiet** Suppress all console output during script execution + +- **-info** Display additional information and progress for every command invoked + +- **-debug** Display additional debugging information over and above the output of -info + +References +^^^^^^^^^^ + +* Dhollander, T.; Raffelt, D. & Connelly, A. Unsupervised 3-tissue response function estimation from single-shell or multi-shell diffusion MR data without a co-registered T1 image. ISMRM Workshop on Breaking the Barriers of Diffusion MRI, 2016, 5 + +-------------- + + + **Author:** Thijs Dhollander (thijs.dhollander@gmail.com) **Copyright:** Copyright (c) 2008-2018 the MRtrix3 contributors. diff --git a/lib/mrtrix3/dwi2response/dhollander.py b/lib/mrtrix3/dwi2response/dhollander.py index 8fc65eeaaf..657afdd99a 100644 --- a/lib/mrtrix3/dwi2response/dhollander.py +++ b/lib/mrtrix3/dwi2response/dhollander.py @@ -1,5 +1,5 @@ def initialise(base_parser, subparsers): #pylint: disable=unused-variable - parser = subparsers.add_parser('dhollander', author='Thijs Dhollander (thijs.dhollander@gmail.com)', synopsis='Unsupervised estimation of WM, GM and CSF response functions; does not require a T1 image (or segmentation thereof)', parents=[base_parser]) + parser = subparsers.add_parser('dhollander', author='Thijs Dhollander (thijs.dhollander@gmail.com)', synopsis='Unsupervised estimation of WM, GM and CSF response functions; does not require a T1 image (or segmentation thereof). This is an improved version of the algorithm.', parents=[base_parser]) parser.addCitation('', 'Dhollander, T.; Raffelt, D. & Connelly, A. Unsupervised 3-tissue response function estimation from single-shell or multi-shell diffusion MR data without a co-registered T1 image. 
ISMRM Workshop on Breaking the Barriers of Diffusion MRI, 2016, 5', False) parser.add_argument('input', help='The input DWI') parser.add_argument('out_sfwm', help='Output single-fibre WM response text file') diff --git a/lib/mrtrix3/dwi2response/dhollander_old.py b/lib/mrtrix3/dwi2response/dhollander_old.py new file mode 100644 index 0000000000..06a4c43645 --- /dev/null +++ b/lib/mrtrix3/dwi2response/dhollander_old.py @@ -0,0 +1,182 @@ +def initialise(base_parser, subparsers): #pylint: disable=unused-variable + parser = subparsers.add_parser('dhollander_old', author='Thijs Dhollander (thijs.dhollander@gmail.com)', synopsis='Unsupervised estimation of WM, GM and CSF response functions; does not require a T1 image (or segmentation thereof). This is the original version of the algorithm.', parents=[base_parser]) + parser.addCitation('', 'Dhollander, T.; Raffelt, D. & Connelly, A. Unsupervised 3-tissue response function estimation from single-shell or multi-shell diffusion MR data without a co-registered T1 image. ISMRM Workshop on Breaking the Barriers of Diffusion MRI, 2016, 5', False) + parser.add_argument('input', help='The input DWI') + parser.add_argument('out_sfwm', help='Output single-fibre WM response text file') + parser.add_argument('out_gm', help='Output GM response text file') + parser.add_argument('out_csf', help='Output CSF response text file') + options = parser.add_argument_group('Options specific to the \'dhollander_old\' algorithm') + options.add_argument('-erode', type=int, default=3, help='Number of erosion passes to apply to initial (whole brain) mask. (default: 3)') + options.add_argument('-fa', type=float, default=0.2, help='FA threshold for crude WM versus GM-CSF separation. (default: 0.2)') + options.add_argument('-sfwm', type=float, default=0.5, help='Number of single-fibre WM voxels to select, as a percentage of refined WM. (default: 0.5 per cent)') + options.add_argument('-gm', type=float, default=2.0, help='Number of GM voxels to select, as a percentage of refined GM. (default: 2 per cent)') + options.add_argument('-csf', type=float, default=10.0, help='Number of CSF voxels to select, as a percentage of refined CSF. (default: 10 per cent)') + + + +def checkOutputPaths(): #pylint: disable=unused-variable + from mrtrix3 import app + app.checkOutputPath(app.args.out_sfwm) + app.checkOutputPath(app.args.out_gm) + app.checkOutputPath(app.args.out_csf) + + + +def getInputs(): #pylint: disable=unused-variable + pass + + + +def needsSingleShell(): #pylint: disable=unused-variable + return False + + + +def execute(): #pylint: disable=unused-variable + import shutil + from mrtrix3 import app, image, path, run + + + # Get b-values and number of volumes per b-value. + bvalues = [ int(round(float(x))) for x in image.mrinfo('dwi.mif', 'shell_bvalues').split() ] + bvolumes = [ int(x) for x in image.mrinfo('dwi.mif', 'shell_sizes').split() ] + app.console(str(len(bvalues)) + ' unique b-value(s) detected: ' + ','.join(map(str,bvalues)) + ' with ' + ','.join(map(str,bvolumes)) + ' volumes.') + if len(bvalues) < 2: + app.error('Need at least 2 unique b-values (including b=0).') + + + # Get lmax information (if provided). 
+ sfwm_lmax = [ ] + if app.args.lmax: + sfwm_lmax = [ int(x.strip()) for x in app.args.lmax.split(',') ] + if not len(sfwm_lmax) == len(bvalues): + app.error('Number of lmax\'s (' + str(len(sfwm_lmax)) + ', as supplied to the -lmax option: ' + ','.join(map(str,sfwm_lmax)) + ') does not match number of unique b-values.') + for l in sfwm_lmax: + if l%2: + app.error('Values supplied to the -lmax option must be even.') + if l<0: + app.error('Values supplied to the -lmax option must be non-negative.') + + + # Erode (brain) mask. + if app.args.erode > 0: + run.command('maskfilter mask.mif erode eroded_mask.mif -npass ' + str(app.args.erode)) + else: + run.command('mrconvert mask.mif eroded_mask.mif -datatype bit') + + + # Get volumes, compute mean signal and SDM per b-value; compute overall SDM; get rid of erroneous values. + totvolumes = 0 + fullsdmcmd = 'mrcalc' + errcmd = 'mrcalc' + zeropath = 'mean_b' + str(bvalues[0]) + '.mif' + for i, b in enumerate(bvalues): + meanpath = 'mean_b' + str(b) + '.mif' + run.command('dwiextract dwi.mif -shells ' + str(b) + ' - | mrmath - mean ' + meanpath + ' -axis 3') + errpath = 'err_b' + str(b) + '.mif' + run.command('mrcalc ' + meanpath + ' -finite ' + meanpath + ' 0 -if 0 -le ' + errpath + ' -datatype bit') + errcmd += ' ' + errpath + if i>0: + errcmd += ' -add' + sdmpath = 'sdm_b' + str(b) + '.mif' + run.command('mrcalc ' + zeropath + ' ' + meanpath + ' -divide -log ' + sdmpath) + totvolumes += bvolumes[i] + fullsdmcmd += ' ' + sdmpath + ' ' + str(bvolumes[i]) + ' -mult' + if i>1: + fullsdmcmd += ' -add' + fullsdmcmd += ' ' + str(totvolumes) + ' -divide full_sdm.mif' + run.command(fullsdmcmd) + run.command('mrcalc full_sdm.mif -finite full_sdm.mif 0 -if 0 -le err_sdm.mif -datatype bit') + errcmd += ' err_sdm.mif -add 0 eroded_mask.mif -if safe_mask.mif -datatype bit' + run.command(errcmd) + run.command('mrcalc safe_mask.mif full_sdm.mif 0 -if 10 -min safe_sdm.mif') + + + # Compute FA and principal eigenvectors; crude WM versus GM-CSF separation based on FA. + run.command('dwi2tensor dwi.mif - -mask safe_mask.mif | tensor2metric - -fa safe_fa.mif -vector safe_vecs.mif -modulate none -mask safe_mask.mif') + run.command('mrcalc safe_mask.mif safe_fa.mif 0 -if ' + str(app.args.fa) + ' -gt crude_wm.mif -datatype bit') + run.command('mrcalc crude_wm.mif 0 safe_mask.mif -if _crudenonwm.mif -datatype bit') + + # Crude GM versus CSF separation based on SDM. + crudenonwmmedian = image.statistic('safe_sdm.mif', 'median', '-mask _crudenonwm.mif') + run.command('mrcalc _crudenonwm.mif safe_sdm.mif ' + str(crudenonwmmedian) + ' -subtract 0 -if - | mrthreshold - - -mask _crudenonwm.mif | mrcalc _crudenonwm.mif - 0 -if crude_csf.mif -datatype bit') + run.command('mrcalc crude_csf.mif 0 _crudenonwm.mif -if crude_gm.mif -datatype bit') + + + # Refine WM: remove high SDM outliers. 
+ crudewmmedian = image.statistic('safe_sdm.mif', 'median', '-mask crude_wm.mif') + run.command('mrcalc crude_wm.mif safe_sdm.mif 0 -if ' + str(crudewmmedian) + ' -gt _crudewmhigh.mif -datatype bit') + run.command('mrcalc _crudewmhigh.mif 0 crude_wm.mif -if _crudewmlow.mif -datatype bit') + crudewmQ1 = float(image.statistic('safe_sdm.mif', 'median', '-mask _crudewmlow.mif')) + crudewmQ3 = float(image.statistic('safe_sdm.mif', 'median', '-mask _crudewmhigh.mif')) + crudewmoutlthresh = crudewmQ3 + (crudewmQ3 - crudewmQ1) + run.command('mrcalc crude_wm.mif safe_sdm.mif 0 -if ' + str(crudewmoutlthresh) + ' -gt _crudewmoutliers.mif -datatype bit') + run.command('mrcalc _crudewmoutliers.mif 0 crude_wm.mif -if refined_wm.mif -datatype bit') + + # Refine GM: separate safer GM from partial volumed voxels. + crudegmmedian = image.statistic('safe_sdm.mif', 'median', '-mask crude_gm.mif') + run.command('mrcalc crude_gm.mif safe_sdm.mif 0 -if ' + str(crudegmmedian) + ' -gt _crudegmhigh.mif -datatype bit') + run.command('mrcalc _crudegmhigh.mif 0 crude_gm.mif -if _crudegmlow.mif -datatype bit') + run.command('mrcalc _crudegmhigh.mif safe_sdm.mif ' + str(crudegmmedian) + ' -subtract 0 -if - | mrthreshold - - -mask _crudegmhigh.mif -invert | mrcalc _crudegmhigh.mif - 0 -if _crudegmhighselect.mif -datatype bit') + run.command('mrcalc _crudegmlow.mif safe_sdm.mif ' + str(crudegmmedian) + ' -subtract -neg 0 -if - | mrthreshold - - -mask _crudegmlow.mif -invert | mrcalc _crudegmlow.mif - 0 -if _crudegmlowselect.mif -datatype bit') + run.command('mrcalc _crudegmhighselect.mif 1 _crudegmlowselect.mif -if refined_gm.mif -datatype bit') + + # Refine CSF: recover lost CSF from crude WM SDM outliers, separate safer CSF from partial volumed voxels. + crudecsfmin = image.statistic('safe_sdm.mif', 'min', '-mask crude_csf.mif') + run.command('mrcalc _crudewmoutliers.mif safe_sdm.mif 0 -if ' + str(crudecsfmin) + ' -gt 1 crude_csf.mif -if _crudecsfextra.mif -datatype bit') + run.command('mrcalc _crudecsfextra.mif safe_sdm.mif ' + str(crudecsfmin) + ' -subtract 0 -if - | mrthreshold - - -mask _crudecsfextra.mif | mrcalc _crudecsfextra.mif - 0 -if refined_csf.mif -datatype bit') + + + # Get final voxels for single-fibre WM response function estimation from WM using 'tournier' algorithm. + refwmcount = float(image.statistic('refined_wm.mif', 'count', '-mask refined_wm.mif')) + voxsfwmcount = int(round(refwmcount * app.args.sfwm / 100.0)) + app.console('Running \'tournier\' algorithm to select ' + str(voxsfwmcount) + ' single-fibre WM voxels.') + cleanopt = '' + if not app.cleanup: + cleanopt = ' -nocleanup' + run.command('dwi2response tournier dwi.mif _respsfwmss.txt -sf_voxels ' + str(voxsfwmcount) + ' -iter_voxels ' + str(voxsfwmcount * 10) + ' -mask refined_wm.mif -voxels voxels_sfwm.mif -tempdir ' + app.tempDir + cleanopt) + + # Get final voxels for GM response function estimation from GM. 
+ refgmmedian = image.statistic('safe_sdm.mif', 'median', '-mask refined_gm.mif') + run.command('mrcalc refined_gm.mif safe_sdm.mif 0 -if ' + str(refgmmedian) + ' -gt _refinedgmhigh.mif -datatype bit') + run.command('mrcalc _refinedgmhigh.mif 0 refined_gm.mif -if _refinedgmlow.mif -datatype bit') + refgmhighcount = float(image.statistic('_refinedgmhigh.mif', 'count', '-mask _refinedgmhigh.mif')) + refgmlowcount = float(image.statistic('_refinedgmlow.mif', 'count', '-mask _refinedgmlow.mif')) + voxgmhighcount = int(round(refgmhighcount * app.args.gm / 100.0)) + voxgmlowcount = int(round(refgmlowcount * app.args.gm / 100.0)) + run.command('mrcalc _refinedgmhigh.mif safe_sdm.mif 0 -if - | mrthreshold - - -bottom ' + str(voxgmhighcount) + ' -ignorezero | mrcalc _refinedgmhigh.mif - 0 -if _refinedgmhighselect.mif -datatype bit') + run.command('mrcalc _refinedgmlow.mif safe_sdm.mif 0 -if - | mrthreshold - - -top ' + str(voxgmlowcount) + ' -ignorezero | mrcalc _refinedgmlow.mif - 0 -if _refinedgmlowselect.mif -datatype bit') + run.command('mrcalc _refinedgmhighselect.mif 1 _refinedgmlowselect.mif -if voxels_gm.mif -datatype bit') + + # Get final voxels for CSF response function estimation from CSF. + refcsfcount = float(image.statistic('refined_csf.mif', 'count', '-mask refined_csf.mif')) + voxcsfcount = int(round(refcsfcount * app.args.csf / 100.0)) + run.command('mrcalc refined_csf.mif safe_sdm.mif 0 -if - | mrthreshold - - -top ' + str(voxcsfcount) + ' -ignorezero | mrcalc refined_csf.mif - 0 -if voxels_csf.mif -datatype bit') + + + # Show summary of voxels counts. + textarrow = ' --> ' + app.console('Summary of voxel counts:') + app.console('Mask: ' + str(int(image.statistic('mask.mif', 'count', '-mask mask.mif'))) + textarrow + str(int(image.statistic('eroded_mask.mif', 'count', '-mask eroded_mask.mif'))) + textarrow + str(int(image.statistic('safe_mask.mif', 'count', '-mask safe_mask.mif')))) + app.console('WM: ' + str(int(image.statistic('crude_wm.mif', 'count', '-mask crude_wm.mif'))) + textarrow + str(int(image.statistic('refined_wm.mif', 'count', '-mask refined_wm.mif'))) + textarrow + str(int(image.statistic('voxels_sfwm.mif', 'count', '-mask voxels_sfwm.mif'))) + ' (SF)') + app.console('GM: ' + str(int(image.statistic('crude_gm.mif', 'count', '-mask crude_gm.mif'))) + textarrow + str(int(image.statistic('refined_gm.mif', 'count', '-mask refined_gm.mif'))) + textarrow + str(int(image.statistic('voxels_gm.mif', 'count', '-mask voxels_gm.mif')))) + app.console('CSF: ' + str(int(image.statistic('crude_csf.mif', 'count', '-mask crude_csf.mif'))) + textarrow + str(int(image.statistic('refined_csf.mif', 'count', '-mask refined_csf.mif'))) + textarrow + str(int(image.statistic('voxels_csf.mif', 'count', '-mask voxels_csf.mif')))) + + + # Generate single-fibre WM, GM and CSF responses + bvalues_option = ' -shells ' + ','.join(map(str,bvalues)) + sfwm_lmax_option = '' + if sfwm_lmax: + sfwm_lmax_option = ' -lmax ' + ','.join(map(str,sfwm_lmax)) + run.command('amp2response dwi.mif voxels_sfwm.mif safe_vecs.mif response_sfwm.txt' + bvalues_option + sfwm_lmax_option) + run.command('amp2response dwi.mif voxels_gm.mif safe_vecs.mif response_gm.txt' + bvalues_option + ' -isotropic') + run.command('amp2response dwi.mif voxels_csf.mif safe_vecs.mif response_csf.txt' + bvalues_option + ' -isotropic') + run.function(shutil.copyfile, 'response_sfwm.txt', path.fromUser(app.args.out_sfwm, False)) + run.function(shutil.copyfile, 'response_gm.txt', path.fromUser(app.args.out_gm, False)) + 
run.function(shutil.copyfile, 'response_csf.txt', path.fromUser(app.args.out_csf, False)) + + + # Generate 4D binary images with voxel selections at major stages in algorithm (RGB as in MSMT-CSD paper). + run.command('mrcat crude_csf.mif crude_gm.mif crude_wm.mif crude.mif -axis 3') + run.command('mrcat refined_csf.mif refined_gm.mif refined_wm.mif refined.mif -axis 3') + run.command('mrcat voxels_csf.mif voxels_gm.mif voxels_sfwm.mif voxels.mif -axis 3') diff --git a/lib/mrtrix3/dwi2response/fa.py b/lib/mrtrix3/dwi2response/fa.py index ec7cc68f3f..8f83e8b576 100644 --- a/lib/mrtrix3/dwi2response/fa.py +++ b/lib/mrtrix3/dwi2response/fa.py @@ -30,7 +30,7 @@ def needsSingleShell(): #pylint: disable=unused-variable def execute(): #pylint: disable=unused-variable import shutil from mrtrix3 import app, image, path, run - bvalues = [ int(round(float(x))) for x in image.mrinfo('dwi.mif', 'shellvalues').split() ] + bvalues = [ int(round(float(x))) for x in image.mrinfo('dwi.mif', 'shell_bvalues').split() ] if len(bvalues) < 2: app.error('Need at least 2 unique b-values (including b=0).') lmax_option = '' From 3b657ec8a63c2d99a3511a4ec4a5a1f14b45041b Mon Sep 17 00:00:00 2001 From: Thijs Dhollander Date: Mon, 12 Feb 2018 16:41:45 +1100 Subject: [PATCH 0102/1471] further set up for dhollander algo improvement experiments --- docs/reference/scripts/dwi2response.rst | 4 ++-- lib/mrtrix3/dwi2response/dhollander.py | 2 +- lib/mrtrix3/dwi2response/dhollander_old.py | 2 +- lib/mrtrix3/dwi2response/tournier.py | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/reference/scripts/dwi2response.rst b/docs/reference/scripts/dwi2response.rst index 74fe5b21a6..b29c122cd9 100644 --- a/docs/reference/scripts/dwi2response.rst +++ b/docs/reference/scripts/dwi2response.rst @@ -89,7 +89,7 @@ dwi2response dhollander Synopsis -------- -Unsupervised estimation of WM, GM and CSF response functions; does not require a T1 image (or segmentation thereof). This is an improved version of the algorithm. +Unsupervised estimation of WM, GM and CSF response functions; does not require a T1 image (or segmentation thereof). This is an improved version of the Dhollander et al. (2016) algorithm. Usage -------- @@ -186,7 +186,7 @@ dwi2response dhollander_old Synopsis -------- -Unsupervised estimation of WM, GM and CSF response functions; does not require a T1 image (or segmentation thereof). This is the original version of the algorithm. +Unsupervised estimation of WM, GM and CSF response functions; does not require a T1 image (or segmentation thereof). This is the original version of the Dhollander et al. (2016) algorithm. Usage -------- diff --git a/lib/mrtrix3/dwi2response/dhollander.py b/lib/mrtrix3/dwi2response/dhollander.py index 657afdd99a..b9ec49b875 100644 --- a/lib/mrtrix3/dwi2response/dhollander.py +++ b/lib/mrtrix3/dwi2response/dhollander.py @@ -1,5 +1,5 @@ def initialise(base_parser, subparsers): #pylint: disable=unused-variable - parser = subparsers.add_parser('dhollander', author='Thijs Dhollander (thijs.dhollander@gmail.com)', synopsis='Unsupervised estimation of WM, GM and CSF response functions; does not require a T1 image (or segmentation thereof). This is an improved version of the algorithm.', parents=[base_parser]) + parser = subparsers.add_parser('dhollander', author='Thijs Dhollander (thijs.dhollander@gmail.com)', synopsis='Unsupervised estimation of WM, GM and CSF response functions; does not require a T1 image (or segmentation thereof). This is an improved version of the Dhollander et al. 
(2016) algorithm.', parents=[base_parser]) parser.addCitation('', 'Dhollander, T.; Raffelt, D. & Connelly, A. Unsupervised 3-tissue response function estimation from single-shell or multi-shell diffusion MR data without a co-registered T1 image. ISMRM Workshop on Breaking the Barriers of Diffusion MRI, 2016, 5', False) parser.add_argument('input', help='The input DWI') parser.add_argument('out_sfwm', help='Output single-fibre WM response text file') diff --git a/lib/mrtrix3/dwi2response/dhollander_old.py b/lib/mrtrix3/dwi2response/dhollander_old.py index 06a4c43645..4f13089bc4 100644 --- a/lib/mrtrix3/dwi2response/dhollander_old.py +++ b/lib/mrtrix3/dwi2response/dhollander_old.py @@ -1,5 +1,5 @@ def initialise(base_parser, subparsers): #pylint: disable=unused-variable - parser = subparsers.add_parser('dhollander_old', author='Thijs Dhollander (thijs.dhollander@gmail.com)', synopsis='Unsupervised estimation of WM, GM and CSF response functions; does not require a T1 image (or segmentation thereof). This is the original version of the algorithm.', parents=[base_parser]) + parser = subparsers.add_parser('dhollander_old', author='Thijs Dhollander (thijs.dhollander@gmail.com)', synopsis='Unsupervised estimation of WM, GM and CSF response functions; does not require a T1 image (or segmentation thereof). This is the original version of the Dhollander et al. (2016) algorithm.', parents=[base_parser]) parser.addCitation('', 'Dhollander, T.; Raffelt, D. & Connelly, A. Unsupervised 3-tissue response function estimation from single-shell or multi-shell diffusion MR data without a co-registered T1 image. ISMRM Workshop on Breaking the Barriers of Diffusion MRI, 2016, 5', False) parser.add_argument('input', help='The input DWI') parser.add_argument('out_sfwm', help='Output single-fibre WM response text file') diff --git a/lib/mrtrix3/dwi2response/tournier.py b/lib/mrtrix3/dwi2response/tournier.py index 72e3c243d3..452f195948 100644 --- a/lib/mrtrix3/dwi2response/tournier.py +++ b/lib/mrtrix3/dwi2response/tournier.py @@ -54,7 +54,7 @@ def execute(): #pylint: disable=unused-variable iter_lmax_option = lmax_option # Run CSD - run.command('dwi2fod csd dwi.mif ' + RF_in_path + ' ' + prefix + 'FOD.mif -mask ' + mask_in_path + iter_lmax_option) + run.command('dwi2fod csd dwi.mif ' + RF_in_path + ' ' + prefix + 'FOD.mif -mask ' + mask_in_path) # Get amplitudes of two largest peaks, and direction of largest run.command('fod2fixel ' + prefix + 'FOD.mif ' + prefix + 'fixel -peak peaks.mif -mask ' + mask_in_path + ' -fmls_no_thresholds') file.delTemporary(prefix + 'FOD.mif') From 73188587481c924f22dad36434e347344dd7dcd6 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Tue, 27 Feb 2018 02:15:47 +0000 Subject: [PATCH 0103/1471] GLM: Name t-test outputs "t#" rather than "c#" ("contrast") --- core/math/stats/glm.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/math/stats/glm.h b/core/math/stats/glm.h index c8e8db84c7..4ffbbbe93a 100644 --- a/core/math/stats/glm.h +++ b/core/math/stats/glm.h @@ -97,7 +97,7 @@ namespace MR ssize_t cols() const { return c.cols(); } size_t rank() const { return r; } bool is_F() const { return F; } - std::string name() const { return std::string(F ? "F" : "c") + str(i+1); } + std::string name() const { return std::string(F ? 
"F" : "t") + str(i+1); } private: const matrix_type c; From 1971b092c6a8dca5653e0d6da666fc0bb368348f Mon Sep 17 00:00:00 2001 From: Chun-Hung Jimmy Yeh Date: Mon, 6 Mar 2017 16:28:56 +1100 Subject: [PATCH 0104/1471] class VertexTransform: transform_t::FS2REAL Add method to transform Freesurfer surface file to real space. The corresponding T1 data in mgz format is needed for getting the ras translation vector. --- src/surface/filter/vertex_transform.cpp | 20 ++++++++++++++++++++ src/surface/filter/vertex_transform.h | 3 ++- 2 files changed, 22 insertions(+), 1 deletion(-) diff --git a/src/surface/filter/vertex_transform.cpp b/src/surface/filter/vertex_transform.cpp index 7df4a7b2eb..bce287d7fc 100644 --- a/src/surface/filter/vertex_transform.cpp +++ b/src/surface/filter/vertex_transform.cpp @@ -14,6 +14,7 @@ #include "surface/filter/vertex_transform.h" +#include "file/nifti1_utils.h" #include "exception.h" @@ -88,6 +89,25 @@ namespace MR } break; + case transform_t::FS2REAL: + std::vector< size_t > axes( 3 ); + auto M = File::NIfTI::adjust_transform( header, axes ); + Eigen::Vector3d cras( 3, 1 ); + for ( size_t i = 0; i < 3; i++ ) + { + cras[ i ] = M( i, 3 ); + for ( size_t j = 0; j < 3; j++ ) + { + cras[ i ] += 0.5 * header.size( axes[ j ] ) + * header.spacing( axes[ j ] ) * M( i, j ); + } + } + for ( size_t i = 0; i != V; ++i ) + { + vertices.push_back ( in.vert(i) + cras ); + } + break; + } out.load (vertices, normals, in.get_triangles(), in.get_quads()); diff --git a/src/surface/filter/vertex_transform.h b/src/surface/filter/vertex_transform.h index b3388bb20d..2df473e392 100644 --- a/src/surface/filter/vertex_transform.h +++ b/src/surface/filter/vertex_transform.h @@ -36,7 +36,7 @@ namespace MR class VertexTransform : public Base { MEMALIGN (VertexTransform) public: - enum class transform_t { UNDEFINED, FIRST2REAL, REAL2FIRST, VOXEL2REAL, REAL2VOXEL }; + enum class transform_t { UNDEFINED, FIRST2REAL, REAL2FIRST, VOXEL2REAL, REAL2VOXEL, FS2REAL }; VertexTransform (const Header& H) : header (H), @@ -47,6 +47,7 @@ namespace MR void set_real2first() { mode = transform_t::REAL2FIRST; } void set_voxel2real() { mode = transform_t::VOXEL2REAL; } void set_real2voxel() { mode = transform_t::REAL2VOXEL; } + void set_fs2real () { mode = transform_t::FS2REAL ; } transform_t get_mode() const { return mode; } From 73926adfaaedf48511d3618b997a5878afa97345 Mon Sep 17 00:00:00 2001 From: Chun-Hung Jimmy Yeh Date: Tue, 7 Mar 2017 12:40:17 +1100 Subject: [PATCH 0105/1471] meshconvert: support loading in freesurfer surface format --- cmd/meshconvert.cpp | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/cmd/meshconvert.cpp b/cmd/meshconvert.cpp index 76b3a30c50..d6971bcd20 100644 --- a/cmd/meshconvert.cpp +++ b/cmd/meshconvert.cpp @@ -27,7 +27,7 @@ using namespace MR::Surface; -const char* transform_choices[] = { "first2real", "real2first", "voxel2real", "real2voxel", nullptr }; +const char* transform_choices[] = { "first2real", "real2first", "voxel2real", "real2voxel", "fs2real", nullptr }; @@ -46,7 +46,8 @@ void usage () + Option ("binary", "write the output mesh file in binary format (if supported)") + Option ("transform", "transform vertices from one coordinate space to another, based on a template image; " - "options are: " + join(transform_choices, ", ")) + "options are: " + join(transform_choices, ", ") + ". 
" + "(When using fs2real, the relevant .mgz file must be provided.)") + Argument ("mode").type_choice (transform_choices) + Argument ("image").type_image_in(); @@ -79,6 +80,7 @@ void run () case 1: transform->set_real2first(); break; case 2: transform->set_voxel2real(); break; case 3: transform->set_real2voxel(); break; + case 4: transform->set_fs2real (); break; default: throw Exception ("Unexpected mode for spatial transformation of vertices"); } MeshMulti temp; From 6bcceb0fd782fb542b7c75946f065aa1a1767bd7 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Sat, 10 Mar 2018 19:58:30 +1100 Subject: [PATCH 0106/1471] mesh2voxel: Multi-thread partial volume estimation --- cmd/meshconvert.cpp | 3 +- docs/reference/commands/meshconvert.rst | 2 +- src/surface/algo/mesh2image.cpp | 233 +++++++++++++++--------- src/surface/filter/vertex_transform.cpp | 6 +- 4 files changed, 153 insertions(+), 91 deletions(-) diff --git a/cmd/meshconvert.cpp b/cmd/meshconvert.cpp index d6971bcd20..2d45876851 100644 --- a/cmd/meshconvert.cpp +++ b/cmd/meshconvert.cpp @@ -46,8 +46,7 @@ void usage () + Option ("binary", "write the output mesh file in binary format (if supported)") + Option ("transform", "transform vertices from one coordinate space to another, based on a template image; " - "options are: " + join(transform_choices, ", ") + ". " - "(When using fs2real, the relevant .mgz file must be provided.)") + "options are: " + join(transform_choices, ", ")) + Argument ("mode").type_choice (transform_choices) + Argument ("image").type_image_in(); diff --git a/docs/reference/commands/meshconvert.rst b/docs/reference/commands/meshconvert.rst index 6501f17210..8ff91fe017 100644 --- a/docs/reference/commands/meshconvert.rst +++ b/docs/reference/commands/meshconvert.rst @@ -23,7 +23,7 @@ Options - **-binary** write the output mesh file in binary format (if supported) -- **-transform mode image** transform vertices from one coordinate space to another, based on a template image; options are: first2real, real2first, voxel2real, real2voxel +- **-transform mode image** transform vertices from one coordinate space to another, based on a template image; options are: first2real, real2first, voxel2real, real2voxel, fs2real Standard options ^^^^^^^^^^^^^^^^ diff --git a/src/surface/algo/mesh2image.cpp b/src/surface/algo/mesh2image.cpp index 68a3a0ccec..0495a1c1b8 100644 --- a/src/surface/algo/mesh2image.cpp +++ b/src/surface/algo/mesh2image.cpp @@ -19,6 +19,7 @@ #include "header.h" #include "progressbar.h" +#include "thread_queue.h" #include "types.h" #include "surface/types.h" @@ -33,6 +34,9 @@ namespace MR { + constexpr size_t pve_os_ratio = 10; + constexpr size_t pve_nsamples = Math::pow3 (pve_os_ratio); + void mesh2image (const Mesh& mesh_realspace, Image& image) { @@ -185,124 +189,183 @@ namespace MR } ++progress; - // Get better partial volume estimates for all necessary voxels - // TODO This could be multi-threaded, but hard to justify the dev time - static const size_t pve_os_ratio = 10; + // Construct class functors necessary to calculate, for each voxel intersected by the + // surface, the partial volume fraction + class Source + { MEMALIGN(Source) + public: + Source (const Vox2Poly& data) : + data (data), + i (data.begin()) { } + + bool operator() (std::pair>& out) + { + if (i == data.end()) + return false; + out = std::make_pair (i->first, i->second); + ++i; + return true; + } + + private: + const Vox2Poly& data; + Vox2Poly::const_iterator i; + }; + + class Pipe + { NOMEMALIGN + public: + Pipe (const Mesh& mesh, 
const vector& polygon_normals) : + mesh (mesh), + polygon_normals (polygon_normals) { } + + bool operator() (const std::pair>& in, std::pair& out) const + { + const Vox& voxel (in.first); + + // Generate a set of points within this voxel that need to be tested individually + vector to_test; + to_test.reserve (pve_nsamples); + for (size_t x_idx = 0; x_idx != pve_os_ratio; ++x_idx) { + const default_type x = voxel[0] - 0.5 + ((default_type(x_idx) + 0.5) / default_type(pve_os_ratio)); + for (size_t y_idx = 0; y_idx != pve_os_ratio; ++y_idx) { + const default_type y = voxel[1] - 0.5 + ((default_type(y_idx) + 0.5) / default_type(pve_os_ratio)); + for (size_t z_idx = 0; z_idx != pve_os_ratio; ++z_idx) { + const default_type z = voxel[2] - 0.5 + ((default_type(z_idx) + 0.5) / default_type(pve_os_ratio)); + to_test.push_back (Vertex (x, y, z)); + } + } + } - for (Vox2Poly::const_iterator i = voxel2poly.begin(); i != voxel2poly.end(); ++i) { + // Count the number of these points that lie inside the mesh + size_t inside_mesh_count = 0; + for (vector::const_iterator i_p = to_test.begin(); i_p != to_test.end(); ++i_p) { + const Vertex& p (*i_p); - const Vox& voxel (i->first); + default_type best_min_edge_distance = -std::numeric_limits::infinity(); + bool best_result_inside = false; - // Generate a set of points within this voxel that need to be tested individually - vector to_test; - to_test.reserve (Math::pow3 (pve_os_ratio)); - for (size_t x_idx = 0; x_idx != pve_os_ratio; ++x_idx) { - const default_type x = voxel[0] - 0.5 + ((default_type(x_idx) + 0.5) / default_type(pve_os_ratio)); - for (size_t y_idx = 0; y_idx != pve_os_ratio; ++y_idx) { - const default_type y = voxel[1] - 0.5 + ((default_type(y_idx) + 0.5) / default_type(pve_os_ratio)); - for (size_t z_idx = 0; z_idx != pve_os_ratio; ++z_idx) { - const default_type z = voxel[2] - 0.5 + ((default_type(z_idx) + 0.5) / default_type(pve_os_ratio)); - to_test.push_back (Vertex (x, y, z)); - } - } - } + // Only test against those polygons that are near this voxel + for (vector::const_iterator polygon_index = in.second.begin(); polygon_index != in.second.end(); ++polygon_index) { + const Eigen::Vector3& n (polygon_normals[*polygon_index]); - // Count the number of these points that lie inside the mesh - int inside_mesh_count = 0; - for (vector::const_iterator i_p = to_test.begin(); i_p != to_test.end(); ++i_p) { - const Vertex& p (*i_p); + const size_t polygon_num_vertices = (*polygon_index < mesh.num_triangles()) ? 3 : 4; + VertexList v; - default_type best_min_edge_distance = -std::numeric_limits::infinity(); - bool best_result_inside = false; + bool is_inside = false; + default_type min_edge_distance = std::numeric_limits::infinity(); - // Only test against those polygons that are near this voxel - for (vector::const_iterator polygon_index = i->second.begin(); polygon_index != i->second.end(); ++polygon_index) { - const Eigen::Vector3& n (polygon_normals[*polygon_index]); + if (polygon_num_vertices == 3) { - const size_t polygon_num_vertices = (*polygon_index < mesh.num_triangles()) ? 3 : 4; - VertexList v; + mesh.load_triangle_vertices (v, *polygon_index); - bool is_inside = false; - default_type min_edge_distance = std::numeric_limits::infinity(); + // First: is it aligned with the normal? + const Vertex poly_centre ((v[0] + v[1] + v[2]) * (1.0/3.0)); + const Vertex diff (p - poly_centre); + is_inside = (diff.dot (n) <= 0.0); - if (polygon_num_vertices == 3) { + // Second: how well does it project onto this polygon? 
+ const Vertex p_on_plane (p - (n * (diff.dot (n)))); - mesh.load_triangle_vertices (v, *polygon_index); + std::array edge_distances; + Vertex zero = (v[2]-v[0]).cross (n); zero.normalize(); + Vertex one = (v[1]-v[2]).cross (n); one .normalize(); + Vertex two = (v[0]-v[1]).cross (n); two .normalize(); + edge_distances[0] = (p_on_plane-v[0]).dot (zero); + edge_distances[1] = (p_on_plane-v[2]).dot (one); + edge_distances[2] = (p_on_plane-v[1]).dot (two); + min_edge_distance = std::min (edge_distances[0], std::min (edge_distances[1], edge_distances[2])); - // First: is it aligned with the normal? - const Vertex poly_centre ((v[0] + v[1] + v[2]) * (1.0/3.0)); - const Vertex diff (p - poly_centre); - is_inside = (diff.dot (n) <= 0.0); + } else { - // Second: how well does it project onto this polygon? - const Vertex p_on_plane (p - (n * (diff.dot (n)))); + mesh.load_quad_vertices (v, *polygon_index); - std::array edge_distances; - Vertex zero = (v[2]-v[0]).cross (n); zero.normalize(); - Vertex one = (v[1]-v[2]).cross (n); one .normalize(); - Vertex two = (v[0]-v[1]).cross (n); two .normalize(); - edge_distances[0] = (p_on_plane-v[0]).dot (zero); - edge_distances[1] = (p_on_plane-v[2]).dot (one); - edge_distances[2] = (p_on_plane-v[1]).dot (two); - min_edge_distance = std::min (edge_distances[0], std::min (edge_distances[1], edge_distances[2])); + // This may be slightly ill-posed with a quad; no guarantee of fixed normal + // Proceed regardless - } else { + // First: is it aligned with the normal? + const Vertex poly_centre ((v[0] + v[1] + v[2] + v[3]) * 0.25); + const Vertex diff (p - poly_centre); + is_inside = (diff.dot (n) <= 0.0); - mesh.load_quad_vertices (v, *polygon_index); + // Second: how well does it project onto this polygon? + const Vertex p_on_plane (p - (n * (diff.dot (n)))); - // This may be slightly ill-posed with a quad; no guarantee of fixed normal - // Proceed regardless + for (int edge = 0; edge != 4; ++edge) { + // Want an appropriate vector emanating from this edge from which to test the 'on-plane' distance + // (bearing in mind that there may not be a uniform normal) + // For this, I'm going to take a weighted average based on the relative distance between the + // two points at either end of this edge + // Edge is between points p1 and p2; edge 0 is between points 0 and 1 + const Vertex& p0 ((edge-1) >= 0 ? v[edge-1] : v[3]); + const Vertex& p1 (v[edge]); + const Vertex& p2 ((edge+1) < 4 ? v[edge+1] : v[0]); + const Vertex& p3 ((edge+2) < 4 ? v[edge+2] : v[edge-2]); - // First: is it aligned with the normal? - const Vertex poly_centre ((v[0] + v[1] + v[2] + v[3]) * 0.25); - const Vertex diff (p - poly_centre); - is_inside = (diff.dot (n) <= 0.0); + const default_type d1 = (p1 - p_on_plane).norm(); + const default_type d2 = (p2 - p_on_plane).norm(); + // Give more weight to the normal at the point that's closer + Vertex edge_normal = (d2*(p0-p1) + d1*(p3-p2)); + edge_normal.normalize(); - // Second: how well does it project onto this polygon? - const Vertex p_on_plane (p - (n * (diff.dot (n)))); + // Now, how far away is the point within the plane from this edge? 
+ const default_type this_edge_distance = (p_on_plane - p1).dot (edge_normal); + min_edge_distance = std::min (min_edge_distance, this_edge_distance); - for (int edge = 0; edge != 4; ++edge) { - // Want an appropriate vector emanating from this edge from which to test the 'on-plane' distance - // (bearing in mind that there may not be a uniform normal) - // For this, I'm going to take a weighted average based on the relative distance between the - // two points at either end of this edge - // Edge is between points p1 and p2; edge 0 is between points 0 and 1 - const Vertex& p0 ((edge-1) >= 0 ? v[edge-1] : v[3]); - const Vertex& p1 (v[edge]); - const Vertex& p2 ((edge+1) < 4 ? v[edge+1] : v[0]); - const Vertex& p3 ((edge+2) < 4 ? v[edge+2] : v[edge-2]); + } - const default_type d1 = (p1 - p_on_plane).norm(); - const default_type d2 = (p2 - p_on_plane).norm(); - // Give more weight to the normal at the point that's closer - Vertex edge_normal = (d2*(p0-p1) + d1*(p3-p2)); - edge_normal.normalize(); + } - // Now, how far away is the point within the plane from this edge? - const default_type this_edge_distance = (p_on_plane - p1).dot (edge_normal); - min_edge_distance = std::min (min_edge_distance, this_edge_distance); + if (min_edge_distance > best_min_edge_distance) { + best_min_edge_distance = min_edge_distance; + best_result_inside = is_inside; + } } - } + if (best_result_inside) + ++inside_mesh_count; - if (min_edge_distance > best_min_edge_distance) { - best_min_edge_distance = min_edge_distance; - best_result_inside = is_inside; } + out = std::make_pair (voxel, (default_type)inside_mesh_count / (default_type)pve_nsamples); + return true; } - if (best_result_inside) - ++inside_mesh_count; + private: + const Mesh& mesh; + const vector& polygon_normals; - } + }; - assign_pos_of (voxel).to (image); - image.value() = (default_type)inside_mesh_count / (default_type)Math::pow3 (pve_os_ratio); + class Sink + { MEMALIGN(Sink) + public: + Sink (Image& image) : + image (image) { } - } + bool operator() (const std::pair& in) + { + assign_pos_of (in.first).to (image); + assert (!is_out_of_bounds (image)); + image.value() = in.second; + return true; + } + + private: + Image image; + + }; + + Source source (voxel2poly); + Pipe pipe (mesh, polygon_normals); + Sink sink (image); + Thread::run_queue (source, + std::pair>(), + Thread::multi (pipe), + std::pair(), + sink); } diff --git a/src/surface/filter/vertex_transform.cpp b/src/surface/filter/vertex_transform.cpp index bce287d7fc..b5a179944b 100644 --- a/src/surface/filter/vertex_transform.cpp +++ b/src/surface/filter/vertex_transform.cpp @@ -14,7 +14,7 @@ #include "surface/filter/vertex_transform.h" -#include "file/nifti1_utils.h" +#include "file/nifti_utils.h" #include "exception.h" @@ -90,9 +90,9 @@ namespace MR break; case transform_t::FS2REAL: - std::vector< size_t > axes( 3 ); + vector axes( 3 ); auto M = File::NIfTI::adjust_transform( header, axes ); - Eigen::Vector3d cras( 3, 1 ); + Eigen::Vector3 cras( 3, 1 ); for ( size_t i = 0; i < 3; i++ ) { cras[ i ] = M( i, 3 ); From b3e59b0acff52b4b2f356f0793c88724f466ca48 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Sun, 11 Mar 2018 18:55:25 +1100 Subject: [PATCH 0107/1471] Surface::Algos::mesh2image(): Better region filling Previously, after identifying voxels lying on the surface, the image volume was filled inwards from the eight external corners of the FoV to select those voxels outside the surface, and all remaining voxels were labelled as being inside the surface. 
This however failed to account for more complex surface shapes, that could result in separation of some voxels strictly outside the mesh from the FoV corners. This new approach uses the vertex normals to label some voxels just inside / outside the surface, and then exploits them for majority voting labelling of all voxel connected components. --- src/surface/algo/mesh2image.cpp | 127 ++++++++++++++++++++++---------- src/surface/types.h | 1 + 2 files changed, 88 insertions(+), 40 deletions(-) diff --git a/src/surface/algo/mesh2image.cpp b/src/surface/algo/mesh2image.cpp index 0495a1c1b8..01215ebccb 100644 --- a/src/surface/algo/mesh2image.cpp +++ b/src/surface/algo/mesh2image.cpp @@ -42,9 +42,9 @@ namespace MR { // For initial segmentation of mesh - identify voxels on the mesh, inside & outside - enum vox_mesh_t { UNDEFINED, ON_MESH, OUTSIDE, INSIDE }; + enum vox_mesh_t { UNDEFINED, ON_MESH, PRELIM_OUTSIDE, PRELIM_INSIDE, FILL_TEMP, OUTSIDE, INSIDE }; - ProgressBar progress ("converting mesh to PVE image", 6); + ProgressBar progress ("converting mesh to partial volume image", 7); // For speed, want the vertex data to be in voxel positions Filter::VertexTransform transform (image); @@ -52,6 +52,10 @@ namespace MR Mesh mesh; transform (mesh_realspace, mesh); + // These are needed now for interior filling section of algorithm + if (!mesh.have_normals()) + mesh.calculate_normals(); + static const Vox adj_voxels[6] = { { -1, 0, 0 }, { +1, 0, 0 }, { 0, -1, 0 }, @@ -104,6 +108,7 @@ namespace MR } // For all voxels within this rectangular region, assign this polygon to the map + // TODO This is not ideal; too generous in some areas, results in an empty set in others Vox voxel; for (voxel[2] = lower_bound[2]; voxel[2] <= upper_bound[2]; ++voxel[2]) { for (voxel[1] = lower_bound[1]; voxel[1] <= upper_bound[1]; ++voxel[1]) { @@ -131,50 +136,91 @@ namespace MR ++progress; - // Find all voxels that are not partial-volumed with the mesh, and are not inside the mesh - // Use a corner of the image FoV to commence filling of the volume, and then check that all - // eight corners have been flagged as outside the volume - const Vox corner_voxels[8] = { - Vox ( 0, 0, 0), - Vox ( 0, 0, H.size (2) - 1), - Vox ( 0, H.size (1) - 1, 0), - Vox ( 0, H.size (1) - 1, H.size (2) - 1), - Vox (H.size (0) - 1, 0, 0), - Vox (H.size (0) - 1, 0, H.size (2) - 1), - Vox (H.size (0) - 1, H.size (1) - 1, 0), - Vox (H.size (0) - 1, H.size (1) - 1, H.size (2) - 1)}; - - // TODO This is slow; is there a faster implementation? - // This is essentially a connected-component analysis... - vector to_expand; - to_expand.push_back (corner_voxels[0]); - assign_pos_of (corner_voxels[0]).to (init_seg); - init_seg.value() = vox_mesh_t::OUTSIDE; - do { - const Vox centre_voxel (to_expand.back()); - to_expand.pop_back(); - for (size_t adj_vox_idx = 0; adj_vox_idx != 6; ++adj_vox_idx) { - const Vox this_voxel (centre_voxel + adj_voxels[adj_vox_idx]); - assign_pos_of (this_voxel).to (init_seg); - if (!is_out_of_bounds (init_seg) && init_seg.value() == vox_mesh_t (UNDEFINED)) { - init_seg.value() = vox_mesh_t (OUTSIDE); - to_expand.push_back (this_voxel); + // TODO Alternative implementation of filling in the centre of the mesh + // Rather than selecting the eight external corners and filling in outside the + // mesh (which may omit some areas), selecting anything remaining as 'inside', + // fill inwards from vertices according to their normals, and select anything + // remaining as 'outside'. 
+ std::stack to_expand; + for (size_t i = 0; i != mesh.num_vertices(); ++i) { + const Vox voxel (mesh.vert (i)); + Eigen::Vector3 normal (mesh.norm (i)); + // Scale the normal such that the maximum length along any individual axis is 1.0 (but may be negative) + normal /= normal.array().abs().maxCoeff(); + // Use this to select an adjacent voxel outside the structure (based on the + const Vox outside_neighbour (voxel + Vox(normal)); + // Add this to the set of exterior voxels to be expanded if appropriate + assign_pos_of (outside_neighbour).to (init_seg); + if (!is_out_of_bounds (init_seg)) { + if (init_seg.value() == vox_mesh_t::UNDEFINED) { + init_seg.value() = vox_mesh_t::PRELIM_OUTSIDE; + //to_expand.push (outside_neighbour); + } + } + // Now do the same for inside the structure + const Vox inside_neighbour (voxel - Vox(normal)); + assign_pos_of (inside_neighbour).to (init_seg); + if (!is_out_of_bounds (init_seg)) { + if (init_seg.value() == vox_mesh_t::UNDEFINED) { + init_seg.value() = vox_mesh_t::PRELIM_INSIDE; + //to_expand.push (inside_neighbour); } } - } while (!to_expand.empty()); + } ++progress; - for (size_t cnr_idx = 0; cnr_idx != 8; ++cnr_idx) { - assign_pos_of (corner_voxels[cnr_idx]).to (init_seg); - if (init_seg.value() == vox_mesh_t (UNDEFINED)) - throw Exception ("Mesh is not bound within image field of view"); + // Can't guarantee that mesh might have a single isolated polygon pointing the wrong way + // Therefore, need to: + // - Select voxels both inside and outside the mesh to expand + // - When expanding each region, count the number of pre-assigned voxels both inside and outside + // - For the final region selection, assign values to voxels based on a majority vote + Image seed (init_seg); + vector to_fill; + for (auto l = Loop(seed) (seed); l; ++l) { + if (seed.value() == vox_mesh_t::PRELIM_INSIDE || seed.value() == vox_mesh_t::PRELIM_OUTSIDE) { + size_t prelim_inside_count = 0, prelim_outside_count = 0; + if (seed.value() == vox_mesh_t::PRELIM_INSIDE) + prelim_inside_count = 1; + else + prelim_outside_count = 1; + to_expand.push (Vox (seed.index(0), seed.index(1), seed.index(2))); + to_fill.assign (1, to_expand.top()); + do { + const Vox voxel (to_expand.top()); + to_expand.pop(); + for (size_t adj_vox_idx = 0; adj_vox_idx != 6; ++adj_vox_idx) { + const Vox adj_voxel (voxel + adj_voxels[adj_vox_idx]); + assign_pos_of (adj_voxel).to (init_seg); + if (!is_out_of_bounds (init_seg)) { + const uint8_t adj_value = init_seg.value(); + if (adj_value == vox_mesh_t::UNDEFINED || adj_value == vox_mesh_t::PRELIM_INSIDE || adj_value == vox_mesh_t::PRELIM_OUTSIDE) { + if (adj_value == vox_mesh_t::PRELIM_INSIDE) + ++prelim_inside_count; + else if (adj_value == vox_mesh_t::PRELIM_OUTSIDE) + ++prelim_outside_count; + to_expand.push (adj_voxel); + to_fill.push_back (adj_voxel); + init_seg.value() = vox_mesh_t::FILL_TEMP; + } + } + } + } while (to_expand.size()); + if (prelim_inside_count == prelim_outside_count) + throw Exception ("Mapping mesh to image failed: Unable to label connected voxel region as inside or outside mesh"); + const vox_mesh_t fill_value = (prelim_inside_count > prelim_outside_count ? 
vox_mesh_t::INSIDE : vox_mesh_t::OUTSIDE); + for (auto voxel : to_fill) { + assign_pos_of (voxel).to (init_seg); + init_seg.value() = fill_value; + } + to_fill.clear(); + } } + ++progress; - - // Find those voxels that remain unassigned, and set them to INSIDE - for (auto l = Loop (init_seg) (init_seg); l; ++l) { - if (init_seg.value() == vox_mesh_t (UNDEFINED)) - init_seg.value() = vox_mesh_t (INSIDE); + // Any voxel not yet processed must lie outside the structure(s) + for (auto l = Loop(init_seg) (init_seg); l; ++l) { + if (init_seg.value() == vox_mesh_t::UNDEFINED) + init_seg.value() = vox_mesh_t::OUTSIDE; } ++progress; @@ -185,6 +231,7 @@ namespace MR case vox_mesh_t (ON_MESH): image.value() = 0.5; break; case vox_mesh_t (OUTSIDE): image.value() = 0.0; break; case vox_mesh_t (INSIDE): image.value() = 1.0; break; + default: assert (0); } } ++progress; diff --git a/src/surface/types.h b/src/surface/types.h index b00ae5c7b3..39e1df3d7c 100644 --- a/src/surface/types.h +++ b/src/surface/types.h @@ -39,6 +39,7 @@ namespace MR { MEMALIGN (Vox) public: using Eigen::Array3i::Array3i; + Vox (const Eigen::Vector3& p) : Eigen::Array3i (int(std::round (p[0])), int(std::round (p[1])), int(std::round (p[2]))) { } bool operator< (const Vox& i) const { return ((*this)[2] == i[2] ? (((*this)[1] == i[1]) ? ((*this)[0] < i[0]) : ((*this)[1] < i[1])) : ((*this)[2] < i[2])); From c24cb4d584d56d9d2f720b1b608d0b9c2c91f80d Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Mon, 12 Mar 2018 16:00:44 +1100 Subject: [PATCH 0108/1471] Surface::Algo::mesh2image: Improves estimates The algorithm used for labeling each sample point within a voxel as inside or outside a mesh relies on only those polygons intersecting that voxel being tested. This however was not strictly the case previously: Each polygon would be tested in all voxels within the axis-aligned bounding box encapsulating the polygon. This change ensures that only those polygons strictly intersecting any particular voxel are included in testing. This results in more realistic-looking partial volume estimate maps. It also decreases execution time by about 20-25 percent. 
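The overlap test introduced by this commit is an application of the Separating Axis Theorem: an axis-aligned voxel and a polygon are disjoint if and only if some candidate axis (one of the three image axes, the polygon normal, or the cross product of an image axis with a polygon edge) yields non-overlapping projection intervals for the two objects. Below is a minimal, self-contained sketch of the per-axis interval test only, assuming Eigen double-precision vectors; the names and signature are illustrative and do not correspond to the code in the patch that follows.

  #include <Eigen/Dense>
  #include <algorithm>
  #include <array>
  #include <limits>

  // Illustrative sketch only (not the implementation in this patch):
  // returns true if 'axis' separates a unit voxel centred at 'vox_centre'
  // from a triangle 'tri' (all coordinates expressed in voxel space).
  // If no candidate axis separates the two objects, they overlap.
  bool is_separating_axis (const Eigen::Vector3d& axis,
                           const Eigen::Vector3d& vox_centre,
                           const std::array<Eigen::Vector3d, 3>& tri)
  {
    double vox_low  =  std::numeric_limits<double>::infinity();
    double vox_high = -std::numeric_limits<double>::infinity();
    // Project the eight corners of the voxel onto the candidate axis
    for (int i = 0; i != 8; ++i) {
      const Eigen::Vector3d corner = vox_centre + Eigen::Vector3d (i & 1 ? 0.5 : -0.5,
                                                                   i & 2 ? 0.5 : -0.5,
                                                                   i & 4 ? 0.5 : -0.5);
      const double p = axis.dot (corner);
      vox_low  = std::min (vox_low,  p);
      vox_high = std::max (vox_high, p);
    }
    // Project the triangle vertices onto the same axis
    double tri_low  =  std::numeric_limits<double>::infinity();
    double tri_high = -std::numeric_limits<double>::infinity();
    for (const auto& v : tri) {
      const double p = axis.dot (v);
      tri_low  = std::min (tri_low,  p);
      tri_high = std::max (tri_high, p);
    }
    // Disjoint projection intervals imply this axis separates the two objects
    return (tri_low > vox_high || vox_low > tri_high);
  }

A polygon is assigned to a voxel only if this test fails for every candidate axis, which is what restricts partial-volume testing to voxels genuinely intersected by the surface.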
--- src/surface/algo/mesh2image.cpp | 139 ++++++++++++++++++++++++-------- 1 file changed, 107 insertions(+), 32 deletions(-) diff --git a/src/surface/algo/mesh2image.cpp b/src/surface/algo/mesh2image.cpp index 01215ebccb..59866b4aea 100644 --- a/src/surface/algo/mesh2image.cpp +++ b/src/surface/algo/mesh2image.cpp @@ -108,35 +108,105 @@ namespace MR } // For all voxels within this rectangular region, assign this polygon to the map - // TODO This is not ideal; too generous in some areas, results in an empty set in others + // Use the Separating Axis Theorem to be more stringent as to which voxels this + // polygon will be processed in + auto overlap = [&] (const Vox& vox, const size_t poly_index) -> bool { + + VertexList vertices; + if (num_vertices == 3) + mesh.load_triangle_vertices (vertices, poly_index); + else + mesh.load_quad_vertices (vertices, poly_index - mesh.num_triangles()); + + // Test whether or not the two objects can be separated via projection onto an axis + auto separating_axis = [&] (const Eigen::Vector3& axis) -> bool { + default_type voxel_low = std::numeric_limits::infinity(); + default_type voxel_high = -std::numeric_limits::infinity(); + default_type poly_low = std::numeric_limits::infinity(); + default_type poly_high = -std::numeric_limits::infinity(); + + static const Eigen::Vector3 voxel_offsets[8] = { { -0.5, -0.5, -0.5 }, + { -0.5, -0.5, 0.5 }, + { -0.5, 0.5, -0.5 }, + { -0.5, 0.5, 0.5 }, + { 0.5, -0.5, -0.5 }, + { 0.5, -0.5, 0.5 }, + { 0.5, 0.5, -0.5 }, + { 0.5, 0.5, 0.5 } }; + + for (size_t i = 0; i != 8; ++i) { + const Eigen::Vector3 v (vox.matrix().cast() + voxel_offsets[i]); + const default_type projection = axis.dot (v); + voxel_low = std::min (voxel_low, projection); + voxel_high = std::max (voxel_high, projection); + } + + for (const auto& v : vertices) { + const default_type projection = axis.dot (v); + poly_low = std::min (poly_low, projection); + poly_high = std::max (poly_high, projection); + } + + // Is this a separating axis? 
+ return (poly_low > voxel_high || voxel_low > poly_high); + }; + + // The following axes need to be tested as potential separating axes: + // x, y, z + // All cross-products between voxel and polygon edges + // Polygon normal + for (size_t i = 0; i != 3; ++i) { + Eigen::Vector3 axis (0.0, 0.0, 0.0); + axis[i] = 1.0; + if (separating_axis (axis)) + return false; + for (size_t j = 0; j != num_vertices; ++j) { + if (separating_axis (axis.cross (vertices[j+1] - vertices[j]))) + return false; + } + if (separating_axis (axis.cross (vertices[num_vertices-1] - vertices[0]))) + return false; + } + if (separating_axis (polygon_normals[poly_index])) + return false; + + // No axis has been found that separates the two objects + // Therefore, the two objects overlap + return true; + }; + Vox voxel; for (voxel[2] = lower_bound[2]; voxel[2] <= upper_bound[2]; ++voxel[2]) { for (voxel[1] = lower_bound[1]; voxel[1] <= upper_bound[1]; ++voxel[1]) { for (voxel[0] = lower_bound[0]; voxel[0] <= upper_bound[0]; ++voxel[0]) { - vector this_voxel_polys; - //#if __clang__ - // Vox2Poly::const_iterator existing = voxel2poly.find (voxel); - //#else - // Vox2Poly::iterator existing = voxel2poly.find (voxel); - //#endif - Vox2Poly::const_iterator existing = voxel2poly.find (voxel); - if (existing != voxel2poly.end()) { - this_voxel_polys = existing->second; - voxel2poly.erase (existing); - } else { - // Only call this once each voxel, regardless of the number of intersecting polygons - assign_pos_of (voxel).to (init_seg); - init_seg.value() = ON_MESH; - } - this_voxel_polys.push_back (poly_index); - voxel2poly.insert (std::make_pair (voxel, this_voxel_polys)); - } } } + // Rather than adding this polygon to the list of polygons to test for + // every single voxel within this 3D bounding box, only test it within + // those voxels that the polygon actually intersects + if (overlap (voxel, poly_index)) { + vector this_voxel_polys; + // Has this voxel already been intersected by at least one polygon? 
+ // If it has, we need to concatenate this polygon to the list + // (which involves deleting the existing entry then re-writing the concatenated list); + // If it has not, we're adding a new entry to the list of voxels to be tested, + // with only one entry in the list for that voxel + Vox2Poly::const_iterator existing = voxel2poly.find (voxel); + if (existing != voxel2poly.end()) { + this_voxel_polys = existing->second; + voxel2poly.erase (existing); + } else { + // Only call this once each voxel, regardless of the number of intersecting polygons + assign_pos_of (voxel).to (init_seg); + init_seg.value() = ON_MESH; + } + this_voxel_polys.push_back (poly_index); + voxel2poly.insert (std::make_pair (voxel, this_voxel_polys)); + } } } } } ++progress; - // TODO Alternative implementation of filling in the centre of the mesh + // New implementation of filling in the centre of the mesh // Rather than selecting the eight external corners and filling in outside the // mesh (which may omit some areas), selecting anything remaining as 'inside', // fill inwards from vertices according to their normals, and select anything @@ -264,30 +334,33 @@ namespace MR public: Pipe (const Mesh& mesh, const vector& polygon_normals) : mesh (mesh), - polygon_normals (polygon_normals) { } + polygon_normals (polygon_normals) - bool operator() (const std::pair>& in, std::pair& out) const { - const Vox& voxel (in.first); - // Generate a set of points within this voxel that need to be tested individually - vector to_test; - to_test.reserve (pve_nsamples); + offsets_to_test.reset(new vector()); + offsets_to_test->reserve (pve_nsamples); for (size_t x_idx = 0; x_idx != pve_os_ratio; ++x_idx) { - const default_type x = voxel[0] - 0.5 + ((default_type(x_idx) + 0.5) / default_type(pve_os_ratio)); + const default_type x = -0.5 + ((default_type(x_idx) + 0.5) / default_type(pve_os_ratio)); for (size_t y_idx = 0; y_idx != pve_os_ratio; ++y_idx) { - const default_type y = voxel[1] - 0.5 + ((default_type(y_idx) + 0.5) / default_type(pve_os_ratio)); + const default_type y = -0.5 + ((default_type(y_idx) + 0.5) / default_type(pve_os_ratio)); for (size_t z_idx = 0; z_idx != pve_os_ratio; ++z_idx) { - const default_type z = voxel[2] - 0.5 + ((default_type(z_idx) + 0.5) / default_type(pve_os_ratio)); - to_test.push_back (Vertex (x, y, z)); + const default_type z = -0.5 + ((default_type(z_idx) + 0.5) / default_type(pve_os_ratio)); + offsets_to_test->push_back (Vertex (x, y, z)); } } } + } + + bool operator() (const std::pair>& in, std::pair& out) const + { + const Vox& voxel (in.first); // Count the number of these points that lie inside the mesh size_t inside_mesh_count = 0; - for (vector::const_iterator i_p = to_test.begin(); i_p != to_test.end(); ++i_p) { - const Vertex& p (*i_p); + for (vector::const_iterator i_p = offsets_to_test->begin(); i_p != offsets_to_test->end(); ++i_p) { + Vertex p (*i_p); + p += Eigen::Vector3 (voxel[0], voxel[1], voxel[2]); default_type best_min_edge_distance = -std::numeric_limits::infinity(); bool best_result_inside = false; @@ -383,6 +456,8 @@ namespace MR const Mesh& mesh; const vector& polygon_normals; + std::shared_ptr> offsets_to_test; + }; class Sink From 558ff42972af54201e6c2d3493de0e854a65fb7a Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Fri, 16 Mar 2018 16:46:33 +1100 Subject: [PATCH 0109/1471] 5ttgen: New algorithm HSVS "Hybrid Surface and Volumetric Segmentation" This algorithm uses a combination of FreeSurfer surface-based representations, FreeSurfer voxel-based segmentations, FSL FIRST 
and FSL FAST to provide a 5TT image. --- bin/5ttgen | 2 +- lib/mrtrix3/_5ttgen/hsvs.py | 301 ++++++++++++++++++++++++++++++++++++ lib/mrtrix3/fsl.py | 5 +- lib/mrtrix3/run.py | 4 + 4 files changed, 309 insertions(+), 3 deletions(-) create mode 100644 lib/mrtrix3/_5ttgen/hsvs.py diff --git a/bin/5ttgen b/bin/5ttgen index 3414b1c889..a148978b14 100755 --- a/bin/5ttgen +++ b/bin/5ttgen @@ -49,7 +49,7 @@ alg.execute() stderr = run.command('5ttcheck result.mif')[1] if stderr: app.warn('Generated image does not perfectly conform to 5TT format:') - for line in stderr: + for line in stderr.splitlines(): app.warn(line) run.command('mrconvert result.mif ' + path.fromUser(app.args.output, True) + (' -force' if app.forceOverwrite else '')) diff --git a/lib/mrtrix3/_5ttgen/hsvs.py b/lib/mrtrix3/_5ttgen/hsvs.py new file mode 100644 index 0000000000..6848a10701 --- /dev/null +++ b/lib/mrtrix3/_5ttgen/hsvs.py @@ -0,0 +1,301 @@ +def initialise(base_parser, subparsers): + import argparse + from mrtrix3 import app + parser = subparsers.add_parser('hsvs', author='Robert E. Smith (robert.smith@florey.edu.au)', synopsis='Generate a 5TT image based on Hybrid Surface and Volume Segmentation (HSVS), using FreeSurfer and FSL tools', parents=[base_parser]) + # TODO Permit either FreeSurfer directory or T1 image as input + parser.add_argument('input', help='The input FreeSurfer subject directory') + parser.add_argument('output', help='The output 5TT image') + + + +def checkOutputPaths(): + pass + + + +def getInputs(): + # FreeSurfer files will be accessed in-place; no need to pre-convert them into the temporary directory + pass + + + +def execute(): + import os, sys + from mrtrix3 import app, fsl, path, run + from distutils.spawn import find_executable + + def checkFile(filepath): + import os + if not os.path.isfile(filepath): + app.error('Required input file missing (expected location: ' + filepath + ')') + + def checkDir(dirpath): + import os + if not os.path.isdir(dirpath): + app.error('Unable to find sub-directory \'' + dirpath + '\' within input directory') + + subject_dir = os.path.abspath(path.fromUser(app.args.input, False)) + if not os.path.isdir(subject_dir): + app.error('Input to hsvs algorithm must be a directory') + surf_dir = os.path.join(subject_dir, 'surf') + mri_dir = os.path.join(subject_dir, 'mri') + checkDir(surf_dir) + checkDir(mri_dir) + aparc_image = os.path.join(mri_dir, 'aparc+aseg.mgz') + mask_image = os.path.join(mri_dir, 'brainmask.mgz') + reg_file = os.path.join(mri_dir, 'transforms', 'talairach.xfm') + checkFile(aparc_image) + checkFile(mask_image) + checkFile(reg_file) + + sgm_first_map = { } + have_first = True + have_fast = True + fsl_path = os.environ.get('FSLDIR', '') + if fsl_path: + # Use brain-extracted, bias-corrected image for FSL tools + norm_image = os.path.join(mri_dir, 'norm.mgz') + checkFile(norm_image) + run.command('mrconvert ' + norm_image + ' T1.nii -stride -1,+2,+3') + # Verify FAST availability + fast_cmd = fsl.exeName('fast', False) + if fast_cmd: + if fast_cmd == 'fast': + fast_suffix = fsl.suffix() + else: + fast_suffix = '.nii.gz' + else: + have_fast = False + app.warn('Could not find FSL program fast; script will not use fast for cerebellar tissue segmentation') + # Verify FIRST availability + first_cmd = fsl.exeName('run_first_all', False) + if first_cmd: + first_atlas_path = os.path.join(fsl_path, 'data', 'first', 'models_336_bin') + if os.path.isdir(first_atlas_path): + sgm_first_map = { 'L_Accu':'Left-Accumbens-area', 'R_Accu':'Right-Accumbens-area', \ + 
'L_Amyg':'Left-Amygdala', 'R_Amyg':'Right-Amygdala', \ + 'L_Caud':'Left-Caudate', 'R_Caud':'Right-Caudate', \ + 'L_Hipp':'Left-Hippocampus', 'R_Hipp':'Right-Hippocampus', \ + 'L_Pall':'Left-Pallidum', 'R_Pall':'Right-Pallidum', \ + 'L_Puta':'Left-Putamen', 'R_Puta':'Right-Putamen', \ + 'L_Thal':'Left-Thalamus-Proper', 'R_Thal':'Right-Thalamus-Proper' } + else: + app.warn('Atlases required for FSL\'s FIRST program not installed; script will proceed without using FIRST for sub-cortical grey matter segmentation') + else: + have_first = False + app.warn('Could not find FSL program run_first_all; script will proceed without using FIRST for sub-cortical grey matter segmentation') + else: + have_first = have_fast = False + app.warn('Environment variable FSLDIR is not set; script will run without FSL components') + + + + + if have_first: + app.console('Running FSL FIRST to segment sub-cortical grey matter structures') + run.command(first_cmd + ' -s ' + ','.join(sgm_first_map.keys()) + ' -i T1.nii -b -o first') + fsl.checkFirst('first', sgm_first_map.keys()) + progress = app.progressBar('Mapping sub-cortical structures segmented by FIRST from surface to voxel representation', len(sgm_first_map)) + for key, value in sgm_first_map.items(): + vtk_in_path = 'first-' + key + '_first.vtk' + run.command('meshconvert ' + vtk_in_path + ' first-' + key + '_transformed.vtk -transform first2real T1.nii') + run.command('mesh2voxel first-' + key + '_transformed.vtk ' + aparc_image + ' ' + value + '.mif') + progress.increment() + progress.done() + + + + # TODO If releasing, this should ideally read from FreeSurferColorLUT.txt to get the indices + # TODO Currently mesh2voxel assumes a single closed surface; + # may need to run a connected component analysis first for some structures e.g. lesions + # TODO Need to figure out a solution for identifying the vertices at the bottom of the + # brain stem, and labeling those as outside the brain, so that streamlines are permitted + # to terminate there. 
+ + # Honour -sgm_amyg_hipp option + ah = 2 if app.args.sgm_amyg_hipp else 0 + + structures = [ ( 4, 3, 'Left-Lateral-Ventricle'), + ( 5, 3, 'Left-Inf-Lat-Vent'), + ( 6, 3, 'Left-Cerebellum-Exterior'), + ( 7, 2, 'Left-Cerebellum-White-Matter'), + ( 8, 1, 'Left-Cerebellum-Cortex'), + ( 9, 1, 'Left-Thalamus'), + (10, 1, 'Left-Thalamus-Proper'), + (11, 1, 'Left-Caudate'), + (12, 1, 'Left-Putamen'), + (13, 1, 'Left-Pallidum'), + (14, 3, '3rd-Ventricle'), + (15, 3, '4th-Ventricle'), + (16, 2, 'Brain-Stem'), + (17, ah, 'Left-Hippocampus'), + (18, ah, 'Left-Amygdala'), + (24, 3, 'CSF'), + (25, 4, 'Left-Lesion'), + (26, 1, 'Left-Accumbens-area'), + (27, 1, 'Left-Substancia-Nigra'), + (30, 3, 'Left-vessel'), + (31, 1, 'Left-choroid-plexus'), + (43, 3, 'Right-Lateral-Ventricle'), + (44, 3, 'Right-Inf-Lat-Vent'), + (45, 3, 'Right-Cerebellum-Exterior'), + (46, 2, 'Right-Cerebellum-White-Matter'), + (47, 1, 'Right-Cerebellum-Cortex'), + (48, 1, 'Right-Thalamus'), + (49, 1, 'Right-Thalamus-Proper'), + (50, 1, 'Right-Caudate'), + (51, 1, 'Right-Putamen'), + (52, 1, 'Right-Pallidum'), + (53, ah, 'Right-Hippocampus'), + (54, ah, 'Right-Amygdala'), + (57, 4, 'Right-Lesion'), + (58, 1, 'Right-Accumbens-area'), + (59, 1, 'Right-Substancia-Nigra'), + (62, 3, 'Right-vessel'), + (63, 1, 'Right-choroid-plexus'), + (72, 3, '5th-Ventricle'), + (192, 2, 'Corpus_Callosum'), + (250, 2, 'Fornix') ] + + + + # Get the main cerebrum segments; these are already smooth + # FIXME There may be some minor mismatch between the WM and pial segments within the medial section + # where no optimisation is performed, but vertices are simply defined in order to guarantee + # closed surfaces. Ideally these should be removed from the CGM tissue. + progress = app.progressBar('Mapping FreeSurfer cortical reconstruction to partial volume images', 4) + for hemi in [ 'lh', 'rh' ]: + for basename in [ hemi+'.white', hemi+'.pial' ]: + filepath = os.path.join(surf_dir, basename) + checkFile(filepath) + transformed_path = basename + '_realspace.obj' + run.command('meshconvert ' + filepath + ' ' + transformed_path + ' -binary -transform fs2real ' + aparc_image) + run.command('mesh2voxel ' + transformed_path + ' ' + aparc_image + ' ' + basename + '.mif') + progress.increment() + progress.done() + + # Get other structures that need to be converted from the voxel image + progress = app.progressBar('Smoothing non-cortex structures segmented by FreeSurfer', len(structures)) + for (index, tissue, name) in structures: + # Don't segment anything for which we have instead obtained estimates using FIRST + if not name in sgm_first_map.values(): + # If we're going to subsequently use fast, don't bother smoothing cerebellar segmentations; + # we're only going to use them to produce a mask anyway + if 'Cerebellum' in name and have_fast: + run.command('mrcalc ' + aparc_image + ' ' + str(index) + ' -eq ' + name + '.mif -datatype float32') + else: + run.command('mrcalc ' + aparc_image + ' ' + str(index) + ' -eq - | mrmesh - -threshold 0.5 ' + name + '_init.obj') + run.command('meshfilter ' + name + '_init.obj smooth ' + name + '.obj') + run.command('mesh2voxel ' + name + '.obj ' + aparc_image + ' ' + name + '.mif') + progress.increment() + progress.done() + + # Construct images with the partial volume of each tissue + progress = app.progressBar('Combining segmentations of all structures corresponding to each tissue type', 5) + for tissue in range(0,5): + image_list = [ n + '.mif' for (i, t, n) in structures if t == tissue ] + # For cortical GM and WM, need to 
also add the main cerebrum segments + if tissue == 0: + image_list.extend([ 'lh.pial.mif', 'rh.pial.mif' ]) + elif tissue == 2: + image_list.extend([ 'lh.white.mif', 'rh.white.mif' ]) + run.command('mrmath ' + ' '.join(image_list) + ' sum - | mrcalc - 1.0 -min tissue' + str(tissue) + '_init.mif') + progress.increment() + progress.done() + + # TODO Need to fill in any potential gaps in the WM image in the centre of the brain + # This can hopefully be done with a connected-component analysis: Take just the WM image, and + # fill in any gaps (i.e. select the inverse, select the largest connected component, invert again) + # Make sure that floating-point values are handled appropriately + + # Combine these images together using the appropriate logic in order to form the 5TT image + progress = app.progressBar('Combining tissue images in order to preserve 5TT format requirements', 10) + run.command('mrconvert tissue4_init.mif tissue4.mif') + progress.increment() + run.command('mrcalc tissue3_init.mif tissue3_init.mif tissue4.mif -add 1.0 -sub 0.0 -max -sub tissue3.mif') + progress.increment() + run.command('mrmath tissue3.mif tissue4.mif sum tissuesum_34.mif') + progress.increment() + run.command('mrcalc tissue1_init.mif tissue1_init.mif tissuesum_34.mif -add 1.0 -sub 0.0 -max -sub tissue1.mif') + progress.increment() + run.command('mrmath tissuesum_34.mif tissue1.mif sum tissuesum_134.mif') + progress.increment() + run.command('mrcalc tissue2_init.mif tissue2_init.mif tissuesum_134.mif -add 1.0 -sub 0.0 -max -sub tissue2.mif') + progress.increment() + run.command('mrmath tissuesum_134.mif tissue2.mif sum tissuesum_1234.mif') + progress.increment() + run.command('mrcalc tissue0_init.mif tissue0_init.mif tissuesum_1234.mif -add 1.0 -sub 0.0 -max -sub tissue0.mif') + progress.increment() + run.command('mrmath tissuesum_1234.mif tissue0.mif sum tissuesum_01234.mif') + progress.increment() + + # For all voxels within FreeSurfer's brain mask, add to the CSF image in order to make the sum 1.0 + run.command('mrcalc 1.0 tissuesum_01234.mif -sub ' + mask_image + ' 0.0 -gt -mult csf_fill.mif') + progress.increment() + run.command('mrcalc tissue3.mif csf_fill.mif -add tissue3_filled.mif') + progress.done() + + # Branch depending on whether or not FSL fast will be used to re-segment the cerebellum + if have_fast: + + # Generate a mask of all voxels classified as cerebellum by FreeSurfer + cerebellar_indices = [ i for (i, t, n) in structures if 'Cerebellum' in n ] + cerebellar_submask_list = [ ] + progress = app.progressBar('Generating whole-cerebellum mask from FreeSurfer segmentations', len(cerebellar_indices)+1) + for index in cerebellar_indices: + filename = 'Cerebellum_' + str(index) + '.mif' + run.command('mrcalc ' + aparc_image + ' ' + str(index) + ' -eq ' + filename + ' -datatype bit') + cerebellar_submask_list.append(filename) + progress.increment() + run.command('mrmath ' + ' '.join(cerebellar_submask_list) + ' sum Cerebellar_mask.mif') + progress.done() + + app.console('Running FSL fast to segment the cerebellum based on intensity information') + + # FAST image input needs to be pre-masked + run.command('mrcalc T1.nii Cerebellar_mask.mif -mult - | mrconvert - T1_cerebellum.nii -stride -1,+2,+3') + + # Run FSL FAST just within the cerebellum + # TESTME Should bias field estimation be disabled within fast? 
+ run.command(fast_cmd + ' -N T1_cerebellum.nii') + fast_output_prefix = 'T1_cerebellum' + + # Generate the revised tissue images, using output from FAST inside the cerebellum and + # output from previous processing everywhere else + # Note that the middle intensity (grey matter) in the FAST output here gets assigned + # to the sub-cortical grey matter component + progress = app.progressBar('Introducing intensity-based cerebellar segmentation into the 5TT image', 5) + + # Some of these voxels may have a non-zero cortical GM component. + # In that case, let's find a multiplier to apply to all tissues (including CGM) such that the sum is 1.0 + run.command('mrcalc 1.0 tissue0.mif 1.0 -add -div Cerebellar_mask.mif -mult Cerebellar_multiplier.mif') + progress.increment() + run.command('mrcalc Cerebellar_mask.mif Cerebellar_multiplier.mif 1.0 -if tissue0.mif -mult tissue0_fast.mif') + progress.increment() + run.command('mrcalc Cerebellar_mask.mif ' + fast_output_prefix + '_pve_0' + fast_suffix + ' Cerebellar_multiplier.mif -mult tissue3_filled.mif -if tissue3_filled_fast.mif') + progress.increment() + run.command('mrcalc Cerebellar_mask.mif ' + fast_output_prefix + '_pve_1' + fast_suffix + ' Cerebellar_multiplier.mif -mult tissue1.mif -if tissue1_fast.mif') + progress.increment() + run.command('mrcalc Cerebellar_mask.mif ' + fast_output_prefix + '_pve_2' + fast_suffix + ' Cerebellar_multiplier.mif -mult tissue2.mif -if tissue2_fast.mif') + progress.done() + + # Finally, concatenate the volumes to produce the 5TT image + run.command('mrcat tissue0_fast.mif tissue1_fast.mif tissue2_fast.mif tissue3_filled_fast.mif tissue4.mif 5TT.mif') + + else: + run.command('mrcat tissue0.mif tissue1.mif tissue2.mif tissue3_filled.mif tissue4.mif 5TT.mif') + + # Maybe don't go off all tissues here, since FreeSurfer's mask can be fairly liberal; + # instead get just a voxel clearance from all other tissue types (maybe two) + if app.args.nocrop: + run.function(os.rename, '5TT.mif', 'result.mif') + else: + run.command('mrconvert 5TT.mif -coord 3 0,1,2,4 - | mrmath - sum - -axis 3 | mrthreshold - - -abs 0.001 | maskfilter - dilate crop_mask.mif') + run.command('mrcrop 5TT.mif result.mif -mask crop_mask.mif') + + + + app.warn('Script algorithm is not yet capable of performing requisite image modifications in order to ' + 'permit streamlines travelling from the brain stem down the spinal column. Recommend using ' + '5ttedit -none, in conjunction with a manually-drawn ROI labelling the bottom part of the ' + 'brain stem, such that streamlines in this region are characterised by ACT as exiting the image.') diff --git a/lib/mrtrix3/fsl.py b/lib/mrtrix3/fsl.py index 43f2e1d9d9..3a40aea2dd 100644 --- a/lib/mrtrix3/fsl.py +++ b/lib/mrtrix3/fsl.py @@ -89,7 +89,7 @@ def exeName(name): #pylint: disable=unused-variable # FSL commands will generate based on the suffix() function, the FSL binaries themselves # ignore the FSLOUTPUTTYPE environment variable. 
Therefore, the safest approach is: # Whenever receiving an output image from an FSL command, explicitly search for the path -def findImage(name): #pylint: disable=unused-variable +def findImage(name, *required=True): #pylint: disable=unused-variable import os from mrtrix3 import app prefix = os.path.join(os.path.dirname(name), os.path.basename(name).split('.')[0]) @@ -100,7 +100,8 @@ def findImage(name): #pylint: disable=unused-variable if os.path.isfile(prefix + suf): app.debug('Expected image at \"' + prefix + suffix() + '\", but found at \"' + prefix + suf + '\"') return prefix + suf - app.error('Unable to find FSL output file for path \"' + name + '\"') + if required: + app.error('Unable to find FSL output file for path \"' + name + '\"') return '' diff --git a/lib/mrtrix3/run.py b/lib/mrtrix3/run.py index c0c6d82a55..c94b2be746 100644 --- a/lib/mrtrix3/run.py +++ b/lib/mrtrix3/run.py @@ -18,7 +18,11 @@ def setContinue(filename): #pylint: disable=unused-variable + from mrtrix3 import app + global _lastfile _lastFile = filename + app.var(_lastfile) + From 4577d67b6ce5288a02b6a40086d605edec0e03da Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Fri, 16 Mar 2018 16:47:34 +1100 Subject: [PATCH 0110/1471] mesh2voxel: Ensure output image is floating-point --- cmd/mesh2voxel.cpp | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/cmd/mesh2voxel.cpp b/cmd/mesh2voxel.cpp index 273057c7ac..fc60475ecf 100644 --- a/cmd/mesh2voxel.cpp +++ b/cmd/mesh2voxel.cpp @@ -38,7 +38,7 @@ void usage () SYNOPSIS = "Convert a mesh surface to a partial volume estimation image"; - REFERENCES + REFERENCES + "Smith, R. E.; Tournier, J.-D.; Calamante, F. & Connelly, A. " // Internal "Anatomically-constrained tractography: Improved diffusion MRI streamlines tractography through effective use of anatomical information. " "NeuroImage, 2012, 62, 1924-1938"; @@ -62,6 +62,11 @@ void run () Header template_header = Header::open (argument[1]); check_3D_nonunity (template_header); + // Ensure that a floating-point representation is used for the output image, + // as is required for representing partial volumes + template_header.datatype() = DataType::Float32; + template_header.datatype().set_byte_order_native(); + // Create the output image template_header.datatype() = DataType::Float32; template_header.datatype().set_byte_order_native(); From 71669fd29974f9a0c279c2f89f28fdc328963c60 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Fri, 16 Mar 2018 17:02:36 +1100 Subject: [PATCH 0111/1471] mrtrix3.fsl: Fix exeName() function --- lib/mrtrix3/fsl.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/lib/mrtrix3/fsl.py b/lib/mrtrix3/fsl.py index 3a40aea2dd..2399ff7d2e 100644 --- a/lib/mrtrix3/fsl.py +++ b/lib/mrtrix3/fsl.py @@ -71,14 +71,14 @@ def eddyBinary(cuda): #pylint: disable=unused-variable # makes it more convenient to locate these commands. # Note that if FSL 4 and 5 are installed side-by-side, the approach taken in this # function will select the version 5 executable. 
-def exeName(name): #pylint: disable=unused-variable +def exeName(name, required=True): #pylint: disable=unused-variable from mrtrix3 import app from distutils.spawn import find_executable if find_executable('fsl5.0-' + name): output = 'fsl5.0-' + name elif find_executable(name): output = name - else: + elif required: app.error('Could not find FSL program \"' + name + '\"; please verify FSL install') app.debug(output) return output @@ -89,7 +89,7 @@ def exeName(name): #pylint: disable=unused-variable # FSL commands will generate based on the suffix() function, the FSL binaries themselves # ignore the FSLOUTPUTTYPE environment variable. Therefore, the safest approach is: # Whenever receiving an output image from an FSL command, explicitly search for the path -def findImage(name, *required=True): #pylint: disable=unused-variable +def findImage(name): #pylint: disable=unused-variable import os from mrtrix3 import app prefix = os.path.join(os.path.dirname(name), os.path.basename(name).split('.')[0]) @@ -100,8 +100,7 @@ def findImage(name, *required=True): #pylint: disable=unused-variable if os.path.isfile(prefix + suf): app.debug('Expected image at \"' + prefix + suffix() + '\", but found at \"' + prefix + suf + '\"') return prefix + suf - if required: - app.error('Unable to find FSL output file for path \"' + name + '\"') + app.error('Unable to find FSL output file for path \"' + name + '\"') return '' From 048eb2de5330a5f6a18aa34df211f22bcd42305a Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Wed, 21 Mar 2018 12:34:56 +1100 Subject: [PATCH 0112/1471] Stats: Fix -nonstationary option Echoes issue fixed in 1206cddf / #1264, but re-implemented for altered stats code in 'stats_enhancements' branch. --- src/stats/permtest.cpp | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/src/stats/permtest.cpp b/src/stats/permtest.cpp index 4f95817c45..45921dd7a5 100644 --- a/src/stats/permtest.cpp +++ b/src/stats/permtest.cpp @@ -146,16 +146,17 @@ namespace MR matrix_type& empirical_statistic) { assert (stats_calculator); - vector> global_enhanced_count (empirical_statistic.rows(), vector (empirical_statistic.cols(), 0)); + empirical_statistic = matrix_type::Zero (stats_calculator->num_elements(), stats_calculator->num_outputs()); + vector> global_enhanced_count (stats_calculator->num_outputs(), vector (stats_calculator->num_elements(), 0)); { Math::Stats::Shuffler shuffler (stats_calculator->num_subjects(), true, "Pre-computing empirical statistic for non-stationarity correction"); PreProcessor preprocessor (stats_calculator, enhancer, empirical_statistic, global_enhanced_count); Thread::run_queue (shuffler, Math::Stats::Shuffle(), Thread::multi (preprocessor)); } - for (ssize_t row = 0; row != empirical_statistic.rows(); ++row) { - for (ssize_t i = 0; i != empirical_statistic.cols(); ++i) { - if (global_enhanced_count[row][i] > 0.0) - empirical_statistic(row, i) /= static_cast (global_enhanced_count[row][i]); + for (ssize_t contrast = 0; contrast != stats_calculator->num_outputs(); ++contrast) { + for (ssize_t element = 0; element != stats_calculator->num_elements(); ++element) { + if (global_enhanced_count[contrast][element] > 0) + empirical_statistic(contrast, element) /= static_cast (global_enhanced_count[contrast][element]); } } } From 7cef9e9facb90b29167e1d7620e6b81493fa7898 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Wed, 21 Mar 2018 13:14:13 +1100 Subject: [PATCH 0113/1471] Stats: Fix -nonstationary -> -nonstationarity --- cmd/connectomestats.cpp | 4 ++-- 
cmd/fixelcfestats.cpp | 4 ++-- cmd/mrclusterstats.cpp | 6 +++--- core/math/stats/shuffle.cpp | 2 +- docs/reference/commands/connectomestats.rst | 2 +- docs/reference/commands/fixelcfestats.rst | 2 +- docs/reference/commands/mrclusterstats.rst | 2 +- 7 files changed, 11 insertions(+), 11 deletions(-) diff --git a/cmd/connectomestats.cpp b/cmd/connectomestats.cpp index 62ed467179..0827501d8b 100644 --- a/cmd/connectomestats.cpp +++ b/cmd/connectomestats.cpp @@ -199,7 +199,7 @@ void run() throw Exception ("Unknown enhancement algorithm"); } - const bool do_nonstationary_adjustment = get_options ("nonstationary").size(); + const bool do_nonstationarity_adjustment = get_options ("nonstationarity").size(); // Load design matrix const matrix_type design = load_matrix (argument[2]); @@ -289,7 +289,7 @@ void run() // If performing non-stationarity adjustment we need to pre-compute the empirical statistic matrix_type empirical_statistic; - if (do_nonstationary_adjustment) { + if (do_nonstationarity_adjustment) { Stats::PermTest::precompute_empirical_stat (glm_test, enhancer, empirical_statistic); for (size_t i = 0; i != num_contrasts; ++i) save_matrix (mat2vec.V2M (empirical_statistic.col(i)), output_prefix + "_empirical" + postfix(i) + ".csv"); diff --git a/cmd/fixelcfestats.cpp b/cmd/fixelcfestats.cpp index 2cfe35e4dc..03f1eb7bcd 100644 --- a/cmd/fixelcfestats.cpp +++ b/cmd/fixelcfestats.cpp @@ -259,7 +259,7 @@ void run() const value_type cfe_c = get_option_value ("cfe_c", DEFAULT_CFE_C); const value_type smooth_std_dev = get_option_value ("smooth", DEFAULT_SMOOTHING_STD) / 2.3548; const value_type connectivity_threshold = get_option_value ("connectivity", DEFAULT_CONNECTIVITY_THRESHOLD); - const bool do_nonstationary_adjustment = get_options ("nonstationary").size(); + const bool do_nonstationarity_adjustment = get_options ("nonstationarity").size(); const value_type angular_threshold = get_option_value ("angle", DEFAULT_ANGLE_THRESHOLD); @@ -593,7 +593,7 @@ void run() // If performing non-stationarity adjustment we need to pre-compute the empirical CFE statistic matrix_type empirical_cfe_statistic; - if (do_nonstationary_adjustment) { + if (do_nonstationarity_adjustment) { Stats::PermTest::precompute_empirical_stat (glm_test, cfe_integrator, empirical_cfe_statistic); output_header.keyval()["nonstationary adjustment"] = str(true); for (size_t i = 0; i != num_contrasts; ++i) diff --git a/cmd/mrclusterstats.cpp b/cmd/mrclusterstats.cpp index c2d327134e..0dab3b2f6d 100644 --- a/cmd/mrclusterstats.cpp +++ b/cmd/mrclusterstats.cpp @@ -179,7 +179,7 @@ void run() { const value_type tfce_E = get_option_value ("tfce_e", DEFAULT_TFCE_E); const bool use_tfce = !std::isfinite (cluster_forming_threshold); const bool do_26_connectivity = get_options("connectivity").size(); - const bool do_nonstationary_adjustment = get_options ("nonstationary").size(); + const bool do_nonstationarity_adjustment = get_options ("nonstationarity").size(); // Load analysis mask and compute adjacency auto mask_header = Header::open (argument[3]); @@ -254,7 +254,7 @@ void run() { output_header.datatype() = DataType::Float32; //output_header.keyval()["num permutations"] = str(num_perms); output_header.keyval()["26 connectivity"] = str(do_26_connectivity); - output_header.keyval()["nonstationary adjustment"] = str(do_nonstationary_adjustment); + output_header.keyval()["nonstationary adjustment"] = str(do_nonstationarity_adjustment); if (use_tfce) { output_header.keyval()["tfce_dh"] = str(tfce_dh); output_header.keyval()["tfce_e"] = 
str(tfce_E); @@ -307,7 +307,7 @@ void run() { } matrix_type empirical_enhanced_statistic; - if (do_nonstationary_adjustment) { + if (do_nonstationarity_adjustment) { if (!use_tfce) throw Exception ("Nonstationary adjustment is not currently implemented for threshold-based cluster analysis"); Stats::PermTest::precompute_empirical_stat (glm_test, enhancer, empirical_enhanced_statistic); diff --git a/core/math/stats/shuffle.cpp b/core/math/stats/shuffle.cpp index b65a9bdfdc..6b6034fd5b 100644 --- a/core/math/stats/shuffle.cpp +++ b/core/math/stats/shuffle.cpp @@ -60,7 +60,7 @@ namespace MR result + Option ("nonstationarity", "perform non-stationarity correction") - + Option ("nshuffles_nonstationary", "the number of shuffles to use when precomputing the empirical statistic image for non-stationarity correction (default: " + str(DEFAULT_NUMBER_SHUFFLES_NONSTATIONARITY) + ")") + + Option ("nshuffles_nonstationarity", "the number of shuffles to use when precomputing the empirical statistic image for non-stationarity correction (default: " + str(DEFAULT_NUMBER_SHUFFLES_NONSTATIONARITY) + ")") + Argument ("number").type_integer (1) + Option ("permutations_nonstationarity", "manually define the permutations (relabelling) for computing the emprical statistics for non-stationarity correction. " diff --git a/docs/reference/commands/connectomestats.rst b/docs/reference/commands/connectomestats.rst index 5f98c5d67d..84035b4234 100644 --- a/docs/reference/commands/connectomestats.rst +++ b/docs/reference/commands/connectomestats.rst @@ -42,7 +42,7 @@ Options relating to shuffling of data for nonparametric statistical inference - **-nonstationarity** perform non-stationarity correction -- **-nshuffles_nonstationary number** the number of shuffles to use when precomputing the empirical statistic image for non-stationarity correction (default: 5000) +- **-nshuffles_nonstationarity number** the number of shuffles to use when precomputing the empirical statistic image for non-stationarity correction (default: 5000) - **-permutations_nonstationarity file** manually define the permutations (relabelling) for computing the emprical statistics for non-stationarity correction. The input should be a text file defining a m x n matrix, where each relabelling is defined as a column vector of size m, and the number of columns, n, defines the number of permutations. Can be generated with the palm_quickperms function in PALM (http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/PALM) Overrides the -nshuffles_nonstationarity option. diff --git a/docs/reference/commands/fixelcfestats.rst b/docs/reference/commands/fixelcfestats.rst index 610b292331..7888aff07a 100644 --- a/docs/reference/commands/fixelcfestats.rst +++ b/docs/reference/commands/fixelcfestats.rst @@ -45,7 +45,7 @@ Options relating to shuffling of data for nonparametric statistical inference - **-nonstationarity** perform non-stationarity correction -- **-nshuffles_nonstationary number** the number of shuffles to use when precomputing the empirical statistic image for non-stationarity correction (default: 5000) +- **-nshuffles_nonstationarity number** the number of shuffles to use when precomputing the empirical statistic image for non-stationarity correction (default: 5000) - **-permutations_nonstationarity file** manually define the permutations (relabelling) for computing the emprical statistics for non-stationarity correction. 
The input should be a text file defining a m x n matrix, where each relabelling is defined as a column vector of size m, and the number of columns, n, defines the number of permutations. Can be generated with the palm_quickperms function in PALM (http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/PALM) Overrides the -nshuffles_nonstationarity option. diff --git a/docs/reference/commands/mrclusterstats.rst b/docs/reference/commands/mrclusterstats.rst index 2989139f23..d41f8e1242 100644 --- a/docs/reference/commands/mrclusterstats.rst +++ b/docs/reference/commands/mrclusterstats.rst @@ -42,7 +42,7 @@ Options relating to shuffling of data for nonparametric statistical inference - **-nonstationarity** perform non-stationarity correction -- **-nshuffles_nonstationary number** the number of shuffles to use when precomputing the empirical statistic image for non-stationarity correction (default: 5000) +- **-nshuffles_nonstationarity number** the number of shuffles to use when precomputing the empirical statistic image for non-stationarity correction (default: 5000) - **-permutations_nonstationarity file** manually define the permutations (relabelling) for computing the emprical statistics for non-stationarity correction. The input should be a text file defining a m x n matrix, where each relabelling is defined as a column vector of size m, and the number of columns, n, defines the number of permutations. Can be generated with the palm_quickperms function in PALM (http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/PALM) Overrides the -nshuffles_nonstationarity option. From 215b704fee52607e993aa7bfd7c0ba9aef35cdf1 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Wed, 21 Mar 2018 13:44:20 +1100 Subject: [PATCH 0114/1471] Stats: Use of Eigen arrays in back-end Rather than combining floating-point Eigen matrices with vector> for count-related fields, use an Eigen::Array for count-related fields. This improves code consistency when referring to elements to be tested (rows) and separate contrasts / hypothesis tests (columns). This process also appears to have fixed a remaining issue with use of the -nonstationarity option.
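In rough outline, the change replaces nested STL vectors of counters with a dense Eigen array whose rows index the elements being tested and whose columns index the contrasts, so that accumulation and normalisation become simple element-wise operations. A small sketch of the normalisation step, assuming plausible type aliases (the template parameters below are assumptions, not necessarily those used in the patch):

  #include <Eigen/Dense>
  #include <cstdint>

  // Hypothetical aliases mirroring the layout described above:
  // rows = elements under test, columns = contrasts / hypothesis tests.
  using matrix_type       = Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic>;
  using count_matrix_type = Eigen::Array<uint64_t, Eigen::Dynamic, Eigen::Dynamic>;

  // Illustrative sketch only: divide each accumulated statistic by the number
  // of contributions, skipping entries that never received a contribution.
  void normalise_empirical (matrix_type& empirical_statistic,
                            const count_matrix_type& enhanced_count)
  {
    for (Eigen::Index col = 0; col != empirical_statistic.cols(); ++col)
      for (Eigen::Index row = 0; row != empirical_statistic.rows(); ++row)
        if (enhanced_count (row, col) > 0)
          empirical_statistic (row, col) /= double (enhanced_count (row, col));
  }

Indexing both structures uniformly as (element, contrast) is what provides the row/column consistency described in the commit message.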
--- src/stats/permtest.cpp | 46 ++++++++++++++++-------------------------- src/stats/permtest.h | 13 ++++++------ 2 files changed, 24 insertions(+), 35 deletions(-) diff --git a/src/stats/permtest.cpp b/src/stats/permtest.cpp index 45921dd7a5..2873946dc8 100644 --- a/src/stats/permtest.cpp +++ b/src/stats/permtest.cpp @@ -26,13 +26,13 @@ namespace MR PreProcessor::PreProcessor (const std::shared_ptr stats_calculator, const std::shared_ptr enhancer, matrix_type& global_enhanced_sum, - vector>& global_enhanced_count) : + count_matrix_type& global_enhanced_count) : stats_calculator (stats_calculator), enhancer (enhancer), global_enhanced_sum (global_enhanced_sum), global_enhanced_count (global_enhanced_count), enhanced_sum (vector_type::Zero (global_enhanced_sum.size())), - enhanced_count (stats_calculator->num_outputs(), vector (stats_calculator->num_elements(), 0)), + enhanced_count (count_matrix_type::Zero (stats_calculator->num_elements(), stats_calculator->num_outputs())), stats (global_enhanced_sum.rows(), global_enhanced_sum.cols()), enhanced_stats (global_enhanced_sum.rows(), global_enhanced_sum.cols()), mutex (new std::mutex()) @@ -47,10 +47,7 @@ namespace MR { std::lock_guard lock (*mutex); global_enhanced_sum.array() += enhanced_sum.array(); - for (ssize_t row = 0; row != global_enhanced_sum.rows(); ++row) { - for (ssize_t col = 0; col != global_enhanced_sum.cols(); ++col) - global_enhanced_count[row][col] += enhanced_count[row][col]; - } + global_enhanced_count += enhanced_count; } @@ -61,11 +58,11 @@ namespace MR return false; (*stats_calculator) (shuffle.data, stats); (*enhancer) (stats, enhanced_stats); - for (ssize_t c = 0; c != enhanced_stats.rows(); ++c) { - for (ssize_t i = 0; i < enhanced_stats.cols(); ++i) { - if (enhanced_stats(c, i) > 0.0) { - enhanced_sum(c, i) += enhanced_stats(c, i); - enhanced_count[c][i]++; + for (ssize_t c = 0; c != enhanced_stats.cols(); ++c) { + for (ssize_t i = 0; i < enhanced_stats.rows(); ++i) { + if (enhanced_stats(i, c) > 0.0) { + enhanced_sum(i, c) += enhanced_stats(i, c); + enhanced_count(i, c)++; } } } @@ -83,15 +80,14 @@ namespace MR const matrix_type& empirical_enhanced_statistics, const matrix_type& default_enhanced_statistics, matrix_type& perm_dist, - vector>& global_uncorrected_pvalue_counter) : + count_matrix_type& global_uncorrected_pvalue_counter) : stats_calculator (stats_calculator), enhancer (enhancer), empirical_enhanced_statistics (empirical_enhanced_statistics), default_enhanced_statistics (default_enhanced_statistics), statistics (stats_calculator->num_elements(), stats_calculator->num_outputs()), enhanced_statistics (stats_calculator->num_elements(), stats_calculator->num_outputs()), - // TODO Consider changing to Eigen::Array - uncorrected_pvalue_counter (stats_calculator->num_outputs(), vector (stats_calculator->num_elements(), 0)), + uncorrected_pvalue_counter (count_matrix_type::Zero (stats_calculator->num_elements(), stats_calculator->num_outputs())), perm_dist (perm_dist), global_uncorrected_pvalue_counter (global_uncorrected_pvalue_counter), mutex (new std::mutex()) @@ -104,10 +100,7 @@ namespace MR Processor::~Processor () { std::lock_guard lock (*mutex); - for (size_t contrast = 0; contrast != stats_calculator->num_outputs(); ++contrast) { - for (size_t element = 0; element != stats_calculator->num_elements(); ++element) - global_uncorrected_pvalue_counter[contrast][element] += uncorrected_pvalue_counter[contrast][element]; - } + global_uncorrected_pvalue_counter += uncorrected_pvalue_counter; } @@ -128,7 +121,7 
@@ namespace MR for (ssize_t contrast = 0; contrast != enhanced_statistics.cols(); ++contrast) { for (ssize_t element = 0; element != enhanced_statistics.rows(); ++element) { if (default_enhanced_statistics(element, contrast) > enhanced_statistics(element, contrast)) - uncorrected_pvalue_counter[contrast][element]++; + uncorrected_pvalue_counter(element, contrast)++; } } @@ -147,7 +140,7 @@ namespace MR { assert (stats_calculator); empirical_statistic = matrix_type::Zero (stats_calculator->num_elements(), stats_calculator->num_outputs()); - vector> global_enhanced_count (stats_calculator->num_outputs(), vector (stats_calculator->num_elements(), 0)); + count_matrix_type global_enhanced_count (count_matrix_type::Zero (stats_calculator->num_elements(), stats_calculator->num_outputs())); { Math::Stats::Shuffler shuffler (stats_calculator->num_subjects(), true, "Pre-computing empirical statistic for non-stationarity correction"); PreProcessor preprocessor (stats_calculator, enhancer, empirical_statistic, global_enhanced_count); @@ -155,8 +148,8 @@ namespace MR } for (ssize_t contrast = 0; contrast != stats_calculator->num_outputs(); ++contrast) { for (ssize_t element = 0; element != stats_calculator->num_elements(); ++element) { - if (global_enhanced_count[contrast][element] > 0) - empirical_statistic(contrast, element) /= static_cast (global_enhanced_count[contrast][element]); + if (global_enhanced_count(element, contrast) > 0) + empirical_statistic(element, contrast) /= static_cast (global_enhanced_count(element, contrast)); } } } @@ -199,9 +192,8 @@ namespace MR assert (stats_calculator); Math::Stats::Shuffler shuffler (stats_calculator->num_subjects(), false, "Running permutations"); perm_dist.resize (shuffler.size(), stats_calculator->num_outputs()); - uncorrected_pvalues.resize (stats_calculator->num_elements(), stats_calculator->num_outputs()); - vector> global_uncorrected_pvalue_count (stats_calculator->num_outputs(), vector (stats_calculator->num_elements(), 0)); + count_matrix_type global_uncorrected_pvalue_count (count_matrix_type::Zero (stats_calculator->num_elements(), stats_calculator->num_outputs())); { Processor processor (stats_calculator, enhancer, empirical_enhanced_statistic, @@ -210,11 +202,7 @@ namespace MR global_uncorrected_pvalue_count); Thread::run_queue (shuffler, Math::Stats::Shuffle(), Thread::multi (processor)); } - - for (size_t contrast = 0; contrast != stats_calculator->num_outputs(); ++contrast) { - for (size_t element = 0; element != stats_calculator->num_elements(); ++element) - uncorrected_pvalues(element, contrast) = global_uncorrected_pvalue_count[contrast][element] / default_type(shuffler.size()); - } + uncorrected_pvalues = global_uncorrected_pvalue_count.cast() / default_type(shuffler.size()); } diff --git a/src/stats/permtest.h b/src/stats/permtest.h index d8787e126f..00e61e3ac4 100644 --- a/src/stats/permtest.h +++ b/src/stats/permtest.h @@ -47,6 +47,7 @@ namespace MR using value_type = Math::Stats::value_type; using vector_type = Math::Stats::vector_type; using matrix_type = Math::Stats::matrix_type; + using count_matrix_type = Eigen::Array; @@ -56,7 +57,7 @@ namespace MR PreProcessor (const std::shared_ptr stats_calculator, const std::shared_ptr enhancer, matrix_type& global_enhanced_sum, - vector>& global_enhanced_count); + count_matrix_type& global_enhanced_count); ~PreProcessor(); @@ -66,9 +67,9 @@ namespace MR std::shared_ptr stats_calculator; std::shared_ptr enhancer; matrix_type& global_enhanced_sum; - vector>& global_enhanced_count; + 
count_matrix_type& global_enhanced_count; matrix_type enhanced_sum; - vector> enhanced_count; + count_matrix_type enhanced_count; matrix_type stats; matrix_type enhanced_stats; std::shared_ptr mutex; @@ -85,7 +86,7 @@ namespace MR const matrix_type& empirical_enhanced_statistics, const matrix_type& default_enhanced_statistics, matrix_type& perm_dist, - vector>& global_uncorrected_pvalue_counter); + count_matrix_type& global_uncorrected_pvalue_counter); ~Processor(); @@ -98,9 +99,9 @@ namespace MR const matrix_type& default_enhanced_statistics; matrix_type statistics; matrix_type enhanced_statistics; - vector> uncorrected_pvalue_counter; + count_matrix_type uncorrected_pvalue_counter; matrix_type& perm_dist; - vector>& global_uncorrected_pvalue_counter; + count_matrix_type& global_uncorrected_pvalue_counter; std::shared_ptr mutex; }; From b29ed870ba3911216eacecae352ee6c213987b03 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Wed, 21 Mar 2018 14:34:34 +1100 Subject: [PATCH 0115/1471] 5ttgen hsvs: Use FreeSurfer hippocampal subfield module --- lib/mrtrix3/_5ttgen/hsvs.py | 31 +++++++++++++++++++++++++++++-- 1 file changed, 29 insertions(+), 2 deletions(-) diff --git a/lib/mrtrix3/_5ttgen/hsvs.py b/lib/mrtrix3/_5ttgen/hsvs.py index 6848a10701..f56976b02b 100644 --- a/lib/mrtrix3/_5ttgen/hsvs.py +++ b/lib/mrtrix3/_5ttgen/hsvs.py @@ -89,6 +89,28 @@ def checkDir(dirpath): app.warn('Environment variable FSLDIR is not set; script will run without FSL components') + hipp_subfield_image_map = { os.path.join(mri_dir, 'lh.hippoSfLabels-T1.v10.mgz'): 'Left-Hippocampus', + os.path.join(mri_dir, 'rh.hippoSfLabels-T1.v10.mgz'): 'Right-Hippocampus' } + + if all(os.path.isfile(entry) for entry in hipp_subfield_image_map.keys()): + progress = app.progressBar('Using detected FreeSurfer hippocampal subfields module output', 6) + for filename, outputname in hipp_subfield_image_map.items(): + init_mesh_path = os.path.basename(filename)[0:2] + '_hipp_init.vtk' + smooth_mesh_path = os.path.basename(filename)[0:2] + '_hipp_smooth.vtk' + run.command('mrthreshold ' + filename + ' - -abs 0.5 | mrmesh - ' + init_mesh_path) + progress.increment() + # Since the hippocampal subfields segmentation can include some fine structures, reduce the extent of smoothing + run.command('meshfilter ' + init_mesh_path + ' smooth ' + smooth_mesh_path + ' -smooth_spatial 5 -smooth_influence 5') + progress.increment() + run.command('mesh2voxel ' + smooth_mesh_path + ' ' + aparc_image + ' ' + outputname + '.mif') + progress.increment() + progress.done() + # If we're going to be running FIRST, then we don't want to run it on the hippocampi + if have_first: + sgm_first_map = { key:value for key, value in sgm_first_map.items() if value not in hipp_subfield_image_map.values() } + else: + hipp_subfield_image_map = { } + if have_first: @@ -175,10 +197,11 @@ def checkDir(dirpath): progress.done() # Get other structures that need to be converted from the voxel image - progress = app.progressBar('Smoothing non-cortex structures segmented by FreeSurfer', len(structures)) + progress = app.progressBar('Smoothing non-cortical structures segmented by FreeSurfer', len(structures)) for (index, tissue, name) in structures: # Don't segment anything for which we have instead obtained estimates using FIRST - if not name in sgm_first_map.values(): + # Also don't segment the hippocampi from the aparc+aseg image if we're using the hippocampal subfields module + if not name in sgm_first_map.values() and not name in hipp_subfield_image_map.values(): # If we're 
going to subsequently use fast, don't bother smoothing cerebellar segmentations; # we're only going to use them to produce a mask anyway if 'Cerebellum' in name and have_fast: @@ -207,6 +230,8 @@ def checkDir(dirpath): # This can hopefully be done with a connected-component analysis: Take just the WM image, and # fill in any gaps (i.e. select the inverse, select the largest connected component, invert again) # Make sure that floating-point values are handled appropriately + # TODO Idea: Dilate voxelised brain stem 2 steps, add only intersection with voxelised WM, + # then run through mrmesh # Combine these images together using the appropriate logic in order to form the 5TT image progress = app.progressBar('Combining tissue images in order to preserve 5TT format requirements', 10) @@ -230,6 +255,8 @@ def checkDir(dirpath): progress.increment() # For all voxels within FreeSurfer's brain mask, add to the CSF image in order to make the sum 1.0 + # TODO Rather than doing this blindly, look for holes in the brain, and assign he remainder to WM; + # only within the mask but outside the brain should the CSF fraction be filled run.command('mrcalc 1.0 tissuesum_01234.mif -sub ' + mask_image + ' 0.0 -gt -mult csf_fill.mif') progress.increment() run.command('mrcalc tissue3.mif csf_fill.mif -add tissue3_filled.mif') From 7f0b440de54c70ef08691c6f748eba326ffb47e9 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Wed, 21 Mar 2018 17:18:14 +1100 Subject: [PATCH 0116/1471] fixelcfestats / mrclusterstats: Fix erroneous assertions Assertions were ensuring that the data to be filled was the same size as the input data; but with the introduction of capability to provide a mask, this is not appropriate. --- cmd/fixelcfestats.cpp | 4 +--- cmd/mrclusterstats.cpp | 3 +-- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/cmd/fixelcfestats.cpp b/cmd/fixelcfestats.cpp index 03f1eb7bcd..9db29a7907 100644 --- a/cmd/fixelcfestats.cpp +++ b/cmd/fixelcfestats.cpp @@ -149,7 +149,6 @@ void write_fixel_output (const std::string& filename, const VectorType& data, const Header& header) { - assert (data.size() == header.size (0)); assert (size_t(header.size(0)) == fixel2column.size()); auto output = Image::create (filename, header); for (size_t i = 0; i != fixel2column.size(); ++i) { @@ -179,7 +178,6 @@ class SubjectFixelImport : public SubjectDataImportBase void operator() (matrix_type::RowXpr row) const override { - assert (size_t(row.size()) == size()); Image temp (data); // For thread-safety // Due to merging 'stats_enhancements' with '3.0_RC2', // this class now needs to be made aware of the fixel2row contents @@ -210,8 +208,8 @@ class SubjectFixelImport : public SubjectDataImportBase { assert (index < column2fixel.size()); Image temp (data); // For thread-safety - assert (column2fixel[index] < size()); temp.index(0) = column2fixel[index]; + assert (!is_out_of_bounds (temp)); return default_type(temp.value()); } diff --git a/cmd/mrclusterstats.cpp b/cmd/mrclusterstats.cpp index 0dab3b2f6d..0d03fc9ea5 100644 --- a/cmd/mrclusterstats.cpp +++ b/cmd/mrclusterstats.cpp @@ -134,7 +134,6 @@ class SubjectVoxelImport : public SubjectDataImportBase void operator() (matrix_type::RowXpr row) const override { assert (v2v); - assert (size_t(row.size()) == size()); Image temp (data); // For thread-safety for (size_t i = 0; i != size(); ++i) { assign_pos_of ((*v2v)[i]).to (temp); @@ -145,9 +144,9 @@ class SubjectVoxelImport : public SubjectDataImportBase default_type operator[] (const size_t index) const override { assert 
(v2v); - assert (index < size()); Image temp (data); // For thread-safety assign_pos_of ((*v2v)[index]).to (temp); + assert (!is_out_of_bounds (temp)); return temp.value(); } From c9a6e43ccd06379dac02099537b31fa739692fee Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Wed, 21 Mar 2018 17:39:46 +1100 Subject: [PATCH 0117/1471] 5ttgen hsvs: Fixes - Properly map amygdalae and hippocampi to sub-cortical grey matter if -sgm_amyg_hipp option is provided. - Add bilateral VentralDC to white matter structures to be segmented. - Add corpus callosum segments to white matter structures to be segmented. - First attempt at appropriately filling residual mid-brain volume as white matter, anything outside the brain as CSF. --- lib/mrtrix3/_5ttgen/hsvs.py | 34 ++++++++++++++++++++++++++-------- 1 file changed, 26 insertions(+), 8 deletions(-) diff --git a/lib/mrtrix3/_5ttgen/hsvs.py b/lib/mrtrix3/_5ttgen/hsvs.py index f56976b02b..67a725ca7e 100644 --- a/lib/mrtrix3/_5ttgen/hsvs.py +++ b/lib/mrtrix3/_5ttgen/hsvs.py @@ -5,6 +5,8 @@ def initialise(base_parser, subparsers): # TODO Permit either FreeSurfer directory or T1 image as input parser.add_argument('input', help='The input FreeSurfer subject directory') parser.add_argument('output', help='The output 5TT image') + # TODO Option to specify spatial resolution of output image? + # Or just a template image; that way can control voxel size & axis orientations @@ -135,7 +137,7 @@ def checkDir(dirpath): # to terminate there. # Honour -sgm_amyg_hipp option - ah = 2 if app.args.sgm_amyg_hipp else 0 + ah = 1 if app.args.sgm_amyg_hipp else 0 structures = [ ( 4, 3, 'Left-Lateral-Ventricle'), ( 5, 3, 'Left-Inf-Lat-Vent'), @@ -156,6 +158,7 @@ def checkDir(dirpath): (25, 4, 'Left-Lesion'), (26, 1, 'Left-Accumbens-area'), (27, 1, 'Left-Substancia-Nigra'), + (28, 2, 'Left-VentralDC'), (30, 3, 'Left-vessel'), (31, 1, 'Left-choroid-plexus'), (43, 3, 'Right-Lateral-Ventricle'), @@ -173,11 +176,18 @@ def checkDir(dirpath): (57, 4, 'Right-Lesion'), (58, 1, 'Right-Accumbens-area'), (59, 1, 'Right-Substancia-Nigra'), + (60, 2, 'Right-VentralDC'), (62, 3, 'Right-vessel'), (63, 1, 'Right-choroid-plexus'), (72, 3, '5th-Ventricle'), (192, 2, 'Corpus_Callosum'), - (250, 2, 'Fornix') ] + (250, 2, 'Fornix'), + # TODO Would rather combine CC segments into a single mask before converting to mesh + (251, 2, 'CC_Posterior'), + (252, 2, 'CC_Mid_Posterior'), + (253, 2, 'CC_Central'), + (254, 2, 'CC_Mid_Anterior'), + (255, 2, 'CC_Anterior') ] @@ -234,7 +244,7 @@ def checkDir(dirpath): # then run through mrmesh # Combine these images together using the appropriate logic in order to form the 5TT image - progress = app.progressBar('Combining tissue images in order to preserve 5TT format requirements', 10) + progress = app.progressBar('Combining tissue images in appropriate manner to preserve 5TT format requirements', 14) run.command('mrconvert tissue4_init.mif tissue4.mif') progress.increment() run.command('mrcalc tissue3_init.mif tissue3_init.mif tissue4.mif -add 1.0 -sub 0.0 -max -sub tissue3.mif') @@ -255,9 +265,16 @@ def checkDir(dirpath): progress.increment() # For all voxels within FreeSurfer's brain mask, add to the CSF image in order to make the sum 1.0 - # TODO Rather than doing this blindly, look for holes in the brain, and assign he remainder to WM; + # TESTME Rather than doing this blindly, look for holes in the brain, and assign the remainder to WM; # only within the mask but outside the brain should the CSF fraction be filled - run.command('mrcalc 1.0 tissuesum_01234.mif -sub ' 
+ mask_image + ' 0.0 -gt -mult csf_fill.mif') + # TODO Can definitely do better than just an erosion step here + run.command('mrthreshold tissuesum_01234.mif -abs 0.5 - | maskfilter - erode - | mrcalc 1.0 - -sub - | maskfilter - connect -largest - | mrcalc 1.0 - -sub wm_fill_mask.mif') + progress.increment() + run.command('mrcalc 1.0 tissuesum_01234.mif -sub wm_fill_mask.mif -mult wm_fill.mif') + progress.increment() + run.command('mrcalc tissue2.mif wm_fill.mif -add tissue2_filled.mif') + progress.increment() + run.command('mrcalc 1.0 tissuesum_01234.mif wm_fill.mif -add -sub ' + mask_image + ' 0.0 -gt -mult csf_fill.mif') progress.increment() run.command('mrcalc tissue3.mif csf_fill.mif -add tissue3_filled.mif') progress.done() @@ -303,20 +320,21 @@ def checkDir(dirpath): progress.increment() run.command('mrcalc Cerebellar_mask.mif ' + fast_output_prefix + '_pve_1' + fast_suffix + ' Cerebellar_multiplier.mif -mult tissue1.mif -if tissue1_fast.mif') progress.increment() - run.command('mrcalc Cerebellar_mask.mif ' + fast_output_prefix + '_pve_2' + fast_suffix + ' Cerebellar_multiplier.mif -mult tissue2.mif -if tissue2_fast.mif') + run.command('mrcalc Cerebellar_mask.mif ' + fast_output_prefix + '_pve_2' + fast_suffix + ' Cerebellar_multiplier.mif -mult tissue2_filled.mif -if tissue2_filled_fast.mif') progress.done() # Finally, concatenate the volumes to produce the 5TT image - run.command('mrcat tissue0_fast.mif tissue1_fast.mif tissue2_fast.mif tissue3_filled_fast.mif tissue4.mif 5TT.mif') + run.command('mrcat tissue0_fast.mif tissue1_fast.mif tissue2_filled_fast.mif tissue3_filled_fast.mif tissue4.mif 5TT.mif') else: - run.command('mrcat tissue0.mif tissue1.mif tissue2.mif tissue3_filled.mif tissue4.mif 5TT.mif') + run.command('mrcat tissue0.mif tissue1.mif tissue2_filled.mif tissue3_filled.mif tissue4.mif 5TT.mif') # Maybe don't go off all tissues here, since FreeSurfer's mask can be fairly liberal; # instead get just a voxel clearance from all other tissue types (maybe two) if app.args.nocrop: run.function(os.rename, '5TT.mif', 'result.mif') else: + app.console('Cropping final 5TT image') run.command('mrconvert 5TT.mif -coord 3 0,1,2,4 - | mrmath - sum - -axis 3 | mrthreshold - - -abs 0.001 | maskfilter - dilate crop_mask.mif') run.command('mrcrop 5TT.mif result.mif -mask crop_mask.mif') From 5ad4c68b97bdce7bd5724d61d921c0c757d0fd5f Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Thu, 22 Mar 2018 10:12:28 +1100 Subject: [PATCH 0118/1471] fixelcfestats: Remove erroneous assertion This test was used to ensure that the fixel2column mechanism was working correctly, but is not appropriate in cases where the input data actually contains NaN values (which is permissible). --- cmd/fixelcfestats.cpp | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/cmd/fixelcfestats.cpp b/cmd/fixelcfestats.cpp index 9db29a7907..5c5babe665 100644 --- a/cmd/fixelcfestats.cpp +++ b/cmd/fixelcfestats.cpp @@ -180,16 +180,11 @@ class SubjectFixelImport : public SubjectDataImportBase { Image temp (data); // For thread-safety // Due to merging 'stats_enhancements' with '3.0_RC2', - // this class now needs to be made aware of the fixel2row contents - // (soon to become fixel2column?) 
-#ifndef NDEBUG - row.fill (NaN); -#endif + // this class now needs to be made aware of the fixel2column contents for (temp.index(0) = 0; temp.index(0) != temp.size(0); ++temp.index(0)) { if (fixel2column[temp.index(0)] >= 0) row (fixel2column[temp.index(0)]) = temp.value(); } - assert (row.allFinite()); } // Is this going to require a reverse lookup? From 73386e2719456b47edd6c1dfb7ba79f5a5990c12 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Thu, 22 Mar 2018 11:21:42 +1100 Subject: [PATCH 0119/1471] Stats: Fix element-wise subject exclusion Fix issue with reduction of shuffling matrix in the presence of NaNs in the input data. Also add another vectorstats test to verify the operation of this. --- core/math/stats/glm.cpp | 28 +++++++++++++++++++++------- testing/data | 2 +- testing/tests/vectorstats | 1 + 3 files changed, 23 insertions(+), 8 deletions(-) diff --git a/core/math/stats/glm.cpp b/core/math/stats/glm.cpp index 04392823b3..aaf72a6fc0 100644 --- a/core/math/stats/glm.cpp +++ b/core/math/stats/glm.cpp @@ -480,7 +480,7 @@ namespace MR matrix_type extra_data (num_subjects(), importers.size()); BitSet element_mask (num_subjects()), perm_matrix_mask (num_subjects()); - matrix_type perm_matrix_masked, Mfull_masked, pinvMfull_masked, Rm; + matrix_type shuffling_matrix_masked, Mfull_masked, pinvMfull_masked, Rm; vector_type y_masked, Sy, lambda; matrix_type XtX, beta; @@ -533,7 +533,7 @@ namespace MR Mfull_masked.resize (num_subjects(), num_factors()); Mfull_masked.block (0, 0, num_subjects(), M.cols()) = M; Mfull_masked.block (0, M.cols(), num_subjects(), extra_data.cols()) = extra_data; - perm_matrix_masked = shuffling_matrix; + shuffling_matrix_masked = shuffling_matrix; y_masked = y.col (ie); } else { @@ -559,15 +559,23 @@ namespace MR } assert (out_index == finite_count); assert (perm_matrix_mask.count() == finite_count); - // Only after we've reduced the design matrix do we now reduce the permutation matrix - perm_matrix_masked.resize (finite_count, num_subjects()); + // Only after we've reduced the design matrix do we now reduce the shuffling matrix + // Step 1: Remove rows that contain non-zero entries in columns to be removed + matrix_type temp (finite_count, num_subjects()); out_index = 0; for (size_t in_index = 0; in_index != num_subjects(); ++in_index) { if (perm_matrix_mask[in_index]) - perm_matrix_masked.row (out_index++) = shuffling_matrix.row (in_index); + temp.row (out_index++) = shuffling_matrix.row (in_index); + } + assert (out_index == finite_count); + // Step 2: Remove columns + shuffling_matrix_masked.resize (finite_count, finite_count); + out_index = 0; + for (size_t in_index = 0; in_index != num_subjects(); ++in_index) { + if (element_mask[in_index]) + shuffling_matrix_masked.col (out_index++) = temp.col (in_index); } assert (out_index == finite_count); - } assert (Mfull_masked.allFinite()); @@ -585,7 +593,13 @@ namespace MR // Now that we have the individual contrast model partition for these data, // the rest of this function should proceed similarly to the fixed // design matrix case - Sy = perm_matrix_masked * partition.Rz * y_masked.matrix(); + //VAR (shuffling_matrix_masked.rows()); + //VAR (shuffling_matrix_masked.cols()); + //VAR (partition.Rz.rows()); + //VAR (partition.Rz.cols()); + //VAR (y_masked.rows()); + //VAR (y_masked.cols()); + Sy = shuffling_matrix_masked * partition.Rz * y_masked.matrix(); lambda = pinvMfull_masked * Sy.matrix(); beta.noalias() = c[ic].matrix() * lambda.matrix(); const default_type sse = (Rm*Sy.matrix()).squaredNorm(); diff 
--git a/testing/data b/testing/data index 1a4e3267f3..6b11ca9ef5 160000 --- a/testing/data +++ b/testing/data @@ -1 +1 @@ -Subproject commit 1a4e3267f36934893e615002e71c39bf497061ea +Subproject commit 6b11ca9ef52cd72032ad1c7fff76f40d178b965a diff --git a/testing/tests/vectorstats b/testing/tests/vectorstats index 1fbd020f2f..ecf4300169 100644 --- a/testing/tests/vectorstats +++ b/testing/tests/vectorstats @@ -2,3 +2,4 @@ N=16 SNR=5 vectorstats/gen0.py && vectorstats tmpsubjects.txt tmpdesign.csv tmpc N=16 SNR=5 vectorstats/gen1.py && vectorstats tmpsubjects.txt tmpdesign.csv tmpcontrast.csv tmpout -errors ise -force && vectorstats/test1.py N=16 SNR=5 vectorstats/gen2.py && vectorstats tmpsubjects.txt tmpdesign.csv tmpcontrast.csv tmpout -column tmpcolumn.txt -ftests tmpftests.csv -force && vectorstats/test2.py N=16 SNR=5 vectorstats/gen3.py && vectorstats tmpsubjects.txt tmpdesign.csv tmpcontrast.csv tmpout -errors ise -force && vectorstats/test3.py +N=16 SNR=5 vectorstats/gen4.py && vectorstats tmpsubjects.txt tmpdesign.csv tmpcontrast.csv tmpout -errors ise -force && vectorstats/test4.py From d259f346413090fb0ca47f2a03010d78ef3165ff Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Thu, 22 Mar 2018 14:53:13 +1100 Subject: [PATCH 0120/1471] Stats: Fix bug in non-stationarity correction Pre-allocation of matrix data was not properly updated during the implementation of multiple contrast support. --- src/stats/permtest.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/stats/permtest.cpp b/src/stats/permtest.cpp index 2873946dc8..f6d7c84f2c 100644 --- a/src/stats/permtest.cpp +++ b/src/stats/permtest.cpp @@ -31,7 +31,7 @@ namespace MR enhancer (enhancer), global_enhanced_sum (global_enhanced_sum), global_enhanced_count (global_enhanced_count), - enhanced_sum (vector_type::Zero (global_enhanced_sum.size())), + enhanced_sum (matrix_type::Zero (stats_calculator->num_elements(), stats_calculator->num_outputs())), enhanced_count (count_matrix_type::Zero (stats_calculator->num_elements(), stats_calculator->num_outputs())), stats (global_enhanced_sum.rows(), global_enhanced_sum.cols()), enhanced_stats (global_enhanced_sum.rows(), global_enhanced_sum.cols()), @@ -58,8 +58,8 @@ namespace MR return false; (*stats_calculator) (shuffle.data, stats); (*enhancer) (stats, enhanced_stats); - for (ssize_t c = 0; c != enhanced_stats.cols(); ++c) { - for (ssize_t i = 0; i < enhanced_stats.rows(); ++i) { + for (size_t c = 0; c != enhanced_stats.cols(); ++c) { + for (size_t i = 0; i < enhanced_stats.rows(); ++i) { if (enhanced_stats(i, c) > 0.0) { enhanced_sum(i, c) += enhanced_stats(i, c); enhanced_count(i, c)++; From 256e3d7023f09d6ecff20e8126a42fc67c4e0630 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Fri, 23 Mar 2018 13:12:52 +1100 Subject: [PATCH 0121/1471] 5ttgen hsvs: -template option Allows user to provide a template image that will define the voxel grid for the output 5TT image. Incorporation of FAST run on cerebellar region is not yet correct, but this is the first working version. 
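A rough usage sketch of the new option (not part of the patch itself; '5ttgen' is assumed to be installed and on PATH, and every path below is a placeholder):

    import subprocess

    # Hypothetical invocation: build the 5TT image on the voxel grid of a user-supplied template image.
    subprocess.run(['5ttgen', 'hsvs',
                    '/data/freesurfer/subject01',            # FreeSurfer subject directory (input)
                    '5tt.mif',                               # output 5TT image
                    '-template', '/data/subject01/T1w.mif'   # image defining the output voxel grid
                   ], check=True)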
--- lib/mrtrix3/_5ttgen/hsvs.py | 115 ++++++++++++++++++++++++-------- src/surface/algo/mesh2image.cpp | 8 +++ 2 files changed, 95 insertions(+), 28 deletions(-) diff --git a/lib/mrtrix3/_5ttgen/hsvs.py b/lib/mrtrix3/_5ttgen/hsvs.py index 67a725ca7e..e68d706821 100644 --- a/lib/mrtrix3/_5ttgen/hsvs.py +++ b/lib/mrtrix3/_5ttgen/hsvs.py @@ -5,8 +5,13 @@ def initialise(base_parser, subparsers): # TODO Permit either FreeSurfer directory or T1 image as input parser.add_argument('input', help='The input FreeSurfer subject directory') parser.add_argument('output', help='The output 5TT image') - # TODO Option to specify spatial resolution of output image? + # TESTME Option to specify spatial resolution of output image? # Or just a template image; that way can control voxel size & axis orientations + parser.add_argument('-template', help='Provide an image that will form the template for the generated 5TT image') + # TODO Add references + + +# TODO Add file.delTemporary() throughout @@ -16,8 +21,10 @@ def checkOutputPaths(): def getInputs(): + from mrtrix3 import app, path, run # FreeSurfer files will be accessed in-place; no need to pre-convert them into the temporary directory - pass + if app.args.template: + run.command('mrconvert ' + path.fromUser(app.args.template, True) + ' ' + path.toTemp('template.mif', True) + ' -axes 0,1,2') @@ -49,6 +56,11 @@ def checkDir(dirpath): checkFile(aparc_image) checkFile(mask_image) checkFile(reg_file) + template_image = 'template.mif' if app.args.template else aparc_image + + if app.args.template: + run.command('mrtransform ' + mask_image + ' -template template.mif - | mrthreshold - brainmask.mif -abs 0.5') + mask_image = 'brainmask.mif' sgm_first_map = { } have_first = True @@ -104,7 +116,7 @@ def checkDir(dirpath): # Since the hippocampal subfields segmentation can include some fine structures, reduce the extent of smoothing run.command('meshfilter ' + init_mesh_path + ' smooth ' + smooth_mesh_path + ' -smooth_spatial 5 -smooth_influence 5') progress.increment() - run.command('mesh2voxel ' + smooth_mesh_path + ' ' + aparc_image + ' ' + outputname + '.mif') + run.command('mesh2voxel ' + smooth_mesh_path + ' ' + template_image + ' ' + outputname + '.mif') progress.increment() progress.done() # If we're going to be running FIRST, then we don't want to run it on the hippocampi @@ -123,7 +135,7 @@ def checkDir(dirpath): for key, value in sgm_first_map.items(): vtk_in_path = 'first-' + key + '_first.vtk' run.command('meshconvert ' + vtk_in_path + ' first-' + key + '_transformed.vtk -transform first2real T1.nii') - run.command('mesh2voxel first-' + key + '_transformed.vtk ' + aparc_image + ' ' + value + '.mif') + run.command('mesh2voxel first-' + key + '_transformed.vtk ' + template_image + ' ' + value + '.mif') progress.increment() progress.done() @@ -202,7 +214,7 @@ def checkDir(dirpath): checkFile(filepath) transformed_path = basename + '_realspace.obj' run.command('meshconvert ' + filepath + ' ' + transformed_path + ' -binary -transform fs2real ' + aparc_image) - run.command('mesh2voxel ' + transformed_path + ' ' + aparc_image + ' ' + basename + '.mif') + run.command('mesh2voxel ' + transformed_path + ' ' + template_image + ' ' + basename + '.mif') progress.increment() progress.done() @@ -212,21 +224,24 @@ def checkDir(dirpath): # Don't segment anything for which we have instead obtained estimates using FIRST # Also don't segment the hippocampi from the aparc+aseg image if we're using the hippocampal subfields module if not name in sgm_first_map.values() and not 
name in hipp_subfield_image_map.values(): - # If we're going to subsequently use fast, don't bother smoothing cerebellar segmentations; - # we're only going to use them to produce a mask anyway + # If we're going to subsequently use fast directly on the FreeSurfer T1 image, + # don't bother smoothing cerebellar segmentations; we're only going to use + # them to produce a mask anyway + # FIXME This will still be included in the list of images to be combined; + # if a template image is being used, this will lead to heartache due to image grid mismatch if 'Cerebellum' in name and have_fast: run.command('mrcalc ' + aparc_image + ' ' + str(index) + ' -eq ' + name + '.mif -datatype float32') else: run.command('mrcalc ' + aparc_image + ' ' + str(index) + ' -eq - | mrmesh - -threshold 0.5 ' + name + '_init.obj') run.command('meshfilter ' + name + '_init.obj smooth ' + name + '.obj') - run.command('mesh2voxel ' + name + '.obj ' + aparc_image + ' ' + name + '.mif') + run.command('mesh2voxel ' + name + '.obj ' + template_image + ' ' + name + '.mif') progress.increment() progress.done() # Construct images with the partial volume of each tissue progress = app.progressBar('Combining segmentations of all structures corresponding to each tissue type', 5) for tissue in range(0,5): - image_list = [ n + '.mif' for (i, t, n) in structures if t == tissue ] + image_list = [ n + '.mif' for (i, t, n) in structures if (t == tissue and not (have_fast and 'Cerebellum' in n)) ] # For cortical GM and WM, need to also add the main cerebrum segments if tissue == 0: image_list.extend([ 'lh.pial.mif', 'rh.pial.mif' ]) @@ -267,7 +282,8 @@ def checkDir(dirpath): # For all voxels within FreeSurfer's brain mask, add to the CSF image in order to make the sum 1.0 # TESTME Rather than doing this blindly, look for holes in the brain, and assign the remainder to WM; # only within the mask but outside the brain should the CSF fraction be filled - # TODO Can definitely do better than just an erosion step here + # TODO Can definitely do better than just an erosion step here; still some hyper-intensities at GM-Wm interface + # TODO Connected-component analysis at such high resolution is taking up huge amounts of memory run.command('mrthreshold tissuesum_01234.mif -abs 0.5 - | maskfilter - erode - | mrcalc 1.0 - -sub - | maskfilter - connect -largest - | mrcalc 1.0 - -sub wm_fill_mask.mif') progress.increment() run.command('mrcalc 1.0 tissuesum_01234.mif -sub wm_fill_mask.mif -mult wm_fill.mif') @@ -282,25 +298,66 @@ def checkDir(dirpath): # Branch depending on whether or not FSL fast will be used to re-segment the cerebellum if have_fast: - # Generate a mask of all voxels classified as cerebellum by FreeSurfer - cerebellar_indices = [ i for (i, t, n) in structures if 'Cerebellum' in n ] - cerebellar_submask_list = [ ] - progress = app.progressBar('Generating whole-cerebellum mask from FreeSurfer segmentations', len(cerebellar_indices)+1) - for index in cerebellar_indices: - filename = 'Cerebellum_' + str(index) + '.mif' - run.command('mrcalc ' + aparc_image + ' ' + str(index) + ' -eq ' + filename + ' -datatype bit') - cerebellar_submask_list.append(filename) + # TODO How to support -template option? 
+ # - Re-grid norm.mgz to template image before running FAST + # - Re-grid FAST output to template image + # Consider splitting, including initial mapping of cerebellar regions: + # - If we're not using a separate template image, just map cerebellar regions to voxels to + # produce a mask, and run FAST within that mask + # - If we have a template, combine cerebellar regions, convert to surfaces (one per hemisphere), + # map these to the template image, run FIRST on a binary mask from this, then + # re-combine this with the tissue maps from other sources based on the estimated PVF of + # cerebellum meshes + + if app.args.template: + + # If this is the case, then we haven't yet performed any cerebellar segmentation / meshing + # What we want to do is: for each hemisphere, combine all three "cerebellar" segments from FreeSurfer, + # convert to a surface, map that surface to the template image + progress = app.progressBar('Preparing images of cerebellum for intensity-based segmentation', 11) + for hemi in [ 'Left', 'Right' ]: + cerebellar_images = [ n + '.mif' for (i, t, n) in structures if (hemi in n and 'Cerebellum' in n) ] + run.command('mrmath ' + ' '.join(cerebellar_images) + ' sum ' + hemi + '-Cerebellum-All.mif') + progress.increment() + run.command('mrmesh ' + hemi + '-Cerebellum-All.mif ' + hemi + '-Cerebellum-All-init.vtk') + progress.increment() + run.command('meshfilter ' + hemi + '-Cerebellum-All-init.vtk smooth ' + hemi + '-Cerebellum-All-smooth.vtk') + progress.increment() + run.command('mesh2voxel ' + hemi + '-Cerebellum-All-smooth.vtk ' + template_image + ' ' + hemi + '-Cerebellum-PVF-Template.mif') + progress.increment() + + # Combine the two hemispheres together into: + # - An image in preparation for running FAST + # - A combined total partial volume fraction image that will be later used for tissue recombination + run.command('mrmath Left-Cerebellum-PVF-Template.mif Right-Cerebellum-PVF-Template.mif sum Cerebellum_weight.mif') progress.increment() - run.command('mrmath ' + ' '.join(cerebellar_submask_list) + ' sum Cerebellar_mask.mif') - progress.done() + run.command('mrthreshold Cerebellum_weight.mif Both-Cerebellum-Mask-Template.mif -abs 1e-6') + progress.increment() + run.command('mrtransform ' + norm_image + ' -template ' + template_image + ' - | ' \ + 'mrcalc - Both-Cerebellum-Mask-Template.mif -mult - | ' \ + 'mrconvert - T1_cerebellum.nii -stride -1,+2,+3') + progress.done() - app.console('Running FSL fast to segment the cerebellum based on intensity information') + else: + + progress = app.progressBar('Preparing images of cerebellum for intensity-based segmentation', 2) + # Generate a mask of all voxels classified as cerebellum by FreeSurfer + cerebellum_mask_images = [ n + '.mif' for (i, t, n) in structures if 'Cerebellum' in n ] + run.command('mrmath ' + ' '.join(cerebellum_mask_images) + ' sum Cerebellum_weight.mif') + progress.increment() + # FAST image input needs to be pre-masked + run.command('mrcalc T1.nii Cerebellum_weight.mif -mult - | mrconvert - T1_cerebellum.nii -stride -1,+2,+3') + progress.done() + + # TODO Any code below here should be compatible with Cerebellum_weight.mif containing partial volume fractions + # (in the case of no explicit template image, it's a mask, but the logic still applies) - # FAST image input needs to be pre-masked - run.command('mrcalc T1.nii Cerebellar_mask.mif -mult - | mrconvert - T1_cerebellum.nii -stride -1,+2,+3') + app.console('Running FSL fast to segment the cerebellum based on intensity information') # Run FSL FAST 
just within the cerebellum # TESTME Should bias field estimation be disabled within fast? + # TODO FAST memory usage can also be huge when using a high-resolution template image + # Consider cropping T1 image around the cerebellum before feeding to FAST run.command(fast_cmd + ' -N T1_cerebellum.nii') fast_output_prefix = 'T1_cerebellum' @@ -312,15 +369,17 @@ def checkDir(dirpath): # Some of these voxels may have a non-zero cortical GM component. # In that case, let's find a multiplier to apply to all tissues (including CGM) such that the sum is 1.0 - run.command('mrcalc 1.0 tissue0.mif 1.0 -add -div Cerebellar_mask.mif -mult Cerebellar_multiplier.mif') + # TESTME Does this needs to change for the case of a provided template image, in which case + # Cerebellum_weight.mif contains floating-point values? + run.command('mrcalc 1.0 tissue0.mif 1.0 -add -div Cerebellum_weight.mif -mult Cerebellar_multiplier.mif') progress.increment() - run.command('mrcalc Cerebellar_mask.mif Cerebellar_multiplier.mif 1.0 -if tissue0.mif -mult tissue0_fast.mif') + run.command('mrcalc Cerebellum_weight.mif Cerebellar_multiplier.mif 1.0 -if tissue0.mif -mult tissue0_fast.mif') progress.increment() - run.command('mrcalc Cerebellar_mask.mif ' + fast_output_prefix + '_pve_0' + fast_suffix + ' Cerebellar_multiplier.mif -mult tissue3_filled.mif -if tissue3_filled_fast.mif') + run.command('mrcalc Cerebellum_weight.mif ' + fast_output_prefix + '_pve_0' + fast_suffix + ' Cerebellar_multiplier.mif -mult tissue3_filled.mif -if tissue3_filled_fast.mif') progress.increment() - run.command('mrcalc Cerebellar_mask.mif ' + fast_output_prefix + '_pve_1' + fast_suffix + ' Cerebellar_multiplier.mif -mult tissue1.mif -if tissue1_fast.mif') + run.command('mrcalc Cerebellum_weight.mif ' + fast_output_prefix + '_pve_1' + fast_suffix + ' Cerebellar_multiplier.mif -mult tissue1.mif -if tissue1_fast.mif') progress.increment() - run.command('mrcalc Cerebellar_mask.mif ' + fast_output_prefix + '_pve_2' + fast_suffix + ' Cerebellar_multiplier.mif -mult tissue2_filled.mif -if tissue2_filled_fast.mif') + run.command('mrcalc Cerebellum_weight.mif ' + fast_output_prefix + '_pve_2' + fast_suffix + ' Cerebellar_multiplier.mif -mult tissue2_filled.mif -if tissue2_filled_fast.mif') progress.done() # Finally, concatenate the volumes to produce the 5TT image diff --git a/src/surface/algo/mesh2image.cpp b/src/surface/algo/mesh2image.cpp index 59866b4aea..75d2bf495c 100644 --- a/src/surface/algo/mesh2image.cpp +++ b/src/surface/algo/mesh2image.cpp @@ -206,6 +206,14 @@ namespace MR ++progress; + // TODO Better implementation here + // For *any* voxel not on the mesh but neighbouring a voxel in which a vertex lies, + // track a floating-point value corresponding to its distance from the normal plane. + // Each voxel not on the mesh should then be assigned as prelim_inside or prelim_outside + // depending on whether the summed value is positive or negative + + + // New implementation of filling in the centre of the mesh // Rather than selecting the eight external corners and filling in outside the // mesh (which may omit some areas), selecting anything remaining as 'inside', From 0cf4f48a6d2bc91bd0afdbe687b8f8adb0b2d83c Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Fri, 23 Mar 2018 14:52:26 +1100 Subject: [PATCH 0122/1471] 5ttgen hsvs: Initial work toward re-arrangement Want to run FAST on the cerebellum before filling partial volume fraction deficits with WM and CSF.
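The intended fill behaviour can be sketched as a toy per-voxel calculation (illustrative only, not the script's code; the 5TT tissue ordering and the in-brain test used here are assumptions):

    import numpy as np

    # Assumed 5TT tissue ordering: [cortical GM, sub-cortical GM, WM, CSF, pathological tissue].
    fractions = np.array([0.20, 0.00, 0.55, 0.10, 0.00])  # after FAST has re-segmented the cerebellum
    deficit = 1.0 - fractions.sum()                       # partial volume not yet assigned to any tissue
    inside_brain = True                                    # e.g. voxel lies within the filled WM mask
    if inside_brain:
        fractions[2] += deficit                            # residual volume inside the brain goes to WM
    else:
        fractions[3] += deficit                            # within the brain mask but outside the brain: CSF
    assert abs(fractions.sum() - 1.0) < 1e-6               # 5TT requirement: tissue fractions sum to unity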
--- lib/mrtrix3/_5ttgen/hsvs.py | 117 ++++++++++++++++++++++-------------- 1 file changed, 73 insertions(+), 44 deletions(-) diff --git a/lib/mrtrix3/_5ttgen/hsvs.py b/lib/mrtrix3/_5ttgen/hsvs.py index e68d706821..d31ec756b3 100644 --- a/lib/mrtrix3/_5ttgen/hsvs.py +++ b/lib/mrtrix3/_5ttgen/hsvs.py @@ -251,6 +251,7 @@ def checkDir(dirpath): progress.increment() progress.done() + # TODO Need to fill in any potential gaps in the WM image in the centre of the brain # This can hopefully be done with a connected-component analysis: Take just the WM image, and # fill in any gaps (i.e. select the inverse, select the largest connected component, invert again) @@ -259,46 +260,37 @@ def checkDir(dirpath): # then run through mrmesh # Combine these images together using the appropriate logic in order to form the 5TT image - progress = app.progressBar('Combining tissue images in appropriate manner to preserve 5TT format requirements', 14) - run.command('mrconvert tissue4_init.mif tissue4.mif') - progress.increment() - run.command('mrcalc tissue3_init.mif tissue3_init.mif tissue4.mif -add 1.0 -sub 0.0 -max -sub tissue3.mif') - progress.increment() - run.command('mrmath tissue3.mif tissue4.mif sum tissuesum_34.mif') - progress.increment() - run.command('mrcalc tissue1_init.mif tissue1_init.mif tissuesum_34.mif -add 1.0 -sub 0.0 -max -sub tissue1.mif') + progress = app.progressBar('Combining tissue images', 9) + tissue_images = [ 'tissue0.mif', 'tissue1.mif', 'tissue2.mif', 'tissue3.mif', 'tissue4.mif' ] + run.function(os.rename, 'tissue4_init.mif', 'tissue4.mif') progress.increment() - run.command('mrmath tissuesum_34.mif tissue1.mif sum tissuesum_134.mif') + run.command('mrcalc tissue3_init.mif tissue3_init.mif ' + tissue_images[4] + ' -add 1.0 -sub 0.0 -max -sub ' + tissue_images[3]) progress.increment() - run.command('mrcalc tissue2_init.mif tissue2_init.mif tissuesum_134.mif -add 1.0 -sub 0.0 -max -sub tissue2.mif') + run.command('mrmath ' + ' '.join(tissue_images[3:5]) + ' sum tissuesum_34.mif') progress.increment() - run.command('mrmath tissuesum_134.mif tissue2.mif sum tissuesum_1234.mif') + run.command('mrcalc tissue1_init.mif tissue1_init.mif tissuesum_34.mif -add 1.0 -sub 0.0 -max -sub ' + tissue_images[1]) progress.increment() - run.command('mrcalc tissue0_init.mif tissue0_init.mif tissuesum_1234.mif -add 1.0 -sub 0.0 -max -sub tissue0.mif') - progress.increment() - run.command('mrmath tissuesum_1234.mif tissue0.mif sum tissuesum_01234.mif') - progress.increment() - - # For all voxels within FreeSurfer's brain mask, add to the CSF image in order to make the sum 1.0 - # TESTME Rather than doing this blindly, look for holes in the brain, and assign the remainder to WM; - # only within the mask but outside the brain should the CSF fraction be filled - # TODO Can definitely do better than just an erosion step here; still some hyper-intensities at GM-Wm interface - # TODO Connected-component analysis at such high resolution is taking up huge amounts of memory - run.command('mrthreshold tissuesum_01234.mif -abs 0.5 - | maskfilter - erode - | mrcalc 1.0 - -sub - | maskfilter - connect -largest - | mrcalc 1.0 - -sub wm_fill_mask.mif') + run.command('mrmath ' + tissue_images[1] + ' ' + ' '.join(tissue_images[3:5]) + ' sum tissuesum_134.mif') progress.increment() - run.command('mrcalc 1.0 tissuesum_01234.mif -sub wm_fill_mask.mif -mult wm_fill.mif') + run.command('mrcalc tissue2_init.mif tissue2_init.mif tissuesum_134.mif -add 1.0 -sub 0.0 -max -sub ' + tissue_images[2]) progress.increment() - 
run.command('mrcalc tissue2.mif wm_fill.mif -add tissue2_filled.mif') + run.command('mrmath ' + ' '.join(tissue_images[1:5]) + ' sum tissuesum_1234.mif') progress.increment() - run.command('mrcalc 1.0 tissuesum_01234.mif wm_fill.mif -add -sub ' + mask_image + ' 0.0 -gt -mult csf_fill.mif') + run.command('mrcalc tissue0_init.mif tissue0_init.mif tissuesum_1234.mif -add 1.0 -sub 0.0 -max -sub ' + tissue_images[0]) progress.increment() - run.command('mrcalc tissue3.mif csf_fill.mif -add tissue3_filled.mif') + tissue_sum_image = 'tissuesum_01234.mif' + run.command('mrmath ' + ' '.join(tissue_images) + ' sum ' + tissue_sum_image) progress.done() + + + + + # Branch depending on whether or not FSL fast will be used to re-segment the cerebellum if have_fast: - # TODO How to support -template option? + # How to support -template option? # - Re-grid norm.mgz to template image before running FAST # - Re-grid FAST output to template image # Consider splitting, including initial mapping of cerebellar regions: @@ -329,13 +321,14 @@ def checkDir(dirpath): # Combine the two hemispheres together into: # - An image in preparation for running FAST # - A combined total partial volume fraction image that will be later used for tissue recombination - run.command('mrmath Left-Cerebellum-PVF-Template.mif Right-Cerebellum-PVF-Template.mif sum Cerebellum_weight.mif') + run.command('mrcalc Left-Cerebellum-PVF-Template.mif Right-Cerebellum-PVF-Template.mif -add 1.0 -min Cerebellum_weight.mif') progress.increment() - run.command('mrthreshold Cerebellum_weight.mif Both-Cerebellum-Mask-Template.mif -abs 1e-6') + T1_cerebellum_mask_image = 'T1_cerebellum_mask.mif' + run.command('mrthreshold Cerebellum_weight.mif ' + T1_cerebellum_mask_image + ' -abs 1e-6') progress.increment() run.command('mrtransform ' + norm_image + ' -template ' + template_image + ' - | ' \ - 'mrcalc - Both-Cerebellum-Mask-Template.mif -mult - | ' \ - 'mrconvert - T1_cerebellum.nii -stride -1,+2,+3') + 'mrcalc - ' + T1_cerebellum_mask_image + ' -mult - | ' \ + 'mrconvert - T1_cerebellum_precrop.mif') progress.done() else: @@ -346,7 +339,8 @@ def checkDir(dirpath): run.command('mrmath ' + ' '.join(cerebellum_mask_images) + ' sum Cerebellum_weight.mif') progress.increment() # FAST image input needs to be pre-masked - run.command('mrcalc T1.nii Cerebellum_weight.mif -mult - | mrconvert - T1_cerebellum.nii -stride -1,+2,+3') + T1_cerebellum_mask_image = 'Cerebellum_weight.mif' + run.command('mrcalc T1.nii ' + T1_cerebellum_mask_image + ' -mult - | mrconvert - T1_cerebellum_precrop.mif -stride -1,+2,+3') progress.done() # TODO Any code below here should be compatible with Cerebellum_weight.mif containing partial volume fractions @@ -356,10 +350,13 @@ def checkDir(dirpath): # Run FSL FAST just within the cerebellum # TESTME Should bias field estimation be disabled within fast? 
- # TODO FAST memory usage can also be huge when using a high-resolution template image - # Consider cropping T1 image around the cerebellum before feeding to FAST + # FAST memory usage can also be huge when using a high-resolution template image: + # Crop T1 image around the cerebellum before feeding to FAST, then re-sample to full template image FoV + run.command('mrcrop T1_cerebellum_precrop.mif -mask ' + T1_cerebellum_mask_image + ' T1_cerebellum.nii') run.command(fast_cmd + ' -N T1_cerebellum.nii') - fast_output_prefix = 'T1_cerebellum' + run.command('mrtransform T1_cerebellum_pve_0' + fast_suffix + ' -interp nearest -template ' + template_image + ' FAST_CSF.mif') + run.command('mrtransform T1_cerebellum_pve_1' + fast_suffix + ' -interp nearest -template ' + template_image + ' FAST_GM.mif') + run.command('mrtransform T1_cerebellum_pve_2' + fast_suffix + ' -interp nearest -template ' + template_image + ' FAST_WM.mif') # Generate the revised tissue images, using output from FAST inside the cerebellum and # output from previous processing everywhere else @@ -371,22 +368,54 @@ def checkDir(dirpath): # In that case, let's find a multiplier to apply to all tissues (including CGM) such that the sum is 1.0 # TESTME Does this needs to change for the case of a provided template image, in which case # Cerebellum_weight.mif contains floating-point values? - run.command('mrcalc 1.0 tissue0.mif 1.0 -add -div Cerebellum_weight.mif -mult Cerebellar_multiplier.mif') + # TODO Can probably also change if the cerebellar segments are not included in prior computations: + # no longer considering just the CGM fraction + new_tissue_images = [ 'tissue0_fast.mif', 'tissue1_fast.mif', 'tissue2_fast.mif', 'tissue3_fast.mif', 'tissue4.mif' ] + new_tissue_sum_image = 'tissuesum_01234_fast.mif' + run.command('mrcalc 1.0 ' + tissue_images[0] + ' 1.0 -add -div Cerebellum_weight.mif -mult Cerebellar_multiplier.mif') + progress.increment() + run.command('mrcalc Cerebellum_weight.mif Cerebellar_multiplier.mif 1.0 -if ' + tissue_images[0] + ' -mult ' + new_tissue_images[0]) progress.increment() - run.command('mrcalc Cerebellum_weight.mif Cerebellar_multiplier.mif 1.0 -if tissue0.mif -mult tissue0_fast.mif') + run.command('mrcalc Cerebellum_weight.mif FAST_CSF.mif Cerebellar_multiplier.mif -mult ' + tissue_images[3] + ' -if ' + new_tissue_images[3]) progress.increment() - run.command('mrcalc Cerebellum_weight.mif ' + fast_output_prefix + '_pve_0' + fast_suffix + ' Cerebellar_multiplier.mif -mult tissue3_filled.mif -if tissue3_filled_fast.mif') + run.command('mrcalc Cerebellum_weight.mif FAST_GM.mif Cerebellar_multiplier.mif -mult ' + tissue_images[1] + ' -if ' + new_tissue_images[1]) progress.increment() - run.command('mrcalc Cerebellum_weight.mif ' + fast_output_prefix + '_pve_1' + fast_suffix + ' Cerebellar_multiplier.mif -mult tissue1.mif -if tissue1_fast.mif') + run.command('mrcalc Cerebellum_weight.mif FAST_WM.mif Cerebellar_multiplier.mif -mult ' + tissue_images[2] + ' -if ' + new_tissue_images[2]) progress.increment() - run.command('mrcalc Cerebellum_weight.mif ' + fast_output_prefix + '_pve_2' + fast_suffix + ' Cerebellar_multiplier.mif -mult tissue2_filled.mif -if tissue2_filled_fast.mif') + run.command('mrmath ' + ' '.join(new_tissue_images) + ' sum ' + new_tissue_sum_image) progress.done() + tissue_images = new_tissue_images + tissue_sum_image = new_tissue_sum_image + + + # For all voxels within FreeSurfer's brain mask, add to the CSF image in order to make the sum 1.0 + # TESTME Rather than doing this blindly, 
look for holes in the brain, and assign the remainder to WM; + # only within the mask but outside the brain should the CSF fraction be filled + # TODO Can definitely do better than just an erosion step here; still some hyper-intensities at GM-WW interface + + + # TESTME Should the below occur after FAST? + progress = app.progressBar('Performing fill operations to preserve unity tissue volume', 5) + # TODO Connected-component analysis at high template image resolution is taking up huge amounts of memory + # Crop beforehand? It's because it's filling everything outside the brain... + new_tissue_images = [ tissue_images[0], tissue_images[1], 'tissue2_filled.mif', 'tissue3_filled.mif', tissue_images[4] ] + run.command('mrthreshold ' + tissue_sum_image + ' -abs 0.5 - | maskfilter - erode - | mrcalc 1.0 - -sub - | maskfilter - connect -largest - | mrcalc 1.0 - -sub wm_fill_mask.mif') + progress.increment() + run.command('mrcalc 1.0 ' + tissue_sum_image + ' -sub wm_fill_mask.mif -mult wm_fill.mif') + progress.increment() + run.command('mrcalc ' + tissue_images[2] + ' wm_fill.mif -add ' + new_tissue_images[2]) + progress.increment() + run.command('mrcalc 1.0 ' + tissue_sum_image + ' wm_fill.mif -add -sub ' + mask_image + ' 0.0 -gt -mult csf_fill.mif') + progress.increment() + run.command('mrcalc ' + tissue_images[3] + ' csf_fill.mif -add ' + new_tissue_images[3]) + progress.done() + tissue_images = new_tissue_images + + + # Finally, concatenate the volumes to produce the 5TT image + run.command('mrcat ' + ' '.join(tissue_images) + ' 5TT.mif -axis 3') - # Finally, concatenate the volumes to produce the 5TT image - run.command('mrcat tissue0_fast.mif tissue1_fast.mif tissue2_filled_fast.mif tissue3_filled_fast.mif tissue4.mif 5TT.mif') - else: - run.command('mrcat tissue0.mif tissue1.mif tissue2_filled.mif tissue3_filled.mif tissue4.mif 5TT.mif') # Maybe don't go off all tissues here, since FreeSurfer's mask can be fairly liberal; # instead get just a voxel clearance from all other tissue types (maybe two) From 6985311240591f4cf7def463c2f0138528583052 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Tue, 27 Mar 2018 14:11:37 +1100 Subject: [PATCH 0123/1471] Stats: Deal with under-determined NaN-filled data When the GLM design matrix varies between elements (whether due to element-wise design matrix columns or the presence of non-finite values in the data), if such a large proportion of the data for a particular element is excluded that the system becomes under-determined, do not attempt to perform a statistical test for that element. --- core/math/stats/glm.cpp | 172 +++++++++++++++++++++------------------- src/stats/permtest.cpp | 8 +- 2 files changed, 93 insertions(+), 87 deletions(-) diff --git a/core/math/stats/glm.cpp b/core/math/stats/glm.cpp index aaf72a6fc0..36407eeedf 100644 --- a/core/math/stats/glm.cpp +++ b/core/math/stats/glm.cpp @@ -525,98 +525,104 @@ namespace MR } } const size_t finite_count = element_mask.count(); + if (finite_count < num_factors()) { + output.row (ie).setZero(); + } else { - // Do we need to reduce the size of our matrices / vectors - // based on the presence of non-finite values? - if (finite_count == num_subjects()) { + // Do we need to reduce the size of our matrices / vectors + // based on the presence of non-finite values? 
+ if (finite_count == num_subjects()) { - Mfull_masked.resize (num_subjects(), num_factors()); - Mfull_masked.block (0, 0, num_subjects(), M.cols()) = M; - Mfull_masked.block (0, M.cols(), num_subjects(), extra_data.cols()) = extra_data; - shuffling_matrix_masked = shuffling_matrix; - y_masked = y.col (ie); + Mfull_masked.resize (num_subjects(), num_factors()); + Mfull_masked.block (0, 0, num_subjects(), M.cols()) = M; + Mfull_masked.block (0, M.cols(), num_subjects(), extra_data.cols()) = extra_data; + shuffling_matrix_masked = shuffling_matrix; + y_masked = y.col (ie); - } else { + } else { - Mfull_masked.resize (finite_count, num_factors()); - y_masked.resize (finite_count); - perm_matrix_mask.clear (true); - size_t out_index = 0; - for (size_t in_index = 0; in_index != num_subjects(); ++in_index) { - if (element_mask[in_index]) { - Mfull_masked.block (out_index, 0, 1, M.cols()) = M.row (in_index); - Mfull_masked.block (out_index, M.cols(), 1, extra_data.cols()) = extra_data.row (in_index); - y_masked[out_index++] = y (in_index, ie); - } else { - // Any row in the permutation matrix that contains a non-zero entry - // in the column corresponding to in_row needs to be removed - // from the permutation matrix - for (ssize_t perm_row = 0; perm_row != shuffling_matrix.rows(); ++perm_row) { - if (shuffling_matrix (perm_row, in_index)) - perm_matrix_mask[perm_row] = false; + Mfull_masked.resize (finite_count, num_factors()); + y_masked.resize (finite_count); + perm_matrix_mask.clear (true); + size_t out_index = 0; + for (size_t in_index = 0; in_index != num_subjects(); ++in_index) { + if (element_mask[in_index]) { + Mfull_masked.block (out_index, 0, 1, M.cols()) = M.row (in_index); + Mfull_masked.block (out_index, M.cols(), 1, extra_data.cols()) = extra_data.row (in_index); + y_masked[out_index++] = y (in_index, ie); + } else { + // Any row in the permutation matrix that contains a non-zero entry + // in the column corresponding to in_row needs to be removed + // from the permutation matrix + for (ssize_t perm_row = 0; perm_row != shuffling_matrix.rows(); ++perm_row) { + if (shuffling_matrix (perm_row, in_index)) + perm_matrix_mask[perm_row] = false; + } } } + assert (out_index == finite_count); + assert (perm_matrix_mask.count() == finite_count); + assert (y_masked.allFinite()); + // Only after we've reduced the design matrix do we now reduce the shuffling matrix + // Step 1: Remove rows that contain non-zero entries in columns to be removed + matrix_type temp (finite_count, num_subjects()); + out_index = 0; + for (size_t in_index = 0; in_index != num_subjects(); ++in_index) { + if (perm_matrix_mask[in_index]) + temp.row (out_index++) = shuffling_matrix.row (in_index); + } + assert (out_index == finite_count); + // Step 2: Remove columns + shuffling_matrix_masked.resize (finite_count, finite_count); + out_index = 0; + for (size_t in_index = 0; in_index != num_subjects(); ++in_index) { + if (element_mask[in_index]) + shuffling_matrix_masked.col (out_index++) = temp.col (in_index); + } + assert (out_index == finite_count); } - assert (out_index == finite_count); - assert (perm_matrix_mask.count() == finite_count); - // Only after we've reduced the design matrix do we now reduce the shuffling matrix - // Step 1: Remove rows that contain non-zero entries in columns to be removed - matrix_type temp (finite_count, num_subjects()); - out_index = 0; - for (size_t in_index = 0; in_index != num_subjects(); ++in_index) { - if (perm_matrix_mask[in_index]) - temp.row (out_index++) = shuffling_matrix.row 
(in_index); - } - assert (out_index == finite_count); - // Step 2: Remove columns - shuffling_matrix_masked.resize (finite_count, finite_count); - out_index = 0; - for (size_t in_index = 0; in_index != num_subjects(); ++in_index) { - if (element_mask[in_index]) - shuffling_matrix_masked.col (out_index++) = temp.col (in_index); - } - assert (out_index == finite_count); - } - assert (Mfull_masked.allFinite()); - - pinvMfull_masked = Math::pinv (Mfull_masked); - - Rm.noalias() = matrix_type::Identity (finite_count, finite_count) - (Mfull_masked*pinvMfull_masked); - - // We now have our permutation (shuffling) matrix and design matrix prepared, - // and can commence regressing the partitioned model of each contrast - for (size_t ic = 0; ic != c.size(); ++ic) { - - const auto partition = c[ic].partition (Mfull_masked); - XtX.noalias() = partition.X.transpose()*partition.X; - - // Now that we have the individual contrast model partition for these data, - // the rest of this function should proceed similarly to the fixed - // design matrix case - //VAR (shuffling_matrix_masked.rows()); - //VAR (shuffling_matrix_masked.cols()); - //VAR (partition.Rz.rows()); - //VAR (partition.Rz.cols()); - //VAR (y_masked.rows()); - //VAR (y_masked.cols()); - Sy = shuffling_matrix_masked * partition.Rz * y_masked.matrix(); - lambda = pinvMfull_masked * Sy.matrix(); - beta.noalias() = c[ic].matrix() * lambda.matrix(); - const default_type sse = (Rm*Sy.matrix()).squaredNorm(); - - const default_type F = ((beta.transpose() * XtX * beta) (0, 0) / c[ic].rank()) / - (sse / value_type (finite_count - partition.rank_x - partition.rank_z)); + assert (Mfull_masked.allFinite()); + + pinvMfull_masked = Math::pinv (Mfull_masked); + + Rm.noalias() = matrix_type::Identity (finite_count, finite_count) - (Mfull_masked*pinvMfull_masked); + + // We now have our permutation (shuffling) matrix and design matrix prepared, + // and can commence regressing the partitioned model of each contrast + for (size_t ic = 0; ic != c.size(); ++ic) { + + const auto partition = c[ic].partition (Mfull_masked); + XtX.noalias() = partition.X.transpose()*partition.X; + + // Now that we have the individual contrast model partition for these data, + // the rest of this function should proceed similarly to the fixed + // design matrix case + //VAR (shuffling_matrix_masked.rows()); + //VAR (shuffling_matrix_masked.cols()); + //VAR (partition.Rz.rows()); + //VAR (partition.Rz.cols()); + //VAR (y_masked.rows()); + //VAR (y_masked.cols()); + Sy = shuffling_matrix_masked * partition.Rz * y_masked.matrix(); + lambda = pinvMfull_masked * Sy.matrix(); + beta.noalias() = c[ic].matrix() * lambda.matrix(); + const default_type sse = (Rm*Sy.matrix()).squaredNorm(); + + const default_type F = ((beta.transpose() * XtX * beta) (0, 0) / c[ic].rank()) / + (sse / value_type (finite_count - partition.rank_x - partition.rank_z)); + + if (!std::isfinite (F)) { + output (ie, ic) = value_type(0); + } else if (c[ic].is_F()) { + output (ie, ic) = F; + } else { + assert (beta.rows() == 1); + output (ie, ic) = std::sqrt (F) * (beta.sum() > 0 ? 1.0 : -1.0); + } - if (!std::isfinite (F)) { - output (ie, ic) = value_type(0); - } else if (c[ic].is_F()) { - output (ie, ic) = F; - } else { - assert (beta.rows() == 1); - output (ie, ic) = std::sqrt (F) * (beta.sum() > 0 ? 
1.0 : -1.0); - } + } // End looping over contrasts - } // End looping over contrasts + } // End checking for adequate number of remaining subjects after NaN removal } // End looping over elements } diff --git a/src/stats/permtest.cpp b/src/stats/permtest.cpp index f6d7c84f2c..42cc8e6fab 100644 --- a/src/stats/permtest.cpp +++ b/src/stats/permtest.cpp @@ -58,8 +58,8 @@ namespace MR return false; (*stats_calculator) (shuffle.data, stats); (*enhancer) (stats, enhanced_stats); - for (size_t c = 0; c != enhanced_stats.cols(); ++c) { - for (size_t i = 0; i < enhanced_stats.rows(); ++i) { + for (size_t c = 0; c != stats_calculator->num_outputs(); ++c) { + for (size_t i = 0; i != stats_calculator->num_elements(); ++i) { if (enhanced_stats(i, c) > 0.0) { enhanced_sum(i, c) += enhanced_stats(i, c); enhanced_count(i, c)++; @@ -146,8 +146,8 @@ namespace MR PreProcessor preprocessor (stats_calculator, enhancer, empirical_statistic, global_enhanced_count); Thread::run_queue (shuffler, Math::Stats::Shuffle(), Thread::multi (preprocessor)); } - for (ssize_t contrast = 0; contrast != stats_calculator->num_outputs(); ++contrast) { - for (ssize_t element = 0; element != stats_calculator->num_elements(); ++element) { + for (size_t contrast = 0; contrast != stats_calculator->num_outputs(); ++contrast) { + for (size_t element = 0; element != stats_calculator->num_elements(); ++element) { if (global_enhanced_count(element, contrast) > 0) empirical_statistic(element, contrast) /= static_cast (global_enhanced_count(element, contrast)); } From 69266c0e7913dd67ac1a224341c9429510f5c87a Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Tue, 27 Mar 2018 18:31:55 +1100 Subject: [PATCH 0124/1471] 5ttgen hsvs: Tweaks - Use file.delTemporary throughout (has not been added comprehensively; this will require additional testing). More use of variables to store file names, to reduce changes of coding errors. - Experimenting with use of other tissue segmentations to constrain contributions from each tissue, and techniques for combining tissue segmentations together (not currently a final solution; still have errors at the outer edges of the cerebellum partial volume image with the -template option). --- lib/mrtrix3/_5ttgen/hsvs.py | 273 +++++++++++++++++++++++++++++------- 1 file changed, 222 insertions(+), 51 deletions(-) diff --git a/lib/mrtrix3/_5ttgen/hsvs.py b/lib/mrtrix3/_5ttgen/hsvs.py index d31ec756b3..ef53adf4b6 100644 --- a/lib/mrtrix3/_5ttgen/hsvs.py +++ b/lib/mrtrix3/_5ttgen/hsvs.py @@ -1,6 +1,4 @@ def initialise(base_parser, subparsers): - import argparse - from mrtrix3 import app parser = subparsers.add_parser('hsvs', author='Robert E. 
Smith (robert.smith@florey.edu.au)', synopsis='Generate a 5TT image based on Hybrid Surface and Volume Segmentation (HSVS), using FreeSurfer and FSL tools', parents=[base_parser]) # TODO Permit either FreeSurfer directory or T1 image as input parser.add_argument('input', help='The input FreeSurfer subject directory') @@ -30,8 +28,7 @@ def getInputs(): def execute(): import os, sys - from mrtrix3 import app, fsl, path, run - from distutils.spawn import find_executable + from mrtrix3 import app, file, fsl, path, run def checkFile(filepath): import os @@ -115,14 +112,17 @@ def checkDir(dirpath): progress.increment() # Since the hippocampal subfields segmentation can include some fine structures, reduce the extent of smoothing run.command('meshfilter ' + init_mesh_path + ' smooth ' + smooth_mesh_path + ' -smooth_spatial 5 -smooth_influence 5') + file.delTemporary(init_mesh_path) progress.increment() run.command('mesh2voxel ' + smooth_mesh_path + ' ' + template_image + ' ' + outputname + '.mif') + file.delTemporary(smooth_mesh_path) progress.increment() progress.done() # If we're going to be running FIRST, then we don't want to run it on the hippocampi if have_first: sgm_first_map = { key:value for key, value in sgm_first_map.items() if value not in hipp_subfield_image_map.values() } else: + app.console('No FreeSurfer hippocampal subfields module output detected; ' + ('FIRST segmentation' if have_first else 'standard FreeSurfer segmentation') + ' will instead be used') hipp_subfield_image_map = { } @@ -131,11 +131,15 @@ def checkDir(dirpath): app.console('Running FSL FIRST to segment sub-cortical grey matter structures') run.command(first_cmd + ' -s ' + ','.join(sgm_first_map.keys()) + ' -i T1.nii -b -o first') fsl.checkFirst('first', sgm_first_map.keys()) - progress = app.progressBar('Mapping sub-cortical structures segmented by FIRST from surface to voxel representation', len(sgm_first_map)) + progress = app.progressBar('Mapping sub-cortical structures segmented by FIRST from surface to voxel representation', 2*len(sgm_first_map)) for key, value in sgm_first_map.items(): vtk_in_path = 'first-' + key + '_first.vtk' - run.command('meshconvert ' + vtk_in_path + ' first-' + key + '_transformed.vtk -transform first2real T1.nii') - run.command('mesh2voxel first-' + key + '_transformed.vtk ' + template_image + ' ' + value + '.mif') + vtk_converted_path = 'first-' + key + '_transformed.vtk' + run.command('meshconvert ' + vtk_in_path + ' ' + vtk_converted_path + ' -transform first2real T1.nii') + file.delTemporary(vtk_in_path) + progress.increment() + run.command('mesh2voxel ' + vtk_converted_path + ' ' + template_image + ' ' + value + '.mif') + file.delTemporary(vtk_converted_path) progress.increment() progress.done() @@ -200,6 +204,7 @@ def checkDir(dirpath): (253, 2, 'CC_Central'), (254, 2, 'CC_Mid_Anterior'), (255, 2, 'CC_Anterior') ] + # TODO Need to do something about anterior commissure @@ -207,14 +212,16 @@ def checkDir(dirpath): # FIXME There may be some minor mismatch between the WM and pial segments within the medial section # where no optimisation is performed, but vertices are simply defined in order to guarantee # closed surfaces. Ideally these should be removed from the CGM tissue. 
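As an aside on the structure-mapping logic above: each anatomical structure follows the same surface-to-voxel pattern of convert, smooth, voxelise, with intermediates deleted as soon as they have been consumed. A minimal Python sketch of that pattern, assuming the script's run, file and app helpers are in scope; the one-entry structure list and the template image name are hypothetical stand-ins, not taken verbatim from the script:

    structures = [ ('first-L_Thal_first.vtk', 'Left-Thalamus') ]   # hypothetical example entry
    progress = app.progressBar('Mapping structures from surface to voxel representation', 2*len(structures))
    for surface, outputname in structures:
      transformed = outputname + '_transformed.vtk'
      smoothed = outputname + '_smoothed.vtk'
      run.command('meshconvert ' + surface + ' ' + transformed + ' -transform first2real T1.nii')
      file.delTemporary(surface)         # input mesh no longer needed
      progress.increment()
      run.command('meshfilter ' + transformed + ' smooth ' + smoothed)
      file.delTemporary(transformed)
      run.command('mesh2voxel ' + smoothed + ' template.mif ' + outputname + '.mif')
      file.delTemporary(smoothed)
      progress.increment()
    progress.done()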
- progress = app.progressBar('Mapping FreeSurfer cortical reconstruction to partial volume images', 4) + progress = app.progressBar('Mapping FreeSurfer cortical reconstruction to partial volume images', 8) for hemi in [ 'lh', 'rh' ]: for basename in [ hemi+'.white', hemi+'.pial' ]: filepath = os.path.join(surf_dir, basename) checkFile(filepath) transformed_path = basename + '_realspace.obj' run.command('meshconvert ' + filepath + ' ' + transformed_path + ' -binary -transform fs2real ' + aparc_image) + progress.increment() run.command('mesh2voxel ' + transformed_path + ' ' + template_image + ' ' + basename + '.mif') + file.delTemporary(transformed_path) progress.increment() progress.done() @@ -232,22 +239,29 @@ def checkDir(dirpath): if 'Cerebellum' in name and have_fast: run.command('mrcalc ' + aparc_image + ' ' + str(index) + ' -eq ' + name + '.mif -datatype float32') else: - run.command('mrcalc ' + aparc_image + ' ' + str(index) + ' -eq - | mrmesh - -threshold 0.5 ' + name + '_init.obj') - run.command('meshfilter ' + name + '_init.obj smooth ' + name + '.obj') - run.command('mesh2voxel ' + name + '.obj ' + template_image + ' ' + name + '.mif') + init_mesh_path = name + '_init.vtk' + smoothed_mesh_path = name + '.vtk' + run.command('mrcalc ' + aparc_image + ' ' + str(index) + ' -eq - | mrmesh - -threshold 0.5 ' + init_mesh_path) + run.command('meshfilter ' + init_mesh_path + ' smooth ' + smoothed_mesh_path) + file.delTemporary(init_mesh_path) + run.command('mesh2voxel ' + smoothed_mesh_path + ' ' + template_image + ' ' + name + '.mif') + file.delTemporary(smoothed_mesh_path) progress.increment() progress.done() # Construct images with the partial volume of each tissue progress = app.progressBar('Combining segmentations of all structures corresponding to each tissue type', 5) for tissue in range(0,5): - image_list = [ n + '.mif' for (i, t, n) in structures if (t == tissue and not (have_fast and 'Cerebellum' in n)) ] + image_list = [ n + '.mif' for (i, t, n) in structures if t == tissue and not (have_fast and 'Cerebellum' in n) ] # For cortical GM and WM, need to also add the main cerebrum segments if tissue == 0: image_list.extend([ 'lh.pial.mif', 'rh.pial.mif' ]) elif tissue == 2: image_list.extend([ 'lh.white.mif', 'rh.white.mif' ]) run.command('mrmath ' + ' '.join(image_list) + ' sum - | mrcalc - 1.0 -min tissue' + str(tissue) + '_init.mif') + # TODO Update file.delTemporary() to support list input + for entry in image_list: + file.delTemporary(entry) progress.increment() progress.done() @@ -260,23 +274,30 @@ def checkDir(dirpath): # then run through mrmesh # Combine these images together using the appropriate logic in order to form the 5TT image - progress = app.progressBar('Combining tissue images', 9) + progress = app.progressBar('Modulating segmentation images based on other tissues', 9) tissue_images = [ 'tissue0.mif', 'tissue1.mif', 'tissue2.mif', 'tissue3.mif', 'tissue4.mif' ] run.function(os.rename, 'tissue4_init.mif', 'tissue4.mif') progress.increment() run.command('mrcalc tissue3_init.mif tissue3_init.mif ' + tissue_images[4] + ' -add 1.0 -sub 0.0 -max -sub ' + tissue_images[3]) + file.delTemporary('tissue3_init.mif') progress.increment() run.command('mrmath ' + ' '.join(tissue_images[3:5]) + ' sum tissuesum_34.mif') progress.increment() run.command('mrcalc tissue1_init.mif tissue1_init.mif tissuesum_34.mif -add 1.0 -sub 0.0 -max -sub ' + tissue_images[1]) + file.delTemporary('tissue1_init.mif') + file.delTemporary('tissuesum_34.mif') progress.increment() run.command('mrmath ' 
+ tissue_images[1] + ' ' + ' '.join(tissue_images[3:5]) + ' sum tissuesum_134.mif') progress.increment() run.command('mrcalc tissue2_init.mif tissue2_init.mif tissuesum_134.mif -add 1.0 -sub 0.0 -max -sub ' + tissue_images[2]) + file.delTemporary('tissue2_init.mif') + file.delTemporary('tissuesum_134.mif') progress.increment() run.command('mrmath ' + ' '.join(tissue_images[1:5]) + ' sum tissuesum_1234.mif') progress.increment() run.command('mrcalc tissue0_init.mif tissue0_init.mif tissuesum_1234.mif -add 1.0 -sub 0.0 -max -sub ' + tissue_images[0]) + file.delTemporary('tissue0_init.mif') + file.delTemporary('tissuesum_1234.mif') progress.increment() tissue_sum_image = 'tissuesum_01234.mif' run.command('mrmath ' + ' '.join(tissue_images) + ' sum ' + tissue_sum_image) @@ -300,35 +321,51 @@ def checkDir(dirpath): # map these to the template image, run FIRST on a binary mask from this, then # re-combine this with the tissue maps from other sources based on the estimated PVF of # cerebellum meshes - + cerebellum_volume_image = 'Cerebellum_volume.mif' if app.args.template: # If this is the case, then we haven't yet performed any cerebellar segmentation / meshing # What we want to do is: for each hemisphere, combine all three "cerebellar" segments from FreeSurfer, # convert to a surface, map that surface to the template image progress = app.progressBar('Preparing images of cerebellum for intensity-based segmentation', 11) + cerebellar_hemisphere_pvf_images = [ ] for hemi in [ 'Left', 'Right' ]: - cerebellar_images = [ n + '.mif' for (i, t, n) in structures if (hemi in n and 'Cerebellum' in n) ] - run.command('mrmath ' + ' '.join(cerebellar_images) + ' sum ' + hemi + '-Cerebellum-All.mif') + cerebellar_images = [ n + '.mif' for (i, t, n) in structures if hemi in n and 'Cerebellum' in n ] + sum_image = hemi + '-Cerebellum-All.mif' + init_mesh_path = hemi + '-Cerebellum-All-Init.vtk' + smooth_mesh_path = hemi + '-Cerebellum-All-Smooth.vtk' + pvf_image_path = hemi + '-Cerebellum-PVF-Template.mif' + run.command('mrmath ' + ' '.join(cerebellar_images) + ' sum ' + sum_image) + # TODO Update with new file.delTemporary() + for entry in cerebellar_images: + file.delTemporary(entry) progress.increment() - run.command('mrmesh ' + hemi + '-Cerebellum-All.mif ' + hemi + '-Cerebellum-All-init.vtk') + run.command('mrmesh ' + sum_image + ' ' + init_mesh_path) + file.delTemporary(sum_image) progress.increment() - run.command('meshfilter ' + hemi + '-Cerebellum-All-init.vtk smooth ' + hemi + '-Cerebellum-All-smooth.vtk') + run.command('meshfilter ' + init_mesh_path + ' smooth ' + smooth_mesh_path) + file.delTemporary(init_mesh_path) progress.increment() - run.command('mesh2voxel ' + hemi + '-Cerebellum-All-smooth.vtk ' + template_image + ' ' + hemi + '-Cerebellum-PVF-Template.mif') + run.command('mesh2voxel ' + smooth_mesh_path + ' ' + template_image + ' ' + pvf_image_path) + file.delTemporary(smooth_mesh_path) + cerebellar_hemisphere_pvf_images.append(pvf_image_path) progress.increment() # Combine the two hemispheres together into: # - An image in preparation for running FAST # - A combined total partial volume fraction image that will be later used for tissue recombination - run.command('mrcalc Left-Cerebellum-PVF-Template.mif Right-Cerebellum-PVF-Template.mif -add 1.0 -min Cerebellum_weight.mif') + run.command('mrcalc ' + ' '.join(cerebellar_hemisphere_pvf_images) + ' -add 1.0 -min ' + cerebellum_volume_image) + # TODO Update with new file.delTemporary() + for entry in cerebellar_hemisphere_pvf_images: + 
file.delTemporary(entry) progress.increment() T1_cerebellum_mask_image = 'T1_cerebellum_mask.mif' - run.command('mrthreshold Cerebellum_weight.mif ' + T1_cerebellum_mask_image + ' -abs 1e-6') + run.command('mrthreshold ' + cerebellum_volume_image + ' ' + T1_cerebellum_mask_image + ' -abs 1e-6') progress.increment() run.command('mrtransform ' + norm_image + ' -template ' + template_image + ' - | ' \ 'mrcalc - ' + T1_cerebellum_mask_image + ' -mult - | ' \ 'mrconvert - T1_cerebellum_precrop.mif') + file.delTemporary(T1_cerebellum_mask_image) progress.done() else: @@ -336,33 +373,48 @@ def checkDir(dirpath): progress = app.progressBar('Preparing images of cerebellum for intensity-based segmentation', 2) # Generate a mask of all voxels classified as cerebellum by FreeSurfer cerebellum_mask_images = [ n + '.mif' for (i, t, n) in structures if 'Cerebellum' in n ] - run.command('mrmath ' + ' '.join(cerebellum_mask_images) + ' sum Cerebellum_weight.mif') + run.command('mrmath ' + ' '.join(cerebellum_mask_images) + ' sum ' + cerebellum_volume_image) progress.increment() # FAST image input needs to be pre-masked - T1_cerebellum_mask_image = 'Cerebellum_weight.mif' - run.command('mrcalc T1.nii ' + T1_cerebellum_mask_image + ' -mult - | mrconvert - T1_cerebellum_precrop.mif -stride -1,+2,+3') + T1_cerebellum_mask_image = cerebellum_volume_image + run.command('mrcalc T1.nii ' + T1_cerebellum_mask_image + ' -mult - | mrconvert - T1_cerebellum_precrop.mif') progress.done() - # TODO Any code below here should be compatible with Cerebellum_weight.mif containing partial volume fractions + # TODO Any code below here should be compatible with cerebellum_volume_image.mif containing partial volume fractions # (in the case of no explicit template image, it's a mask, but the logic still applies) app.console('Running FSL fast to segment the cerebellum based on intensity information') + # TODO It's possible that FAST may have some edge-effects... + # Consider including to the input to FAST a dilated cerebellum mask, but then only actually using + # the values within image 'cerebellum_volume_image' + # No, looks like this isn't the issue; the issue is the recombination further down below. + # Run FSL FAST just within the cerebellum # TESTME Should bias field estimation be disabled within fast? 
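Decoding the reverse-Polish mrcalc expressions used above: modulating one tissue by the others subtracts only the excess that would push the running total past unity, and merging two fraction images simply caps their sum at one. A small numpy sketch of the same arithmetic, with stand-in arrays in place of the actual images:

    import numpy as np
    init, others = np.random.rand(64, 64), np.random.rand(64, 64)   # stand-ins for PV fraction images
    modulated = init - np.maximum(init + others - 1.0, 0.0)         # X X others -add 1.0 -sub 0.0 -max -sub
    left, right = np.random.rand(64, 64), np.random.rand(64, 64)    # stand-ins for hemisphere fractions
    combined = np.minimum(left + right, 1.0)                        # L R -add 1.0 -min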
# FAST memory usage can also be huge when using a high-resolution template image: # Crop T1 image around the cerebellum before feeding to FAST, then re-sample to full template image FoV - run.command('mrcrop T1_cerebellum_precrop.mif -mask ' + T1_cerebellum_mask_image + ' T1_cerebellum.nii') - run.command(fast_cmd + ' -N T1_cerebellum.nii') - run.command('mrtransform T1_cerebellum_pve_0' + fast_suffix + ' -interp nearest -template ' + template_image + ' FAST_CSF.mif') - run.command('mrtransform T1_cerebellum_pve_1' + fast_suffix + ' -interp nearest -template ' + template_image + ' FAST_GM.mif') - run.command('mrtransform T1_cerebellum_pve_2' + fast_suffix + ' -interp nearest -template ' + template_image + ' FAST_WM.mif') + fast_input_image = 'T1_cerebellum.nii' + run.command('mrcrop T1_cerebellum_precrop.mif -mask ' + T1_cerebellum_mask_image + ' ' + fast_input_image) + run.command(fast_cmd + ' -N ' + fast_input_image) + file.delTemporary(fast_input_image) + + progress = app.progressBar('Introducing intensity-based cerebellar segmentation into the 5TT image', 10) + + fast_output_prefix = os.path.splitext(fast_input_image)[0] + '_pve_' + fast_outputs_cropped = [ fast_output_prefix + str(n) + fast_suffix for n in range(0,3) ] + fast_outputs_template = [ 'FAST_' + str(n) + '.mif' for n in range(0,3) ] + for inpath, outpath in zip(fast_outputs_cropped, fast_outputs_template): + run.command('mrtransform ' + inpath + ' -interp nearest -template ' + template_image + ' ' + outpath) + file.delTemporary(outpath) + progress.increment() + if app.args.template: + file.delTemporary(template_image) # Generate the revised tissue images, using output from FAST inside the cerebellum and # output from previous processing everywhere else # Note that the middle intensity (grey matter) in the FAST output here gets assigned # to the sub-cortical grey matter component - progress = app.progressBar('Introducing intensity-based cerebellar segmentation into the 5TT image', 5) # Some of these voxels may have a non-zero cortical GM component. # In that case, let's find a multiplier to apply to all tissues (including CGM) such that the sum is 1.0 @@ -370,23 +422,68 @@ def checkDir(dirpath): # Cerebellum_weight.mif contains floating-point values? 
# TODO Can probably also change if the cerebellar segments are not included in prior computations: # no longer considering just the CGM fraction - new_tissue_images = [ 'tissue0_fast.mif', 'tissue1_fast.mif', 'tissue2_fast.mif', 'tissue3_fast.mif', 'tissue4.mif' ] + # TODO This definitely needs to be revised: Sharp CSF strip between cerebellum and brain stem with -template + new_tissue_images = [ 'tissue0_fast.mif', 'tissue1_fast.mif', 'tissue2_fast.mif', 'tissue3_fast.mif', 'tissue4_fast.mif' ] new_tissue_sum_image = 'tissuesum_01234_fast.mif' - run.command('mrcalc 1.0 ' + tissue_images[0] + ' 1.0 -add -div Cerebellum_weight.mif -mult Cerebellar_multiplier.mif') + cerebellum_multiplier_image = 'Cerebellar_multiplier.mif' + + # TESTME Using tissue_sum_image rather than only CGM image + # TESTME Re-work combination equation: + # Cerebellum contributions will be weighted by the cerebellum weight image + # The sum of the non-cerebellum tissues, and (the cerebellum tissues multiplied by the cerebellum weight image), + # should result in a total tissue sum that does not exceed 1.0 + # Therefore, calculate the multiplier that need to be applied in order to make the tissue sum 1.0, but + # don't let that multiplier exceed 1.0 + # Need to deal with potential for NaNs + # Should the non-cerebellar volumes *all* preserve their initial weighting, and multiplication should only be + # applied to the cerebellar volumes? I think so... + # TESTME Fix for NaNs + # If nonzero then (eq) else 0.0 + run.command('mrcalc ' + cerebellum_volume_image + ' 0.0 -gt 1.0 ' + tissue_sum_image + ' -sub ' + cerebellum_volume_image + ' -div ' + cerebellum_volume_image + ' -min 0.0 -if ' + cerebellum_multiplier_image) + file.delTemporary(cerebellum_volume_image) + progress.increment() + run.command('mrconvert ' + tissue_images[0] + ' ' + new_tissue_images[0]) + file.delTemporary(tissue_images[0]) progress.increment() - run.command('mrcalc Cerebellum_weight.mif Cerebellar_multiplier.mif 1.0 -if ' + tissue_images[0] + ' -mult ' + new_tissue_images[0]) + run.command('mrcalc ' + tissue_images[1] + ' ' + cerebellum_multiplier_image + ' ' + fast_outputs_template[1] + ' -mult -add ' + new_tissue_images[1]) + file.delTemporary(tissue_images[1]) progress.increment() - run.command('mrcalc Cerebellum_weight.mif FAST_CSF.mif Cerebellar_multiplier.mif -mult ' + tissue_images[3] + ' -if ' + new_tissue_images[3]) + run.command('mrcalc ' + tissue_images[2] + ' ' + cerebellum_multiplier_image + ' ' + fast_outputs_template[2] + ' -mult -add ' + new_tissue_images[2]) + file.delTemporary(tissue_images[2]) progress.increment() - run.command('mrcalc Cerebellum_weight.mif FAST_GM.mif Cerebellar_multiplier.mif -mult ' + tissue_images[1] + ' -if ' + new_tissue_images[1]) + run.command('mrcalc ' + tissue_images[3] + ' ' + cerebellum_multiplier_image + ' ' + fast_outputs_template[0] + ' -mult -add ' + new_tissue_images[3]) + file.delTemporary(tissue_images[3]) + file.delTemporary(cerebellum_multiplier_image) progress.increment() - run.command('mrcalc Cerebellum_weight.mif FAST_WM.mif Cerebellar_multiplier.mif -mult ' + tissue_images[2] + ' -if ' + new_tissue_images[2]) + run.command('mrconvert ' + tissue_images[4] + ' ' + new_tissue_images[4]) + file.delTemporary(tissue_images[4]) progress.increment() run.command('mrmath ' + ' '.join(new_tissue_images) + ' sum ' + new_tissue_sum_image) + file.delTemporary(tissue_sum_image) progress.done() tissue_images = new_tissue_images tissue_sum_image = new_tissue_sum_image + #run.command('mrcalc 1.0 ' + 
tissue_sum_image + ' 1.0 -add -div ' + cerebellum_volume_image + ' -mult ' + cerebellum_multiplier_image) + #progress.increment() + #run.command('mrcalc ' + cerebellum_volume_image + ' ' + cerebellum_multiplier_image + ' 1.0 -if ' + tissue_images[0] + ' -mult ' + new_tissue_images[0]) + #progress.increment() + #run.command('mrcalc ' + cerebellum_volume_image + ' ' + fast_outputs_template[0] + ' ' + cerebellum_multiplier_image + ' -mult ' + tissue_images[3] + ' -if ' + new_tissue_images[3]) + #file.delTemporary(fast_outputs_template[0]) + #progress.increment() + #run.command('mrcalc ' + cerebellum_volume_image + ' ' + fast_outputs_template[1] + ' ' + cerebellum_multiplier_image + ' -mult ' + tissue_images[1] + ' -if ' + new_tissue_images[1]) + #file.delTemporary(fast_outputs_template[1]) + #progress.increment() + #run.command('mrcalc ' + cerebellum_volume_image + ' ' + fast_outputs_template[2] + ' ' + cerebellum_multiplier_image + ' -mult ' + tissue_images[2] + ' -if ' + new_tissue_images[2]) + #file.delTemporary(fast_outputs_template[2]) + #file.delTemporary(cerebellum_multiplier_image) + #progress.increment() + #run.command('mrmath ' + ' '.join(new_tissue_images) + ' sum ' + new_tissue_sum_image) + #file.delTemporary(tissue_sum_image) + #progress.done() + #tissue_images = new_tissue_images + #tissue_sum_image = new_tissue_sum_image + # For all voxels within FreeSurfer's brain mask, add to the CSF image in order to make the sum 1.0 # TESTME Rather than doing this blindly, look for holes in the brain, and assign the remainder to WM; @@ -395,36 +492,110 @@ def checkDir(dirpath): # TESTME Should the below occur after FAST? - progress = app.progressBar('Performing fill operations to preserve unity tissue volume', 5) + progress = app.progressBar('Performing fill operations to preserve unity tissue volume', 2) # TODO Connected-component analysis at high template image resolution is taking up huge amounts of memory # Crop beforehand? It's because it's filling everything outside the brain... - new_tissue_images = [ tissue_images[0], tissue_images[1], 'tissue2_filled.mif', 'tissue3_filled.mif', tissue_images[4] ] - run.command('mrthreshold ' + tissue_sum_image + ' -abs 0.5 - | maskfilter - erode - | mrcalc 1.0 - -sub - | maskfilter - connect -largest - | mrcalc 1.0 - -sub wm_fill_mask.mif') - progress.increment() - run.command('mrcalc 1.0 ' + tissue_sum_image + ' -sub wm_fill_mask.mif -mult wm_fill.mif') - progress.increment() - run.command('mrcalc ' + tissue_images[2] + ' wm_fill.mif -add ' + new_tissue_images[2]) - progress.increment() - run.command('mrcalc 1.0 ' + tissue_sum_image + ' wm_fill.mif -add -sub ' + mask_image + ' 0.0 -gt -mult csf_fill.mif') + # TODO Consider performing CSF fill before WM fill? Does this solve any problems? + # Well, it might mean I can use connected-components to select the CSF fill, rather than using an erosion + # This may also reduce the RAM usage of the connected-components analysis, since it'd be within + # the brain mask rather than using the whole image FoV. + # Also: Potentially use mask cleaning filter + # FIXME Appears we can't rely on a connected-component filter: Still some bits e.g. 
between cortex & cerebellum + # Maybe need to look into techniques for specifically filling in between structure pairs + + + # TeSTME Don't even attempt to split between WM and CSF fill + # Or, at least, need to find a better solution for separating the two + new_tissue_images = [ tissue_images[0], + tissue_images[1], + tissue_images[2], + os.path.splitext(tissue_images[3])[0] + '_filled.mif', + tissue_images[4] ] + csf_fill_image = 'csf_fill.mif' + run.command('mrcalc 1.0 ' + tissue_sum_image + ' -sub ' + mask_image + ' -mult ' + csf_fill_image) + file.delTemporary(mask_image) progress.increment() - run.command('mrcalc ' + tissue_images[3] + ' csf_fill.mif -add ' + new_tissue_images[3]) + run.command('mrcalc ' + tissue_images[3] + ' ' + csf_fill_image + ' -add ' + new_tissue_images[3]) + file.delTemporary(csf_fill_image) + file.delTemporary(tissue_images[3]) progress.done() tissue_images = new_tissue_images + + # new_tissue_images = [ tissue_images[0], + # tissue_images[1], + # os.path.splitext(tissue_images[2])[0] + '_filled.mif', + # os.path.splitext(tissue_images[3])[0] + '_filled.mif', + # tissue_images[4] ] + # nonunity_mask_image = 'nonunity_voxels.mif' + # csf_fill_mask_image = 'csf_fill_mask.mif' + # csf_fill_image = 'csf_fill.mif' + # wm_fill_image = 'wm_fill.mif' + # run.command('mrcalc 1.0 ' + tissue_sum_image + ' -sub ' + mask_image + ' -mult 1e-5 -gt ' + nonunity_mask_image) + # progress.increment() + # # Cleaning filter is slow, & results in weirdness + # run.command('maskfilter ' + nonunity_mask_image + ' connect -largest ' + csf_fill_mask_image) + # progress.increment() + # run.command('mrcalc 1.0 ' + tissue_sum_image + ' -sub ' + csf_fill_mask_image + ' -mult ' + csf_fill_image) + # progress.increment() + # run.command('mrcalc 1.0 ' + tissue_sum_image + ' -sub ' + nonunity_mask_image + ' ' + csf_fill_mask_image + ' -sub -mult ' + wm_fill_image) + # file.delTemporary(nonunity_mask_image) + # file.delTemporary(csf_fill_mask_image) + # progress.increment() + # run.command('mrcalc ' + tissue_images[2] + ' ' + wm_fill_image + ' -add ' + new_tissue_images[2]) + # file.delTemporary(tissue_images[2]) + # file.delTemporary(wm_fill_image) + # progress.increment() + # run.command('mrcalc ' + tissue_images[3] + ' ' + csf_fill_image + ' -add ' + new_tissue_images[3]) + # file.delTemporary(tissue_images[3]) + # file.delTemporary(csf_fill_image) + # progress.done() + # tissue_images = new_tissue_images + + + #wm_fill_mask_image = 'wm_fill_mask.mif' + #wm_fill_image = 'wm_fill.mif' + #csf_fill_image = 'csf_fill.mif' + #run.command('mrthreshold ' + tissue_sum_image + ' -abs 0.5 - | maskfilter - erode - | mrcalc 1.0 - -sub - | maskfilter - connect -largest - | mrcalc 1.0 - -sub ' + wm_fill_mask_image) + #progress.increment() + #run.command('mrcalc 1.0 ' + tissue_sum_image + ' -sub ' + wm_fill_mask_image + ' -mult ' + wm_fill_image) + #file.delTemporary(wm_fill_mask_image) + #progress.increment() + #run.command('mrcalc ' + tissue_images[2] + ' ' + wm_fill_image + ' -add ' + new_tissue_images[2]) + #file.delTemporary(tissue_images[2]) + #progress.increment() + #run.command('mrcalc 1.0 ' + tissue_sum_image + ' ' + wm_fill_image + ' -add -sub ' + mask_image + ' 0.0 -gt -mult ' + csf_fill_image) + #file.delTemporary(tissue_sum_image) + #file.delTemporary(wm_fill_image) + #progress.increment() + #run.command('mrcalc ' + tissue_images[3] + ' ' + csf_fill_image + ' -add ' + new_tissue_images[3]) + #file.delTemporary(tissue_images[3]) + #file.delTemporary(csf_fill_image) + #progress.done() + 
#tissue_images = new_tissue_images + + # Finally, concatenate the volumes to produce the 5TT image - run.command('mrcat ' + ' '.join(tissue_images) + ' 5TT.mif -axis 3') + precrop_result_image = '5TT.mif' + run.command('mrcat ' + ' '.join(tissue_images) + ' ' + precrop_result_image + ' -axis 3') + # TODO Use new file.delTemporary() + for entry in tissue_images: + file.delTemporary(entry) # Maybe don't go off all tissues here, since FreeSurfer's mask can be fairly liberal; # instead get just a voxel clearance from all other tissue types (maybe two) if app.args.nocrop: - run.function(os.rename, '5TT.mif', 'result.mif') + run.function(os.rename, precrop_result_image, 'result.mif') else: app.console('Cropping final 5TT image') - run.command('mrconvert 5TT.mif -coord 3 0,1,2,4 - | mrmath - sum - -axis 3 | mrthreshold - - -abs 0.001 | maskfilter - dilate crop_mask.mif') - run.command('mrcrop 5TT.mif result.mif -mask crop_mask.mif') + crop_mask_image = 'crop_mask.mif' + run.command('mrconvert ' + precrop_result_image + ' -coord 3 0,1,2,4 - | mrmath - sum - -axis 3 | mrthreshold - - -abs 0.001 | maskfilter - dilate ' + crop_mask_image) + run.command('mrcrop ' + precrop_result_image + ' result.mif -mask ' + crop_mask_image) + file.delTemporary(crop_mask_image) + file.delTemporary(precrop_result_image) From 1f693211787beec1ecfbce66dd164b8a5f38f36e Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Fri, 6 Apr 2018 17:07:04 +1000 Subject: [PATCH 0125/1471] GLM: Warn on design matrix with high condition number --- core/math/stats/glm.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/core/math/stats/glm.h b/core/math/stats/glm.h index 4ffbbbe93a..78f8bcec84 100644 --- a/core/math/stats/glm.h +++ b/core/math/stats/glm.h @@ -207,6 +207,11 @@ namespace MR // Can no longer apply this assertion here; GLMTTestVariable later // expands the number of columns in M //assert (c.cols() == M.cols()); + auto v = Eigen::JacobiSVD (design).singularValues(); + auto cond = v[0] / v[v.size()-1]; + if (cond > 10.0) { + WARN ("Design matrix may contain collinear factors (condition number = " + str(cond) + "); recommend double-checking derivation of design matrix"); + } } /*! Compute the statistics From ff2a88f1199e3667a7bbd6bf0c5074a3c7d7a982 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Fri, 6 Apr 2018 17:08:53 +1000 Subject: [PATCH 0126/1471] 5ttgen hsvs: Code comments cleanup --- lib/mrtrix3/_5ttgen/hsvs.py | 129 +++++++----------------------------- 1 file changed, 25 insertions(+), 104 deletions(-) diff --git a/lib/mrtrix3/_5ttgen/hsvs.py b/lib/mrtrix3/_5ttgen/hsvs.py index ef53adf4b6..1b1065d0f0 100644 --- a/lib/mrtrix3/_5ttgen/hsvs.py +++ b/lib/mrtrix3/_5ttgen/hsvs.py @@ -21,6 +21,7 @@ def checkOutputPaths(): def getInputs(): from mrtrix3 import app, path, run # FreeSurfer files will be accessed in-place; no need to pre-convert them into the temporary directory + # TODO Pre-convert aparc image so that it doesn't have to be repeatedly uncompressed if app.args.template: run.command('mrconvert ' + path.fromUser(app.args.template, True) + ' ' + path.toTemp('template.mif', True) + ' -axes 0,1,2') @@ -266,7 +267,6 @@ def checkDir(dirpath): progress.done() - # TODO Need to fill in any potential gaps in the WM image in the centre of the brain # This can hopefully be done with a connected-component analysis: Take just the WM image, and # fill in any gaps (i.e. 
select the inverse, select the largest connected component, invert again) # Make sure that floating-point values are handled appropriately @@ -306,8 +306,6 @@ def checkDir(dirpath): - - # Branch depending on whether or not FSL fast will be used to re-segment the cerebellum if have_fast: @@ -380,16 +378,11 @@ def checkDir(dirpath): run.command('mrcalc T1.nii ' + T1_cerebellum_mask_image + ' -mult - | mrconvert - T1_cerebellum_precrop.mif') progress.done() - # TODO Any code below here should be compatible with cerebellum_volume_image.mif containing partial volume fractions + # Any code below here should be compatible with cerebellum_volume_image.mif containing partial volume fractions # (in the case of no explicit template image, it's a mask, but the logic still applies) app.console('Running FSL fast to segment the cerebellum based on intensity information') - # TODO It's possible that FAST may have some edge-effects... - # Consider including to the input to FAST a dilated cerebellum mask, but then only actually using - # the values within image 'cerebellum_volume_image' - # No, looks like this isn't the issue; the issue is the recombination further down below. - # Run FSL FAST just within the cerebellum # TESTME Should bias field estimation be disabled within fast? # FAST memory usage can also be huge when using a high-resolution template image: @@ -416,29 +409,11 @@ def checkDir(dirpath): # Note that the middle intensity (grey matter) in the FAST output here gets assigned # to the sub-cortical grey matter component - # Some of these voxels may have a non-zero cortical GM component. - # In that case, let's find a multiplier to apply to all tissues (including CGM) such that the sum is 1.0 - # TESTME Does this needs to change for the case of a provided template image, in which case - # Cerebellum_weight.mif contains floating-point values? - # TODO Can probably also change if the cerebellar segments are not included in prior computations: - # no longer considering just the CGM fraction - # TODO This definitely needs to be revised: Sharp CSF strip between cerebellum and brain stem with -template + # Some of these voxels may have existing non-zero tissue components. + # In that case, let's find a multiplier to apply to cerebellum tissues such that the sum does not exceed 1.0 new_tissue_images = [ 'tissue0_fast.mif', 'tissue1_fast.mif', 'tissue2_fast.mif', 'tissue3_fast.mif', 'tissue4_fast.mif' ] new_tissue_sum_image = 'tissuesum_01234_fast.mif' cerebellum_multiplier_image = 'Cerebellar_multiplier.mif' - - # TESTME Using tissue_sum_image rather than only CGM image - # TESTME Re-work combination equation: - # Cerebellum contributions will be weighted by the cerebellum weight image - # The sum of the non-cerebellum tissues, and (the cerebellum tissues multiplied by the cerebellum weight image), - # should result in a total tissue sum that does not exceed 1.0 - # Therefore, calculate the multiplier that need to be applied in order to make the tissue sum 1.0, but - # don't let that multiplier exceed 1.0 - # Need to deal with potential for NaNs - # Should the non-cerebellar volumes *all* preserve their initial weighting, and multiplication should only be - # applied to the cerebellar volumes? I think so... 
- # TESTME Fix for NaNs - # If nonzero then (eq) else 0.0 run.command('mrcalc ' + cerebellum_volume_image + ' 0.0 -gt 1.0 ' + tissue_sum_image + ' -sub ' + cerebellum_volume_image + ' -div ' + cerebellum_volume_image + ' -min 0.0 -if ' + cerebellum_multiplier_image) file.delTemporary(cerebellum_volume_image) progress.increment() @@ -464,25 +439,6 @@ def checkDir(dirpath): tissue_images = new_tissue_images tissue_sum_image = new_tissue_sum_image - #run.command('mrcalc 1.0 ' + tissue_sum_image + ' 1.0 -add -div ' + cerebellum_volume_image + ' -mult ' + cerebellum_multiplier_image) - #progress.increment() - #run.command('mrcalc ' + cerebellum_volume_image + ' ' + cerebellum_multiplier_image + ' 1.0 -if ' + tissue_images[0] + ' -mult ' + new_tissue_images[0]) - #progress.increment() - #run.command('mrcalc ' + cerebellum_volume_image + ' ' + fast_outputs_template[0] + ' ' + cerebellum_multiplier_image + ' -mult ' + tissue_images[3] + ' -if ' + new_tissue_images[3]) - #file.delTemporary(fast_outputs_template[0]) - #progress.increment() - #run.command('mrcalc ' + cerebellum_volume_image + ' ' + fast_outputs_template[1] + ' ' + cerebellum_multiplier_image + ' -mult ' + tissue_images[1] + ' -if ' + new_tissue_images[1]) - #file.delTemporary(fast_outputs_template[1]) - #progress.increment() - #run.command('mrcalc ' + cerebellum_volume_image + ' ' + fast_outputs_template[2] + ' ' + cerebellum_multiplier_image + ' -mult ' + tissue_images[2] + ' -if ' + new_tissue_images[2]) - #file.delTemporary(fast_outputs_template[2]) - #file.delTemporary(cerebellum_multiplier_image) - #progress.increment() - #run.command('mrmath ' + ' '.join(new_tissue_images) + ' sum ' + new_tissue_sum_image) - #file.delTemporary(tissue_sum_image) - #progress.done() - #tissue_images = new_tissue_images - #tissue_sum_image = new_tissue_sum_image # For all voxels within FreeSurfer's brain mask, add to the CSF image in order to make the sum 1.0 @@ -490,8 +446,6 @@ def checkDir(dirpath): # only within the mask but outside the brain should the CSF fraction be filled # TODO Can definitely do better than just an erosion step here; still some hyper-intensities at GM-WW interface - - # TESTME Should the below occur after FAST? progress = app.progressBar('Performing fill operations to preserve unity tissue volume', 2) # TODO Connected-component analysis at high template image resolution is taking up huge amounts of memory # Crop beforehand? It's because it's filling everything outside the brain... 
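The fill operation sketched in these comments assigns whatever volume fraction remains unaccounted for inside the brain mask to the CSF compartment. In rough numpy terms, using stand-in arrays rather than the actual images, and ignoring the mask refinement introduced in a later commit:

    import numpy as np
    tissue_sum = np.random.rand(64, 64)          # stand-in: summed tissue fractions
    brain_mask = np.random.rand(64, 64) > 0.5    # stand-in: binary brain mask
    csf = np.zeros_like(tissue_sum)              # stand-in: current CSF fraction
    csf_fill = (1.0 - tissue_sum) * brain_mask   # unclaimed fraction, restricted to the mask
    csf = csf + csf_fill                         # in-mask fractions now sum to unity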
@@ -504,8 +458,6 @@ def checkDir(dirpath): # Maybe need to look into techniques for specifically filling in between structure pairs - # TeSTME Don't even attempt to split between WM and CSF fill - # Or, at least, need to find a better solution for separating the two new_tissue_images = [ tissue_images[0], tissue_images[1], tissue_images[2], @@ -523,57 +475,25 @@ def checkDir(dirpath): - # new_tissue_images = [ tissue_images[0], - # tissue_images[1], - # os.path.splitext(tissue_images[2])[0] + '_filled.mif', - # os.path.splitext(tissue_images[3])[0] + '_filled.mif', - # tissue_images[4] ] - # nonunity_mask_image = 'nonunity_voxels.mif' - # csf_fill_mask_image = 'csf_fill_mask.mif' - # csf_fill_image = 'csf_fill.mif' - # wm_fill_image = 'wm_fill.mif' - # run.command('mrcalc 1.0 ' + tissue_sum_image + ' -sub ' + mask_image + ' -mult 1e-5 -gt ' + nonunity_mask_image) - # progress.increment() - # # Cleaning filter is slow, & results in weirdness - # run.command('maskfilter ' + nonunity_mask_image + ' connect -largest ' + csf_fill_mask_image) - # progress.increment() - # run.command('mrcalc 1.0 ' + tissue_sum_image + ' -sub ' + csf_fill_mask_image + ' -mult ' + csf_fill_image) - # progress.increment() - # run.command('mrcalc 1.0 ' + tissue_sum_image + ' -sub ' + nonunity_mask_image + ' ' + csf_fill_mask_image + ' -sub -mult ' + wm_fill_image) - # file.delTemporary(nonunity_mask_image) - # file.delTemporary(csf_fill_mask_image) - # progress.increment() - # run.command('mrcalc ' + tissue_images[2] + ' ' + wm_fill_image + ' -add ' + new_tissue_images[2]) - # file.delTemporary(tissue_images[2]) - # file.delTemporary(wm_fill_image) - # progress.increment() - # run.command('mrcalc ' + tissue_images[3] + ' ' + csf_fill_image + ' -add ' + new_tissue_images[3]) - # file.delTemporary(tissue_images[3]) - # file.delTemporary(csf_fill_image) - # progress.done() - # tissue_images = new_tissue_images - - - #wm_fill_mask_image = 'wm_fill_mask.mif' - #wm_fill_image = 'wm_fill.mif' - #csf_fill_image = 'csf_fill.mif' - #run.command('mrthreshold ' + tissue_sum_image + ' -abs 0.5 - | maskfilter - erode - | mrcalc 1.0 - -sub - | maskfilter - connect -largest - | mrcalc 1.0 - -sub ' + wm_fill_mask_image) - #progress.increment() - #run.command('mrcalc 1.0 ' + tissue_sum_image + ' -sub ' + wm_fill_mask_image + ' -mult ' + wm_fill_image) - #file.delTemporary(wm_fill_mask_image) - #progress.increment() - #run.command('mrcalc ' + tissue_images[2] + ' ' + wm_fill_image + ' -add ' + new_tissue_images[2]) - #file.delTemporary(tissue_images[2]) - #progress.increment() - #run.command('mrcalc 1.0 ' + tissue_sum_image + ' ' + wm_fill_image + ' -add -sub ' + mask_image + ' 0.0 -gt -mult ' + csf_fill_image) - #file.delTemporary(tissue_sum_image) - #file.delTemporary(wm_fill_image) - #progress.increment() - #run.command('mrcalc ' + tissue_images[3] + ' ' + csf_fill_image + ' -add ' + new_tissue_images[3]) - #file.delTemporary(tissue_images[3]) - #file.delTemporary(csf_fill_image) - #progress.done() - #tissue_images = new_tissue_images + # TODO Make attempt at setting non-brain voxels at bottom of brain stem + # Note that this needs to be compatible with -template option + # - Generate gradient image of norm.mgz - should be bright at CSF edges, less so at arbitrary cuts through the WM + # Note: Hopefully keeping directionality information + # - Generate gradient image of brain stem partial volume image + # Note: If using -template option, this will likely need to be re-generated in native space + # (Actually, may have been erased by 
file.delTemporary() earlier...) + # - Get (absolute) inner product of two gradient images - should be bright where brain stem segmentation and + # gradients in T1 intensity colocalise, dark where either is absent + # Actually this needs to be more carefully considered: Ideally want something that appears bright at the + # bottom edge of the brain stem. E.g. min(abs(Gradient of brain stem PVF +- gradient of T1 image)) + # Will need some kind of appropriate scaling of T1 gradient image in order to enable an addition / subtraction + # Does norm.mgz get a standardised intensity range? + # - Perform automatic threshold & connected-component analysis + # Perhaps prior to this point, could set the image FoV based on the brain stem segmentation, then + # retain only the lower half of the FoV, such that the largest connected component is the bottom part + # - Maybe dilate this a little bit + # - Multiply all tissues by 0 in these voxels (may require conversion back to template image) + # Finally, concatenate the volumes to produce the 5TT image @@ -584,7 +504,6 @@ def checkDir(dirpath): file.delTemporary(entry) - # Maybe don't go off all tissues here, since FreeSurfer's mask can be fairly liberal; # instead get just a voxel clearance from all other tissue types (maybe two) if app.args.nocrop: @@ -599,6 +518,8 @@ def checkDir(dirpath): + + app.warn('Script algorithm is not yet capable of performing requisite image modifications in order to ' 'permit streamlines travelling from the brain stem down the spinal column. Recommend using ' '5ttedit -none, in conjunction with a manually-drawn ROI labelling the bottom part of the ' From 2303475de683fb2c44048d66bce218d96e507be1 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Wed, 11 Apr 2018 18:05:14 +1000 Subject: [PATCH 0127/1471] mrtrix3.file.delTemporary: Compatibility with list input --- lib/mrtrix3/file.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/lib/mrtrix3/file.py b/lib/mrtrix3/file.py index c780546527..e786bdf9b6 100644 --- a/lib/mrtrix3/file.py +++ b/lib/mrtrix3/file.py @@ -11,6 +11,24 @@ def delTemporary(path): #pylint: disable=unused-variable from mrtrix3 import app if not app.cleanup: return + if isinstance(path, list): + if len(path) == 1: + delTemporary(path[0]) + return + if app.verbosity > 2: + app.console('Deleting ' + str(len(path)) + ' temporary items: ' + str(path)) + for entry in path: + if os.path.isfile(entry): + func = os.remove + elif os.path.isdir(entry): + func = shutil.rmtree + else: + continue + try: + func(entry) + except OSError: + pass + return if os.path.isfile(path): temporary_type = 'file' func = os.remove From 2295270939cf58946c5911d8c63ab77339d9f38e Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Wed, 11 Apr 2018 18:09:42 +1000 Subject: [PATCH 0128/1471] 5ttgen hsvs: Various updates - Better cleanup of temporary files during script execution. - Prevent deletion of brain mask image in FreeSurfer directory. - Fix accidental deletion of FAST output files after transformation, which prevented script from executing unless -nocleanup was specified. 
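For reference, the list form of file.delTemporary introduced in the patch above can be exercised as follows; this is only a usage sketch that assumes it runs inside an MRtrix3 Python script (so that the mrtrix3 package and its cleanup setting are available), and the file names are illustrative:

    import glob
    from mrtrix3 import file
    file.delTemporary([ 'tissue0_init.mif', 'tissue1_init.mif' ])   # explicit list of temporaries
    file.delTemporary(glob.glob('first*'))                          # every file matching a glob pattern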
- --- lib/mrtrix3/_5ttgen/hsvs.py | 44 ++++++++++++++++++++++++++++++------- 1 file changed, 36 insertions(+), 8 deletions(-) diff --git a/lib/mrtrix3/_5ttgen/hsvs.py b/lib/mrtrix3/_5ttgen/hsvs.py index 1b1065d0f0..36e3b46119 100644 --- a/lib/mrtrix3/_5ttgen/hsvs.py +++ b/lib/mrtrix3/_5ttgen/hsvs.py @@ -28,7 +28,7 @@ def getInputs(): def execute(): - import os, sys + import glob, os, sys from mrtrix3 import app, file, fsl, path, run def checkFile(filepath): @@ -132,6 +132,7 @@ def checkDir(dirpath): app.console('Running FSL FIRST to segment sub-cortical grey matter structures') run.command(first_cmd + ' -s ' + ','.join(sgm_first_map.keys()) + ' -i T1.nii -b -o first') fsl.checkFirst('first', sgm_first_map.keys()) + file.delTemporary(glob.glob('T1_to_std_sub.*')) progress = app.progressBar('Mapping sub-cortical structures segmented by FIRST from surface to voxel representation', 2*len(sgm_first_map)) for key, value in sgm_first_map.items(): vtk_in_path = 'first-' + key + '_first.vtk' @@ -142,6 +143,9 @@ def checkDir(dirpath): run.command('mesh2voxel ' + vtk_converted_path + ' ' + template_image + ' ' + value + '.mif') file.delTemporary(vtk_converted_path) progress.increment() + if not have_fast: + file.delTemporary('T1.nii') + file.delTemporary(glob.glob('first*')) progress.done() @@ -363,7 +367,6 @@ def checkDir(dirpath): run.command('mrtransform ' + norm_image + ' -template ' + template_image + ' - | ' \ 'mrcalc - ' + T1_cerebellum_mask_image + ' -mult - | ' \ 'mrconvert - T1_cerebellum_precrop.mif') - file.delTemporary(T1_cerebellum_mask_image) progress.done() else: @@ -372,12 +375,16 @@ def checkDir(dirpath): # Generate a mask of all voxels classified as cerebellum by FreeSurfer cerebellum_mask_images = [ n + '.mif' for (i, t, n) in structures if 'Cerebellum' in n ] run.command('mrmath ' + ' '.join(cerebellum_mask_images) + ' sum ' + cerebellum_volume_image) + for entry in cerebellum_mask_images: + file.delTemporary(entry) progress.increment() # FAST image input needs to be pre-masked T1_cerebellum_mask_image = cerebellum_volume_image run.command('mrcalc T1.nii ' + T1_cerebellum_mask_image + ' -mult - | mrconvert - T1_cerebellum_precrop.mif') progress.done() + file.delTemporary('T1.nii') + # Any code below here should be compatible with cerebellum_volume_image.mif containing partial volume fractions # (in the case of no explicit template image, it's a mask, but the logic still applies) @@ -389,17 +396,23 @@ def checkDir(dirpath): # Crop T1 image around the cerebellum before feeding to FAST, then re-sample to full template image FoV fast_input_image = 'T1_cerebellum.nii' run.command('mrcrop T1_cerebellum_precrop.mif -mask ' + T1_cerebellum_mask_image + ' ' + fast_input_image) + # TODO Store this name in a variable + file.delTemporary('T1_cerebellum_precrop.mif') + # FIXME Cleanup of T1_cerebellum_mask_image: May be same image as cerebellum_volume_image run.command(fast_cmd + ' -N ' + fast_input_image) file.delTemporary(fast_input_image) - progress = app.progressBar('Introducing intensity-based cerebellar segmentation into the 5TT image', 10) + # Use glob to clean up unwanted FAST outputs + fast_output_prefix = os.path.splitext(fast_input_image)[0] + fast_pve_output_prefix = fast_output_prefix + '_pve_' + file.delTemporary([ entry for entry in glob.glob(fast_output_prefix + '*') if not fast_pve_output_prefix in entry ]) - fast_output_prefix = os.path.splitext(fast_input_image)[0] + '_pve_' - fast_outputs_cropped = [ fast_output_prefix + str(n) + fast_suffix for n in range(0,3) ] + 
progress = app.progressBar('Introducing intensity-based cerebellar segmentation into the 5TT image', 10) + fast_outputs_cropped = [ fast_pve_output_prefix + str(n) + fast_suffix for n in range(0,3) ] fast_outputs_template = [ 'FAST_' + str(n) + '.mif' for n in range(0,3) ] for inpath, outpath in zip(fast_outputs_cropped, fast_outputs_template): run.command('mrtransform ' + inpath + ' -interp nearest -template ' + template_image + ' ' + outpath) - file.delTemporary(outpath) + file.delTemporary(inpath) progress.increment() if app.args.template: file.delTemporary(template_image) @@ -422,12 +435,15 @@ def checkDir(dirpath): progress.increment() run.command('mrcalc ' + tissue_images[1] + ' ' + cerebellum_multiplier_image + ' ' + fast_outputs_template[1] + ' -mult -add ' + new_tissue_images[1]) file.delTemporary(tissue_images[1]) + file.delTemporary(fast_outputs_template[1]) progress.increment() run.command('mrcalc ' + tissue_images[2] + ' ' + cerebellum_multiplier_image + ' ' + fast_outputs_template[2] + ' -mult -add ' + new_tissue_images[2]) file.delTemporary(tissue_images[2]) + file.delTemporary(fast_outputs_template[2]) progress.increment() run.command('mrcalc ' + tissue_images[3] + ' ' + cerebellum_multiplier_image + ' ' + fast_outputs_template[0] + ' -mult -add ' + new_tissue_images[3]) file.delTemporary(tissue_images[3]) + file.delTemporary(fast_outputs_template[0]) file.delTemporary(cerebellum_multiplier_image) progress.increment() run.command('mrconvert ' + tissue_images[4] + ' ' + new_tissue_images[4]) @@ -457,6 +473,15 @@ def checkDir(dirpath): # FIXME Appears we can't rely on a connected-component filter: Still some bits e.g. between cortex & cerebellum # Maybe need to look into techniques for specifically filling in between structure pairs + # TESTME This appears to be correct in the template case, but wrong in the default case + # OK, what's happening is: Some voxels are getting a non-zero cortical GM fraction due to native use + # of the surface representation, but these voxels are actually outside FreeSurfer's own provided brain + # mask. So what we need to do here is get the union of the tissue sum nonzero image and the mask image, + # and use that at the -mult step of the mrcalc call. 
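The union described here is a logical OR realised as a capped sum, which the mrcalc call below writes as tissue_sum 0.0 -gt mask -add 1.0 -min. An equivalent numpy fragment, again with stand-in arrays in place of the actual images:

    import numpy as np
    tissue_sum = np.random.rand(64, 64)          # stand-in: summed tissue fractions
    brain_mask = np.random.rand(64, 64) > 0.5    # stand-in: binary brain mask
    fill_mask = np.minimum((tissue_sum > 0.0).astype(float) + brain_mask.astype(float), 1.0)
    csf_fill = (1.0 - tissue_sum) * fill_mask    # CSF fill restricted to (tissue_sum > 0) || mask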
+ + # Required image: (tissue_sum_image > 0.0) || mask_image + # tissue_sum_image 0.0 -gt mask_image -add 1.0 -min + new_tissue_images = [ tissue_images[0], tissue_images[1], @@ -464,8 +489,11 @@ def checkDir(dirpath): os.path.splitext(tissue_images[3])[0] + '_filled.mif', tissue_images[4] ] csf_fill_image = 'csf_fill.mif' - run.command('mrcalc 1.0 ' + tissue_sum_image + ' -sub ' + mask_image + ' -mult ' + csf_fill_image) - file.delTemporary(mask_image) + run.command('mrcalc 1.0 ' + tissue_sum_image + ' -sub ' + tissue_sum_image + ' 0.0 -gt ' + mask_image + ' -add 1.0 -min -mult ' + csf_fill_image) + file.delTemporary(tissue_sum_image) + # If no template is specified, this file is part of the FreeSurfer output; hence don't modify + if app.args.template: + file.delTemporary(mask_image) progress.increment() run.command('mrcalc ' + tissue_images[3] + ' ' + csf_fill_image + ' -add ' + new_tissue_images[3]) file.delTemporary(csf_fill_image) From 9a392998f9e021bd341b225f211527ca6705a048 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Wed, 11 Apr 2018 18:19:36 +1000 Subject: [PATCH 0129/1471] 5ttgen hsvs: Add cleanup erroneously omitted from previous commit 229527093 --- lib/mrtrix3/_5ttgen/hsvs.py | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/lib/mrtrix3/_5ttgen/hsvs.py b/lib/mrtrix3/_5ttgen/hsvs.py index 36e3b46119..0448b591e2 100644 --- a/lib/mrtrix3/_5ttgen/hsvs.py +++ b/lib/mrtrix3/_5ttgen/hsvs.py @@ -473,16 +473,13 @@ def checkDir(dirpath): # FIXME Appears we can't rely on a connected-component filter: Still some bits e.g. between cortex & cerebellum # Maybe need to look into techniques for specifically filling in between structure pairs - # TESTME This appears to be correct in the template case, but wrong in the default case - # OK, what's happening is: Some voxels are getting a non-zero cortical GM fraction due to native use - # of the surface representation, but these voxels are actually outside FreeSurfer's own provided brain - # mask. So what we need to do here is get the union of the tissue sum nonzero image and the mask image, - # and use that at the -mult step of the mrcalc call. - + # Some voxels may get a non-zero cortical GM fraction due to native use of the surface representation, yet + # these voxels are actually outside FreeSurfer's own provided brain mask. So what we need to do here is + # get the union of the tissue sum nonzero image and the mask image, and use that at the -mult step of the + # mrcalc call. # Required image: (tissue_sum_image > 0.0) || mask_image # tissue_sum_image 0.0 -gt mask_image -add 1.0 -min - new_tissue_images = [ tissue_images[0], tissue_images[1], tissue_images[2], From 2e825418ecdffab90318500ec64b2730b406c94f Mon Sep 17 00:00:00 2001 From: Thijs Dhollander Date: Fri, 13 Apr 2018 17:37:34 +1000 Subject: [PATCH 0130/1471] basic version of improved dwi2response dhollander algorithm --- lib/mrtrix3/dwi2response/dhollander.py | 78 +++++++++++++++----------- 1 file changed, 46 insertions(+), 32 deletions(-) diff --git a/lib/mrtrix3/dwi2response/dhollander.py b/lib/mrtrix3/dwi2response/dhollander.py index b9ec49b875..d786309c6b 100644 --- a/lib/mrtrix3/dwi2response/dhollander.py +++ b/lib/mrtrix3/dwi2response/dhollander.py @@ -37,13 +37,15 @@ def execute(): #pylint: disable=unused-variable from mrtrix3 import app, image, path, run + # CHECK INPUTS AND OPTIONS + # Get b-values and number of volumes per b-value. 
bvalues = [ int(round(float(x))) for x in image.mrinfo('dwi.mif', 'shell_bvalues').split() ] bvolumes = [ int(x) for x in image.mrinfo('dwi.mif', 'shell_sizes').split() ] app.console(str(len(bvalues)) + ' unique b-value(s) detected: ' + ','.join(map(str,bvalues)) + ' with ' + ','.join(map(str,bvolumes)) + ' volumes.') if len(bvalues) < 2: app.error('Need at least 2 unique b-values (including b=0).') - + bvalues_option = ' -shells ' + ','.join(map(str,bvalues)) # Get lmax information (if provided). sfwm_lmax = [ ] @@ -56,7 +58,12 @@ def execute(): #pylint: disable=unused-variable app.error('Values supplied to the -lmax option must be even.') if l<0: app.error('Values supplied to the -lmax option must be non-negative.') + sfwm_lmax_option = '' + if sfwm_lmax: + sfwm_lmax_option = ' -lmax ' + ','.join(map(str,sfwm_lmax)) + + # PREPARATION # Erode (brain) mask. if app.args.erode > 0: @@ -64,7 +71,6 @@ def execute(): #pylint: disable=unused-variable else: run.command('mrconvert mask.mif eroded_mask.mif -datatype bit') - # Get volumes, compute mean signal and SDM per b-value; compute overall SDM; get rid of erroneous values. totvolumes = 0 fullsdmcmd = 'mrcalc' @@ -92,6 +98,8 @@ def execute(): #pylint: disable=unused-variable run.command('mrcalc safe_mask.mif full_sdm.mif 0 -if 10 -min safe_sdm.mif') + # CRUDE SEGMENTATION + # Compute FA and principal eigenvectors; crude WM versus GM-CSF separation based on FA. run.command('dwi2tensor dwi.mif - -mask safe_mask.mif | tensor2metric - -fa safe_fa.mif -vector safe_vecs.mif -modulate none -mask safe_mask.mif') run.command('mrcalc safe_mask.mif safe_fa.mif 0 -if ' + str(app.args.fa) + ' -gt crude_wm.mif -datatype bit') @@ -103,6 +111,8 @@ def execute(): #pylint: disable=unused-variable run.command('mrcalc crude_csf.mif 0 _crudenonwm.mif -if crude_gm.mif -datatype bit') + # REFINED SEGMENTATION + # Refine WM: remove high SDM outliers. crudewmmedian = image.statistic('safe_sdm.mif', 'median', '-mask crude_wm.mif') run.command('mrcalc crude_wm.mif safe_sdm.mif 0 -if ' + str(crudewmmedian) + ' -gt _crudewmhigh.mif -datatype bit') @@ -127,16 +137,16 @@ def execute(): #pylint: disable=unused-variable run.command('mrcalc _crudecsfextra.mif safe_sdm.mif ' + str(crudecsfmin) + ' -subtract 0 -if - | mrthreshold - - -mask _crudecsfextra.mif | mrcalc _crudecsfextra.mif - 0 -if refined_csf.mif -datatype bit') - # Get final voxels for single-fibre WM response function estimation from WM using 'tournier' algorithm. - refwmcount = float(image.statistic('refined_wm.mif', 'count', '-mask refined_wm.mif')) - voxsfwmcount = int(round(refwmcount * app.args.sfwm / 100.0)) - app.console('Running \'tournier\' algorithm to select ' + str(voxsfwmcount) + ' single-fibre WM voxels.') - cleanopt = '' - if not app.cleanup: - cleanopt = ' -nocleanup' - run.command('dwi2response tournier dwi.mif _respsfwmss.txt -sf_voxels ' + str(voxsfwmcount) + ' -iter_voxels ' + str(voxsfwmcount * 10) + ' -mask refined_wm.mif -voxels voxels_sfwm.mif -tempdir ' + app.tempDir + cleanopt) + # FINAL VOXEL SELECTION AND RESPONSE FUNCTION ESTIMATION + + # Get final voxels for CSF response function estimation from refined CSF. 
+ refcsfcount = float(image.statistic('refined_csf.mif', 'count', '-mask refined_csf.mif')) + voxcsfcount = int(round(refcsfcount * app.args.csf / 100.0)) + run.command('mrcalc refined_csf.mif safe_sdm.mif 0 -if - | mrthreshold - - -top ' + str(voxcsfcount) + ' -ignorezero | mrcalc refined_csf.mif - 0 -if - -datatype bit | mrconvert - voxels_csf.mif -axes 0,1,2') + # Estimate CSF response function + run.command('amp2response dwi.mif voxels_csf.mif safe_vecs.mif response_csf.txt' + bvalues_option + ' -isotropic') - # Get final voxels for GM response function estimation from GM. + # Get final voxels for GM response function estimation from refined GM. refgmmedian = image.statistic('safe_sdm.mif', 'median', '-mask refined_gm.mif') run.command('mrcalc refined_gm.mif safe_sdm.mif 0 -if ' + str(refgmmedian) + ' -gt _refinedgmhigh.mif -datatype bit') run.command('mrcalc _refinedgmhigh.mif 0 refined_gm.mif -if _refinedgmlow.mif -datatype bit') @@ -146,15 +156,33 @@ def execute(): #pylint: disable=unused-variable voxgmlowcount = int(round(refgmlowcount * app.args.gm / 100.0)) run.command('mrcalc _refinedgmhigh.mif safe_sdm.mif 0 -if - | mrthreshold - - -bottom ' + str(voxgmhighcount) + ' -ignorezero | mrcalc _refinedgmhigh.mif - 0 -if _refinedgmhighselect.mif -datatype bit') run.command('mrcalc _refinedgmlow.mif safe_sdm.mif 0 -if - | mrthreshold - - -top ' + str(voxgmlowcount) + ' -ignorezero | mrcalc _refinedgmlow.mif - 0 -if _refinedgmlowselect.mif -datatype bit') - run.command('mrcalc _refinedgmhighselect.mif 1 _refinedgmlowselect.mif -if voxels_gm.mif -datatype bit') + run.command('mrcalc _refinedgmhighselect.mif 1 _refinedgmlowselect.mif -if - -datatype bit | mrconvert - voxels_gm.mif -axes 0,1,2') + # Estimate GM response function + run.command('amp2response dwi.mif voxels_gm.mif safe_vecs.mif response_gm.txt' + bvalues_option + ' -isotropic') - # Get final voxels for CSF response function estimation from CSF. - refcsfcount = float(image.statistic('refined_csf.mif', 'count', '-mask refined_csf.mif')) - voxcsfcount = int(round(refcsfcount * app.args.csf / 100.0)) - run.command('mrcalc refined_csf.mif safe_sdm.mif 0 -if - | mrthreshold - - -top ' + str(voxcsfcount) + ' -ignorezero | mrcalc refined_csf.mif - 0 -if voxels_csf.mif -datatype bit') + # Get final voxels for single-fibre WM response function estimation from refined WM. 
+ refwmcount = float(image.statistic('refined_wm.mif', 'count', '-mask refined_wm.mif')) + voxsfwmcount = int(round(refwmcount * app.args.sfwm / 100.0)) + run.command('mrcalc refined_wm.mif safe_fa.mif 0 -if - | mrthreshold - - -top ' + str(voxsfwmcount) + ' -ignorezero | mrcalc refined_wm.mif - 0 -if - -datatype bit | mrconvert - init_voxels_sfwm.mif -axes 0,1,2') + run.command('amp2response dwi.mif init_voxels_sfwm.mif safe_vecs.mif init_response_sfwm.txt' + bvalues_option + sfwm_lmax_option) + run.command('dwi2fod msmt_csd dwi.mif init_response_sfwm.txt fod_wm.mif response_gm.txt abs_gm.mif -mask refined_wm.mif' + bvalues_option) + run.command('mrconvert fod_wm.mif abs_wm.mif -coord 3 0') + run.command('sh2peaks fod_wm.mif wm_peak.mif -num 1 -fast -mask refined_wm.mif') + run.command('peaks2amp wm_peak.mif - | mrconvert - abs_wm_amp.mif -coord 3 0 -axes 0,1,2') + run.command('mrcalc abs_wm_amp.mif abs_wm.mif abs_gm.mif -add -divide frac_wm_amp.mif') + run.command('mrcalc refined_wm.mif frac_wm_amp.mif 0 -if - | mrthreshold - - -top ' + str(voxsfwmcount) + ' -ignorezero | mrcalc refined_wm.mif - 0 -if - -datatype bit | mrconvert - voxels_sfwm.mif -axes 0,1,2') + # Estimate SF WM response function + run.command('amp2response dwi.mif voxels_sfwm.mif wm_peak.mif response_sfwm.txt' + bvalues_option + sfwm_lmax_option) + + + # SUMMARY AND OUTPUT + # Generate 4D binary images with voxel selections at major stages in algorithm (RGB as in MSMT-CSD paper). + run.command('mrcat crude_csf.mif crude_gm.mif crude_wm.mif crude.mif -axis 3') + run.command('mrcat refined_csf.mif refined_gm.mif refined_wm.mif refined.mif -axis 3') + run.command('mrcat voxels_csf.mif voxels_gm.mif voxels_sfwm.mif voxels.mif -axis 3') - # Show summary of voxels counts. + # Show final summary of voxels counts. 
textarrow = ' --> ' app.console('Summary of voxel counts:') app.console('Mask: ' + str(int(image.statistic('mask.mif', 'count', '-mask mask.mif'))) + textarrow + str(int(image.statistic('eroded_mask.mif', 'count', '-mask eroded_mask.mif'))) + textarrow + str(int(image.statistic('safe_mask.mif', 'count', '-mask safe_mask.mif')))) @@ -162,21 +190,7 @@ def execute(): #pylint: disable=unused-variable app.console('GM: ' + str(int(image.statistic('crude_gm.mif', 'count', '-mask crude_gm.mif'))) + textarrow + str(int(image.statistic('refined_gm.mif', 'count', '-mask refined_gm.mif'))) + textarrow + str(int(image.statistic('voxels_gm.mif', 'count', '-mask voxels_gm.mif')))) app.console('CSF: ' + str(int(image.statistic('crude_csf.mif', 'count', '-mask crude_csf.mif'))) + textarrow + str(int(image.statistic('refined_csf.mif', 'count', '-mask refined_csf.mif'))) + textarrow + str(int(image.statistic('voxels_csf.mif', 'count', '-mask voxels_csf.mif')))) - - # Generate single-fibre WM, GM and CSF responses - bvalues_option = ' -shells ' + ','.join(map(str,bvalues)) - sfwm_lmax_option = '' - if sfwm_lmax: - sfwm_lmax_option = ' -lmax ' + ','.join(map(str,sfwm_lmax)) - run.command('amp2response dwi.mif voxels_sfwm.mif safe_vecs.mif response_sfwm.txt' + bvalues_option + sfwm_lmax_option) - run.command('amp2response dwi.mif voxels_gm.mif safe_vecs.mif response_gm.txt' + bvalues_option + ' -isotropic') - run.command('amp2response dwi.mif voxels_csf.mif safe_vecs.mif response_csf.txt' + bvalues_option + ' -isotropic') + # Copy response functions to output files run.function(shutil.copyfile, 'response_sfwm.txt', path.fromUser(app.args.out_sfwm, False)) run.function(shutil.copyfile, 'response_gm.txt', path.fromUser(app.args.out_gm, False)) run.function(shutil.copyfile, 'response_csf.txt', path.fromUser(app.args.out_csf, False)) - - - # Generate 4D binary images with voxel selections at major stages in algorithm (RGB as in MSMT-CSD paper). 
- run.command('mrcat crude_csf.mif crude_gm.mif crude_wm.mif crude.mif -axis 3') - run.command('mrcat refined_csf.mif refined_gm.mif refined_wm.mif refined.mif -axis 3') - run.command('mrcat voxels_csf.mif voxels_gm.mif voxels_sfwm.mif voxels.mif -axis 3') From 48995c3680bad3b7063bad56dabf4c4e17ef8485 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Sat, 14 Apr 2018 15:14:05 +1000 Subject: [PATCH 0131/1471] Fix compilation errors in src/surface/algos/ --- src/surface/algo/image2mesh.h | 4 ++-- src/surface/algo/mesh2image.cpp | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/surface/algo/image2mesh.h b/src/surface/algo/image2mesh.h index 78468cee07..047181a3d5 100644 --- a/src/surface/algo/image2mesh.h +++ b/src/surface/algo/image2mesh.h @@ -74,7 +74,7 @@ namespace MR // refers to the lower corner of the voxel; that way searches for existing // vertices can be done using a simple map - Vox pos; + Vox pos (0, 0, 0); for (auto loop = Loop(voxel) (voxel); loop; ++loop) { if (voxel.value()) { @@ -444,7 +444,7 @@ namespace MR ImageType voxel (input_image); float in_vertex_values[8]; std::map< Vox, std::map > input_vertex_pair_to_output_vertex_index_map; - Vox lower_corner; + Vox lower_corner (-1, -1, -1); for (lower_corner[2] = -1; lower_corner[2] != voxel.size(2); ++lower_corner[2]) { for (lower_corner[1] = -1; lower_corner[1] != voxel.size(1); ++lower_corner[1]) { for (lower_corner[0] = -1; lower_corner[0] != voxel.size(0); ++lower_corner[0]) { diff --git a/src/surface/algo/mesh2image.cpp b/src/surface/algo/mesh2image.cpp index 75d2bf495c..7e6fd7254b 100644 --- a/src/surface/algo/mesh2image.cpp +++ b/src/surface/algo/mesh2image.cpp @@ -175,7 +175,7 @@ namespace MR return true; }; - Vox voxel; + Vox voxel (lower_bound[0], lower_bound[1], lower_bound[2]); for (voxel[2] = lower_bound[2]; voxel[2] <= upper_bound[2]; ++voxel[2]) { for (voxel[1] = lower_bound[1]; voxel[1] <= upper_bound[1]; ++voxel[1]) { for (voxel[0] = lower_bound[0]; voxel[0] <= upper_bound[0]; ++voxel[0]) { From 4818185e334d698b91c1d7bebb61ecf06ea1f510 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Sat, 14 Apr 2018 17:05:56 +1000 Subject: [PATCH 0132/1471] Further fix of compilation errors in src/surface/algos Follows incomplete solution in 48995c36. 
--- src/surface/algo/image2mesh.h | 4 ++-- src/surface/algo/mesh2image.cpp | 2 +- src/surface/types.h | 1 + 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/src/surface/algo/image2mesh.h b/src/surface/algo/image2mesh.h index 047181a3d5..78468cee07 100644 --- a/src/surface/algo/image2mesh.h +++ b/src/surface/algo/image2mesh.h @@ -74,7 +74,7 @@ namespace MR // refers to the lower corner of the voxel; that way searches for existing // vertices can be done using a simple map - Vox pos (0, 0, 0); + Vox pos; for (auto loop = Loop(voxel) (voxel); loop; ++loop) { if (voxel.value()) { @@ -444,7 +444,7 @@ namespace MR ImageType voxel (input_image); float in_vertex_values[8]; std::map< Vox, std::map > input_vertex_pair_to_output_vertex_index_map; - Vox lower_corner (-1, -1, -1); + Vox lower_corner; for (lower_corner[2] = -1; lower_corner[2] != voxel.size(2); ++lower_corner[2]) { for (lower_corner[1] = -1; lower_corner[1] != voxel.size(1); ++lower_corner[1]) { for (lower_corner[0] = -1; lower_corner[0] != voxel.size(0); ++lower_corner[0]) { diff --git a/src/surface/algo/mesh2image.cpp b/src/surface/algo/mesh2image.cpp index 7e6fd7254b..75d2bf495c 100644 --- a/src/surface/algo/mesh2image.cpp +++ b/src/surface/algo/mesh2image.cpp @@ -175,7 +175,7 @@ namespace MR return true; }; - Vox voxel (lower_bound[0], lower_bound[1], lower_bound[2]); + Vox voxel; for (voxel[2] = lower_bound[2]; voxel[2] <= upper_bound[2]; ++voxel[2]) { for (voxel[1] = lower_bound[1]; voxel[1] <= upper_bound[1]; ++voxel[1]) { for (voxel[0] = lower_bound[0]; voxel[0] <= upper_bound[0]; ++voxel[0]) { diff --git a/src/surface/types.h b/src/surface/types.h index 39e1df3d7c..2c29edfaaf 100644 --- a/src/surface/types.h +++ b/src/surface/types.h @@ -39,6 +39,7 @@ namespace MR { MEMALIGN (Vox) public: using Eigen::Array3i::Array3i; + Vox () : Eigen::Array3i (-1, -1, -1) { } Vox (const Eigen::Vector3& p) : Eigen::Array3i (int(std::round (p[0])), int(std::round (p[1])), int(std::round (p[2]))) { } bool operator< (const Vox& i) const { From b1627cbe65eef0239a7fd95d23c72e7c64afa850 Mon Sep 17 00:00:00 2001 From: Thijs Dhollander Date: Tue, 17 Apr 2018 17:46:49 +1000 Subject: [PATCH 0133/1471] mtnormalise bugfix --- cmd/mtnormalise.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/mtnormalise.cpp b/cmd/mtnormalise.cpp index 9f64d833bc..65053ed257 100644 --- a/cmd/mtnormalise.cpp +++ b/cmd/mtnormalise.cpp @@ -331,8 +331,8 @@ void run_primitive () { auto outlier_rejection = [&](float outlier_range) { auto summed_log = ImageType::scratch (header_3D, "Log of summed tissue volumes"); - for (size_t j = 0; j < n_tissue_types; ++j) { - for (auto i = Loop (0, 3) (summed_log, combined_tissue, norm_field_image); i; ++i) { + for (auto i = Loop (0, 3) (summed_log, combined_tissue, norm_field_image); i; ++i) { + for (size_t j = 0; j < n_tissue_types; ++j) { combined_tissue.index(3) = j; summed_log.value() += balance_factors(j) * combined_tissue.value() / norm_field_image.value(); } From 660f20f58f44898a57698d944659b680273e8de5 Mon Sep 17 00:00:00 2001 From: J-Donald Tournier Date: Fri, 4 May 2018 13:37:03 +0100 Subject: [PATCH 0134/1471] build: remove highlight of running jobs This is a bit fragile, since any other output to the terminal will mess up the tracking of line numbers. 
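The mechanism being removed relied on counting every line the build script printed, then using ANSI save-cursor / move-up / restore-cursor escapes to jump back and rewrite a job's message once it finished; any output the script did not print itself (compiler warnings, for instance) desynchronises that count, which is the fragility referred to above. A minimal Python sketch of the pattern, reusing the escape sequences visible in the removed disp() code but with made-up function names, purely for illustration:

    import sys

    current_line = 0   # number of lines this script believes it has printed so far

    def print_tracked (msg):
        # print a message and remember which terminal line it landed on
        global current_line
        line = current_line
        sys.stdout.write (msg + '\n')
        current_line += 1
        return line

    def rewrite (line, msg):
        # save cursor, move up to the remembered line, overwrite, restore cursor;
        # this breaks as soon as anything else writes to the terminal, because the
        # offset (current_line - line) no longer matches the real cursor position
        sys.stdout.write ('\033[s\r\033[' + str (current_line - line) + 'A')
        sys.stdout.write (msg)
        sys.stdout.write ('\033[u')
        sys.stdout.flush ()

    job = print_tracked ('\033[1m' + 'compiling example.cpp' + '\033[0m')   # bold while running
    rewrite (job, 'compiling example.cpp')                                  # plain once finished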
--- build | 18 ++---------------- 1 file changed, 2 insertions(+), 16 deletions(-) diff --git a/build b/build index 97186c7884..f41c4a7144 100755 --- a/build +++ b/build @@ -125,7 +125,6 @@ lock = threading.Lock() print_lock = threading.Lock() stop = False error_stream = None -current_line = 0 main_cindex = 0 logfile = open ('build.log', 'wb') @@ -197,23 +196,11 @@ if sys.stderr.isatty(): -def disp (msg, line = -1): - global current_line +def disp (msg): print_lock.acquire() - if line < 0: - this_line = current_line - else: - this_line = line logfile.write (msg.encode (errors='ignore')) - if line >= 0: - sys.stdout.write ('\033[s\r\033['+str(current_line - line)+'A') sys.stdout.write (msg) - if line < 0: - current_line += len (msg.splitlines()) - else: - sys.stdout.write ('\033[u') print_lock.release() - return this_line @@ -827,7 +814,7 @@ def fillin (template, keyvalue): def execute (message, cmd, working_dir=None): - line = disp ('\033[1m'+message+'\033[0m' + os.linesep) + disp (message + os.linesep) log (' '.join(cmd) + os.linesep) try: @@ -835,7 +822,6 @@ def execute (message, cmd, working_dir=None): process = subprocess.Popen (cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=environ, cwd=working_dir) ( stdout, stderr ) = process.communicate() end = timer() - disp (message + os.linesep, line) if timingfile is not None: logtime ('[{:>8.3f}s] '.format(end-start) + message + os.linesep) From bfba710fa37c9c5759929fdfda42fff90f7d45a9 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Wed, 9 May 2018 15:07:38 +1000 Subject: [PATCH 0135/1471] GLM: Fix default permutation in presence of NaNs Fixes two issues relating to calculation of properties of the default permutation when there are NaNs present in the data: - The version of GLM::all_stats() where there are no NaNs in the data and no element-wise design matrix columns was being erroneously invoked in the presence of NaNs in the input data, because only the presence of element-wise design matrix columns was being tested in order to invoke this function; the presence of non-finite values in the input data is now additionally a requirement for this code path. - In calculating properties of the default permutation in an element-wise fashion, while element-wise design matrix column data were being added, this block of code failed to remove non-finite values from the data and design matrix before performing the matrix solve, resulting in erroneous default permutation parameters. --- cmd/mrclusterstats.cpp | 3 ++- core/math/stats/glm.cpp | 30 +++++++++++++++++++++++++++--- core/math/stats/glm.h | 2 ++ src/stats/tfce.h | 4 +++- 4 files changed, 34 insertions(+), 5 deletions(-) diff --git a/cmd/mrclusterstats.cpp b/cmd/mrclusterstats.cpp index 0d03fc9ea5..dd4231db1d 100644 --- a/cmd/mrclusterstats.cpp +++ b/cmd/mrclusterstats.cpp @@ -131,6 +131,8 @@ class SubjectVoxelImport : public SubjectDataImportBase H (Header::open (path)), data (H.get_image()) { } + virtual ~SubjectVoxelImport() { } + void operator() (matrix_type::RowXpr row) const override { assert (v2v); @@ -317,7 +319,6 @@ void run() { if (!get_options ("notest").size()) { matrix_type perm_distribution, uncorrected_pvalue; - // FIXME This shouldn't be empty... 
matrix_type default_cluster_output (num_voxels, num_contrasts); Stats::PermTest::run_permutations (glm_test, enhancer, empirical_enhanced_statistic, diff --git a/core/math/stats/glm.cpp b/core/math/stats/glm.cpp index 36407eeedf..53b8c9916e 100644 --- a/core/math/stats/glm.cpp +++ b/core/math/stats/glm.cpp @@ -228,7 +228,7 @@ namespace MR matrix_type& std_effect_size, vector_type& stdev) { - if (extra_columns.empty()) { + if (extra_columns.empty() && measurements.allFinite()) { all_stats (measurements, fixed_design, contrasts, betas, abs_effect_size, std_effect_size, stdev); return; } @@ -282,8 +282,32 @@ namespace MR // acquire the data for this particular element, without permutation for (size_t col = 0; col != extra_columns.size(); ++col) element_design.col (design_fixed.cols() + col) = (extra_columns[col]) (element_index); - Math::Stats::GLM::all_stats (element_data, element_design, contrasts, - local_betas, local_abs_effect_size, local_std_effect_size, local_stdev); + // For each element-wise design matrix, remove any NaN values + // present in either the input data or imported from the element-wise design matrix column data + size_t valid_rows = 0; + for (size_t row = 0; row != data.rows(); ++row) { + if (std::isfinite (element_data(row)) && element_design.row (row).allFinite()) + ++valid_rows; + } + if (valid_rows == data.rows()) { // No NaNs present + Math::Stats::GLM::all_stats (element_data, element_design, contrasts, + local_betas, local_abs_effect_size, local_std_effect_size, local_stdev); + } else { + // Need to reduce the data and design matrices to contain only finite data + matrix_type element_data_finite (valid_rows, 1); + matrix_type element_design_finite (valid_rows, element_design.cols()); + size_t output_row = 0; + for (size_t row = 0; row != data.rows(); ++row) { + if (std::isfinite (element_data(row)) && element_design.row (row).allFinite()) { + element_data_finite(output_row, 0) = element_data(row); + element_design_finite.row (output_row) = element_design.row (row); + ++output_row; + } + } + assert (output_row == valid_rows); + Math::Stats::GLM::all_stats (element_data_finite, element_design_finite, contrasts, + local_betas, local_abs_effect_size, local_std_effect_size, local_stdev); + } global_betas.col (element_index) = local_betas; global_abs_effect_size.row (element_index) = local_abs_effect_size.row (0); global_std_effect_size.row (element_index) = local_std_effect_size.row (0); diff --git a/core/math/stats/glm.h b/core/math/stats/glm.h index 78f8bcec84..321b6a4417 100644 --- a/core/math/stats/glm.h +++ b/core/math/stats/glm.h @@ -214,6 +214,8 @@ namespace MR } } + virtual ~TestBase() { } + /*! 
Compute the statistics * @param shuffling_matrix a matrix to permute / sign flip the residuals (for permutation testing) * @param output the matrix containing the output statistics (one column per contrast) diff --git a/src/stats/tfce.h b/src/stats/tfce.h index aeaa31d6ae..5c50411271 100644 --- a/src/stats/tfce.h +++ b/src/stats/tfce.h @@ -41,6 +41,8 @@ namespace MR class EnhancerBase : public Stats::EnhancerBase { MEMALIGN (EnhancerBase) + public: + virtual ~EnhancerBase() { } protected: // Alternative functor that also takes the threshold value; // makes TFCE integration cleaner @@ -57,7 +59,7 @@ namespace MR Wrapper (const std::shared_ptr base) : enhancer (base), dH (NaN), E (NaN), H (NaN) { } Wrapper (const std::shared_ptr base, const default_type dh, const default_type e, const default_type h) : enhancer (base), dH (dh), E (e), H (h) { } Wrapper (const Wrapper& that) = default; - ~Wrapper() { } + virtual ~Wrapper() { } void set_tfce_parameters (const value_type d_height, const value_type extent, const value_type height) { From 64499ccc19f07fc1491647253a232f0a17294f28 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Wed, 9 May 2018 15:38:43 +1000 Subject: [PATCH 0136/1471] mrclusterstats: Various fixes - Fix fundamental issue in connected-component filter; with 6-neighbour connectivity, code was stuck in an infinite loop. The code on this branch is quite different to that in newer tags of MRtrix3, so suspect that this issue has already been resolved elsewhere, and will lead to future merge conflicts. - Fix passing Voxel2Vector class to SubjectVoxelImport class, which is necessary for instructing the code as to how to load image data from a subject into the data matrix. - Fix some compiler warnings related to comparison of signed and unsigned integers. 
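Regarding the first fix listed above: the infinite loop occurred because, when a candidate offset was rejected under 6-neighbour connectivity, the old code jumped to the next loop iteration without ever incrementing the offset under test, so the same candidate was re-examined forever. The corrected adjacency rule itself is simple, and can be illustrated with a short stand-alone sketch (plain Python over a 3x3x3 neighbourhood; this shows the rule only, not the Filter::Connector implementation):

    from itertools import product

    def adjacency_offsets (use_26_neighbours):
        # enumerate candidate offsets to neighbouring voxels and keep the valid ones
        offsets = []
        for o in product ((-1, 0, 1), repeat = 3):
            manhattan = sum (abs (x) for x in o)
            if manhattan == 0:
                continue                                  # never connect a voxel to itself
            if not use_26_neighbours and manhattan > 1:
                continue                                  # 6-connectivity keeps face neighbours only
            offsets.append (o)
        return offsets

    assert len (adjacency_offsets (False)) == 6           # +/-x, +/-y, +/-z
    assert len (adjacency_offsets (True)) == 26           # full 3x3x3 neighbourhood minus the centre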
--- cmd/mrclusterstats.cpp | 22 ++++++++++++---------- core/filter/connected_components.cpp | 10 ++++++---- core/math/stats/glm.cpp | 6 +++--- 3 files changed, 21 insertions(+), 17 deletions(-) diff --git a/cmd/mrclusterstats.cpp b/cmd/mrclusterstats.cpp index dd4231db1d..15521b44db 100644 --- a/cmd/mrclusterstats.cpp +++ b/cmd/mrclusterstats.cpp @@ -14,6 +14,7 @@ #include "command.h" #include "image.h" +#include "types.h" #include "algo/loop.h" #include "file/path.h" @@ -156,7 +157,7 @@ class SubjectVoxelImport : public SubjectDataImportBase const Header& header() const { return H; } - void set_mapping (std::shared_ptr& ptr) { + static void set_mapping (std::shared_ptr& ptr) { v2v = ptr; } @@ -185,11 +186,12 @@ void run() { // Load analysis mask and compute adjacency auto mask_header = Header::open (argument[3]); auto mask_image = mask_header.get_image(); - Voxel2Vector v2v (mask_image, mask_header); + std::shared_ptr v2v = make_shared (mask_image, mask_header); + SubjectVoxelImport::set_mapping (v2v); Filter::Connector connector; connector.adjacency.set_26_adjacency (do_26_connectivity); - connector.adjacency.initialise (mask_header, v2v); - const size_t num_voxels = v2v.size(); + connector.adjacency.initialise (mask_header, *v2v); + const size_t num_voxels = v2v->size(); // Read file names and check files exist CohortDataImport importer; @@ -279,16 +281,16 @@ void run() { ProgressBar progress ("Outputting beta coefficients, effect size and standard deviation", num_factors + (2 * num_contrasts) + 1); for (ssize_t i = 0; i != num_factors; ++i) { - write_output (betas.row(i), v2v, prefix + "beta" + str(i) + ".mif", output_header); + write_output (betas.row(i), *v2v, prefix + "beta" + str(i) + ".mif", output_header); ++progress; } for (size_t i = 0; i != num_contrasts; ++i) { if (!contrasts[i].is_F()) { - write_output (abs_effect_size.col(i), v2v, prefix + "abs_effect" + postfix(i) + ".mif", output_header); ++progress; - write_output (std_effect_size.col(i), v2v, prefix + "std_effect" + postfix(i) + ".mif", output_header); ++progress; + write_output (abs_effect_size.col(i), *v2v, prefix + "abs_effect" + postfix(i) + ".mif", output_header); ++progress; + write_output (std_effect_size.col(i), *v2v, prefix + "std_effect" + postfix(i) + ".mif", output_header); ++progress; } } - write_output (stdev, v2v, prefix + "std_dev.mif", output_header); + write_output (stdev, *v2v, prefix + "std_dev.mif", output_header); } // Construct the class for performing the initial statistical tests @@ -329,13 +331,13 @@ void run() { ProgressBar progress ("Generating output images", 1 + (2 * num_contrasts)); for (size_t i = 0; i != num_contrasts; ++i) { - write_output (uncorrected_pvalue.col(i), v2v, prefix + "uncorrected_pvalue" + postfix(i) + ".mif", output_header); + write_output (uncorrected_pvalue.col(i), *v2v, prefix + "uncorrected_pvalue" + postfix(i) + ".mif", output_header); ++progress; } const matrix_type fwe_pvalue_output = MR::Math::Stats::fwe_pvalue (perm_distribution, default_cluster_output); ++progress; for (size_t i = 0; i != num_contrasts; ++i) { - write_output (fwe_pvalue_output.col(i), v2v, prefix + "fwe_pvalue" + postfix(i) + ".mif", output_header); + write_output (fwe_pvalue_output.col(i), *v2v, prefix + "fwe_pvalue" + postfix(i) + ".mif", output_header); ++progress; } diff --git a/core/filter/connected_components.cpp b/core/filter/connected_components.cpp index 7482f92ada..c4b063284d 100644 --- a/core/filter/connected_components.cpp +++ b/core/filter/connected_components.cpp @@ -45,10 +45,12 
@@ namespace MR throw Exception ("Cannot initialise connected component filter: All axes have been disabled"); // Now generate a list of plausible offsets between adjacent elements while (*std::max_element (o.begin(), o.end()) < 2) { - // Determine whether or not this offset should be added to the list - if (!use_26_neighbours && header.ndim() >= 3 && ((std::abs(o[0]) + std::abs(o[1]) + std::abs(o[2])) > 1)) - continue; - offsets.push_back (o); + // Determine whether or not this offset should be added to the list: + // - Don't add if we're only using 6 nearest neighbours and this offset isn't one of those six + // - Don't add self-connection + if (!(!use_26_neighbours && ((std::abs(o[0]) + std::abs(o[1]) + std::abs(o[2])) > 1)) + && (std::abs(o[0]) + std::abs(o[1]) + std::abs(o[2]) > 0)) + offsets.push_back (o); // Find the next offset to be tested ++o[start_axis]; for (size_t axis = start_axis; axis != header.ndim(); ++axis) { diff --git a/core/math/stats/glm.cpp b/core/math/stats/glm.cpp index 53b8c9916e..fd6dd12623 100644 --- a/core/math/stats/glm.cpp +++ b/core/math/stats/glm.cpp @@ -284,8 +284,8 @@ namespace MR element_design.col (design_fixed.cols() + col) = (extra_columns[col]) (element_index); // For each element-wise design matrix, remove any NaN values // present in either the input data or imported from the element-wise design matrix column data - size_t valid_rows = 0; - for (size_t row = 0; row != data.rows(); ++row) { + ssize_t valid_rows = 0; + for (ssize_t row = 0; row != data.rows(); ++row) { if (std::isfinite (element_data(row)) && element_design.row (row).allFinite()) ++valid_rows; } @@ -297,7 +297,7 @@ namespace MR matrix_type element_data_finite (valid_rows, 1); matrix_type element_design_finite (valid_rows, element_design.cols()); size_t output_row = 0; - for (size_t row = 0; row != data.rows(); ++row) { + for (ssize_t row = 0; row != data.rows(); ++row) { if (std::isfinite (element_data(row)) && element_design.row (row).allFinite()) { element_data_finite(output_row, 0) = element_data(row); element_design_finite.row (output_row) = element_design.row (row); From a3c110338c521e8036f4a14ba1d4ee390a945b83 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Wed, 9 May 2018 15:51:53 +1000 Subject: [PATCH 0137/1471] GLM::all_stats(): Prevent progress bar interference Makes the progress bar appear as it should, in cases where there are NaNs present in the input data or there are element-wise design matrix columns. 
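The fix itself is small: the statistics routine now only constructs its own progress indicator when it is processing the whole data matrix in one call, and stays silent when invoked once per element from an outer loop that already owns the progress display. A rough Python sketch of that guard (the Progress class and function names are stand-ins, not MRtrix code):

    import sys

    class Progress:
        # minimal stand-in for a console progress indicator
        def __init__ (self, label):
            sys.stderr.write (label + ': ')
        def tick (self):
            sys.stderr.write ('.')
        def done (self):
            sys.stderr.write (' done\n')

    def all_stats (num_elements):
        # only display progress when this call covers many elements at once;
        # when called for a single element (i.e. from an outer per-element loop),
        # leave the outer loop's progress bar undisturbed
        progress = Progress ('calculating basic properties of default permutation') if num_elements > 1 else None
        for _ in range (num_elements):
            # ... per-element model fitting would happen here ...
            if progress:
                progress.tick ()
        if progress:
            progress.done ()

    all_stats (10)   # prints its own progress line
    all_stats (1)    # silent: assumed to be driven by an outer loop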
--- core/math/stats/glm.cpp | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/core/math/stats/glm.cpp b/core/math/stats/glm.cpp index fd6dd12623..c35c315dec 100644 --- a/core/math/stats/glm.cpp +++ b/core/math/stats/glm.cpp @@ -169,13 +169,19 @@ namespace MR vector_type& stdev) { #ifndef GLM_ALL_STATS_DEBUG - ProgressBar progress ("Calculating basic properties of default permutation"); + // If this function is being invoked from the other version of all_stats(), + // on an element-by-element basis, don't interfere with the progress bar + // that's being displayed by that outer looping function + std::unique_ptr progress; + if (measurements.cols() > 1) + progress.reset (new ProgressBar ("Calculating basic properties of default permutation", 6)); #endif betas = solve_betas (measurements, design); #ifdef GLM_ALL_STATS_DEBUG std::cerr << "Betas: " << betas.rows() << " x " << betas.cols() << ", max " << betas.array().maxCoeff() << "\n"; #else - ++progress; + if (progress) + ++*progress; #endif abs_effect_size.resize (measurements.cols(), contrasts.size()); for (size_t ic = 0; ic != contrasts.size(); ++ic) { @@ -188,7 +194,8 @@ namespace MR #ifdef GLM_ALL_STATS_DEBUG std::cerr << "abs_effect_size: " << abs_effect_size.rows() << " x " << abs_effect_size.cols() << ", max " << abs_effect_size.array().maxCoeff() << "\n"; #else - ++progress; + if (progress) + ++*progress; #endif // Explicit calculation of residuals before SSE, rather than in a single // step, appears to be necessary for compatibility with Eigen 3.2.0 @@ -196,20 +203,23 @@ namespace MR #ifdef GLM_ALL_STATS_DEBUG std::cerr << "Residuals: " << residuals.rows() << " x " << residuals.cols() << ", max " << residuals.array().maxCoeff() << "\n"; #else - ++progress; + if (progress) + ++*progress; #endif vector_type sse (residuals.cols()); sse = residuals.colwise().squaredNorm(); #ifdef GLM_ALL_STATS_DEBUG std::cerr << "sse: " << sse.size() << ", max " << sse.maxCoeff() << "\n"; #else - ++progress; + if (progress) + ++*progress; #endif stdev = (sse / value_type(design.rows()-Math::rank (design))).sqrt(); #ifdef GLM_ALL_STATS_DEBUG std::cerr << "stdev: " << stdev.size() << ", max " << stdev.maxCoeff() << "\n"; #else - ++progress; + if (progress) + ++*progress; #endif std_effect_size = abs_effect_size.array().colwise() / stdev; #ifdef GLM_ALL_STATS_DEBUG From 93d57b4bacac44577d8f3287783b45f43af12fcc Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Fri, 11 May 2018 15:08:03 +1000 Subject: [PATCH 0138/1471] mrclusterstats: Fix omitted calculation of default permutation --- cmd/mrclusterstats.cpp | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/cmd/mrclusterstats.cpp b/cmd/mrclusterstats.cpp index 15521b44db..e4e8f15593 100644 --- a/cmd/mrclusterstats.cpp +++ b/cmd/mrclusterstats.cpp @@ -318,13 +318,20 @@ void run() { save_vector (empirical_enhanced_statistic.col(i), prefix + "empirical" + postfix(i) + ".txt"); } + // Precompute statistic value and enhanced statistic for the default permutation + matrix_type default_output, default_enhanced_output; + Stats::PermTest::precompute_default_permutation (glm_test, enhancer, empirical_enhanced_statistic, default_enhanced_output, default_output); + for (size_t i = 0; i != num_contrasts; ++i) { + write_output (default_output.col (i), *v2v, prefix + (contrasts[i].is_F() ? "F" : "t") + "value" + postfix(i) + ".mif", output_header); + write_output (default_enhanced_output.col (i), *v2v, prefix + (use_tfce ? 
"tfce" : "clustersize") + postfix(i) + ".mif", output_header); + } + if (!get_options ("notest").size()) { matrix_type perm_distribution, uncorrected_pvalue; - matrix_type default_cluster_output (num_voxels, num_contrasts); Stats::PermTest::run_permutations (glm_test, enhancer, empirical_enhanced_statistic, - default_cluster_output, perm_distribution, uncorrected_pvalue); + default_enhanced_output, perm_distribution, uncorrected_pvalue); for (size_t i = 0; i != num_contrasts; ++i) save_vector (perm_distribution.col(i), prefix + "perm_dist" + postfix(i) + ".txt"); @@ -334,7 +341,7 @@ void run() { write_output (uncorrected_pvalue.col(i), *v2v, prefix + "uncorrected_pvalue" + postfix(i) + ".mif", output_header); ++progress; } - const matrix_type fwe_pvalue_output = MR::Math::Stats::fwe_pvalue (perm_distribution, default_cluster_output); + const matrix_type fwe_pvalue_output = MR::Math::Stats::fwe_pvalue (perm_distribution, default_enhanced_output); ++progress; for (size_t i = 0; i != num_contrasts; ++i) { write_output (fwe_pvalue_output.col(i), *v2v, prefix + "fwe_pvalue" + postfix(i) + ".mif", output_header); From 2807190728da2ac06701bf16e1d8a2520d5ad4e0 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Fri, 11 May 2018 16:06:40 +1000 Subject: [PATCH 0139/1471] mrclusterstats: Output empirical statistic as image rather than vector --- cmd/mrclusterstats.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/mrclusterstats.cpp b/cmd/mrclusterstats.cpp index e4e8f15593..7815f3b8d5 100644 --- a/cmd/mrclusterstats.cpp +++ b/cmd/mrclusterstats.cpp @@ -315,7 +315,7 @@ void run() { throw Exception ("Nonstationary adjustment is not currently implemented for threshold-based cluster analysis"); Stats::PermTest::precompute_empirical_stat (glm_test, enhancer, empirical_enhanced_statistic); for (size_t i = 0; i != num_contrasts; ++i) - save_vector (empirical_enhanced_statistic.col(i), prefix + "empirical" + postfix(i) + ".txt"); + write_output (empirical_enhanced_statistic.col(i), *v2v, prefix + "empirical" + postfix(i) + ".mif", output_header); } // Precompute statistic value and enhanced statistic for the default permutation From e60c37e49d9cb26b037b54f72697c82cae33717c Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Tue, 15 May 2018 12:43:06 +1000 Subject: [PATCH 0140/1471] Stats: Better condition number checking - Provide standardised function MR::Math::condition_number(), and make use of it in multiple areas where the same calculation was being performed. - In statistical inference commands, if the design matrix may vary between elements being tested, output data that provides the condition number of the design matrix in each element. - Additionally, if the design matrix for a particular element being tested contains a very poor condition number, do not attempt statistical tests, and set output to 0.0 for that element, irrespective of shuffling. 
--- cmd/connectomestats.cpp | 11 ++-- cmd/fixelcfestats.cpp | 10 ++-- cmd/mrclusterstats.cpp | 16 ++++-- cmd/vectorstats.cpp | 16 ++++-- core/filter/smooth.h | 5 +- core/math/check_gradient.h | 5 +- core/math/condition_number.h | 40 +++++++++++++++ core/math/stats/glm.cpp | 98 +++++++++++++++++++++--------------- core/math/stats/glm.h | 12 +++-- src/dwi/gradient.h | 56 +++++++++------------ 10 files changed, 169 insertions(+), 100 deletions(-) create mode 100644 core/math/condition_number.h diff --git a/cmd/connectomestats.cpp b/cmd/connectomestats.cpp index 0827501d8b..03fd37c2b9 100644 --- a/cmd/connectomestats.cpp +++ b/cmd/connectomestats.cpp @@ -259,13 +259,12 @@ void run() { matrix_type betas (num_factors, num_edges); matrix_type abs_effect_size (num_edges, num_contrasts), std_effect_size (num_edges, num_contrasts); - vector_type stdev (num_edges); + vector_type cond (num_edges), stdev (num_edges); Math::Stats::GLM::all_stats (data, design, extra_columns, contrasts, - betas, abs_effect_size, std_effect_size, stdev); + cond, betas, abs_effect_size, std_effect_size, stdev); - // TODO Contrasts should be somehow named, in order to differentiate between t-tests and F-tests - ProgressBar progress ("outputting beta coefficients, effect size and standard deviation", num_factors + (2 * num_contrasts) + 1); + ProgressBar progress ("outputting beta coefficients, effect size and standard deviation", num_factors + (2 * num_contrasts) + 1 + (nans_in_data || extra_columns.size() ? 1 : 0)); for (ssize_t i = 0; i != num_factors; ++i) { save_matrix (mat2vec.V2M (betas.row(i)), "beta" + str(i) + ".csv"); ++progress; @@ -276,6 +275,10 @@ void run() save_matrix (mat2vec.V2M (std_effect_size.col(i)), "std_effect" + postfix(i) + ".csv"); ++progress; } } + if (nans_in_data || extra_columns.size()) { + save_matrix (mat2vec.V2M (cond), "cond.csv"); + ++progress; + } save_matrix (mat2vec.V2M (stdev), "std_dev.csv"); } diff --git a/cmd/fixelcfestats.cpp b/cmd/fixelcfestats.cpp index 5c5babe665..5d02184362 100644 --- a/cmd/fixelcfestats.cpp +++ b/cmd/fixelcfestats.cpp @@ -551,12 +551,12 @@ void run() { matrix_type betas (num_factors, num_fixels); matrix_type abs_effect_size (num_fixels, num_contrasts), std_effect_size (num_fixels, num_contrasts); - vector_type stdev (num_fixels); + vector_type cond (num_fixels), stdev (num_fixels); Math::Stats::GLM::all_stats (data, design, extra_columns, contrasts, - betas, abs_effect_size, std_effect_size, stdev); + cond, betas, abs_effect_size, std_effect_size, stdev); - ProgressBar progress ("Outputting beta coefficients, effect size and standard deviation", num_factors + (2 * num_contrasts) + 1); + ProgressBar progress ("Outputting beta coefficients, effect size and standard deviation", num_factors + (2 * num_contrasts) + 1 + (nans_in_data || extra_columns.size() ? 
1 : 0)); for (ssize_t i = 0; i != num_factors; ++i) { write_fixel_output (Path::join (output_fixel_directory, "beta" + str(i) + ".mif"), betas.row(i), output_header); @@ -570,6 +570,10 @@ void run() ++progress; } } + if (nans_in_data || extra_columns.size()) { + write_fixel_output (Path::join (output_fixel_directory, "cond.mif"), cond, output_header); + ++progress; + } write_fixel_output (Path::join (output_fixel_directory, "std_dev.mif"), stdev, output_header); } diff --git a/cmd/mrclusterstats.cpp b/cmd/mrclusterstats.cpp index 7815f3b8d5..c4aded3395 100644 --- a/cmd/mrclusterstats.cpp +++ b/cmd/mrclusterstats.cpp @@ -274,22 +274,28 @@ void run() { { matrix_type betas (num_factors, num_voxels); matrix_type abs_effect_size (num_voxels, num_contrasts), std_effect_size (num_voxels, num_contrasts); - vector_type stdev (num_voxels); + vector_type cond (num_voxels), stdev (num_voxels); Math::Stats::GLM::all_stats (data, design, extra_columns, contrasts, - betas, abs_effect_size, std_effect_size, stdev); + cond, betas, abs_effect_size, std_effect_size, stdev); - ProgressBar progress ("Outputting beta coefficients, effect size and standard deviation", num_factors + (2 * num_contrasts) + 1); + ProgressBar progress ("Outputting beta coefficients, effect size and standard deviation", num_factors + (2 * num_contrasts) + 1 + (nans_in_data || extra_columns.size() ? 1 : 0)); for (ssize_t i = 0; i != num_factors; ++i) { write_output (betas.row(i), *v2v, prefix + "beta" + str(i) + ".mif", output_header); ++progress; } for (size_t i = 0; i != num_contrasts; ++i) { if (!contrasts[i].is_F()) { - write_output (abs_effect_size.col(i), *v2v, prefix + "abs_effect" + postfix(i) + ".mif", output_header); ++progress; - write_output (std_effect_size.col(i), *v2v, prefix + "std_effect" + postfix(i) + ".mif", output_header); ++progress; + write_output (abs_effect_size.col(i), *v2v, prefix + "abs_effect" + postfix(i) + ".mif", output_header); + ++progress; + write_output (std_effect_size.col(i), *v2v, prefix + "std_effect" + postfix(i) + ".mif", output_header); + ++progress; } } + if (nans_in_data || extra_columns.size()) { + write_output (cond, *v2v, prefix + "cond.mif", output_header); + ++progress; + } write_output (stdev, *v2v, prefix + "std_dev.mif", output_header); } diff --git a/cmd/vectorstats.cpp b/cmd/vectorstats.cpp index 0a8ee94274..7c6de22e9e 100644 --- a/cmd/vectorstats.cpp +++ b/cmd/vectorstats.cpp @@ -178,19 +178,25 @@ void run() { matrix_type betas (num_factors, num_elements); matrix_type abs_effect_size (num_elements, num_contrasts), std_effect_size (num_elements, num_contrasts); - vector_type stdev (num_elements); + vector_type cond (num_elements), stdev (num_elements); Math::Stats::GLM::all_stats (data, design, extra_columns, contrasts, - betas, abs_effect_size, std_effect_size, stdev); + cond, betas, abs_effect_size, std_effect_size, stdev); - ProgressBar progress ("Outputting beta coefficients, effect size and standard deviation", 2 + (2 * num_contrasts)); + ProgressBar progress ("Outputting beta coefficients, effect size and standard deviation", 2 + (2 * num_contrasts) + (nans_in_data || extra_columns.size() ? 
1 : 0)); save_matrix (betas, output_prefix + "betas.csv"); ++progress; for (size_t i = 0; i != num_contrasts; ++i) { if (!contrasts[i].is_F()) { - save_vector (abs_effect_size.col(i), output_prefix + "abs_effect" + postfix(i) + ".csv"); ++progress; - save_vector (std_effect_size.col(i), output_prefix + "std_effect" + postfix(i) + ".csv"); ++progress; + save_vector (abs_effect_size.col(i), output_prefix + "abs_effect" + postfix(i) + ".csv"); + ++progress; + save_vector (std_effect_size.col(i), output_prefix + "std_effect" + postfix(i) + ".csv"); + ++progress; } } + if (nans_in_data || extra_columns.size()) { + save_vector (cond, output_prefix + "cond.csv"); + ++progress; + } save_vector (stdev, output_prefix + "std_dev.csv"); } diff --git a/core/filter/smooth.h b/core/filter/smooth.h index e54960510a..52819b814f 100644 --- a/core/filter/smooth.h +++ b/core/filter/smooth.h @@ -268,8 +268,9 @@ namespace MR } } result /= av_weights; - } else if (kernel_size != kernel.size()) - result /= kernel.segment(c, kernel_size).sum(); + } else if (kernel_size != kernel.size()) { + result /= kernel.segment(c, kernel_size).sum(); + } image.value() = result; } diff --git a/core/math/check_gradient.h b/core/math/check_gradient.h index e22c600dfc..e4304845fa 100644 --- a/core/math/check_gradient.h +++ b/core/math/check_gradient.h @@ -15,7 +15,6 @@ #ifndef __math_check_gradient_h__ #define __math_check_gradient_h__ -#include #include "debug.h" #include "datatype.h" @@ -99,9 +98,7 @@ namespace MR { } // CONSOLE ("hessian = [ " + str(hessian) + "]"); MAT(hessian); - auto v = Eigen::JacobiSVD (hessian).singularValues(); - auto conditionnumber = v[0] / v[v.size()-1]; - CONSOLE("\033[00;34mcondition number: " + str(conditionnumber)+"\033[0m"); + CONSOLE("\033[00;34mcondition number: " + str(condition_number (hessian))+"\033[0m"); } return hessian; } diff --git a/core/math/condition_number.h b/core/math/condition_number.h new file mode 100644 index 0000000000..df02bc5c18 --- /dev/null +++ b/core/math/condition_number.h @@ -0,0 +1,40 @@ +/* Copyright (c) 2008-2017 the MRtrix3 contributors. + * + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, you can obtain one at http://mozilla.org/MPL/2.0/. + * + * MRtrix is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * For more details, see http://www.mrtrix.org/. 
+ */ + + +#ifndef __math_condition_number_h__ +#define __math_condition_number_h__ + +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wdeprecated-declarations" +#include +#pragma GCC diagnostic pop + +namespace MR +{ + namespace Math + { + + template + inline default_type condition_number (const M& data) + { + auto v = Eigen::JacobiSVD (data).singularValues(); + return v[0] / v[v.size()-1]; + } + + } +} + +#endif + + diff --git a/core/math/stats/glm.cpp b/core/math/stats/glm.cpp index c35c315dec..5ea93e4c9f 100644 --- a/core/math/stats/glm.cpp +++ b/core/math/stats/glm.cpp @@ -15,8 +15,8 @@ #include "math/stats/glm.h" #include "debug.h" -#include "misc/bitset.h" #include "thread_queue.h" +#include "misc/bitset.h" namespace MR { @@ -233,6 +233,7 @@ namespace MR const matrix_type& fixed_design, const vector& extra_columns, const vector& contrasts, + vector_type& cond, matrix_type& betas, matrix_type& abs_effect_size, matrix_type& std_effect_size, @@ -271,11 +272,12 @@ namespace MR { MEMALIGN(Functor) public: Functor (const matrix_type& data, const matrix_type& design_fixed, const vector& extra_columns, const vector& contrasts, - matrix_type& betas, matrix_type& abs_effect_size, matrix_type& std_effect_size, vector_type& stdev) : + vector_type& cond, matrix_type& betas, matrix_type& abs_effect_size, matrix_type& std_effect_size, vector_type& stdev) : data (data), design_fixed (design_fixed), extra_columns (extra_columns), contrasts (contrasts), + global_cond (cond), global_betas (betas), global_abs_effect_size (abs_effect_size), global_std_effect_size (std_effect_size), @@ -299,9 +301,11 @@ namespace MR if (std::isfinite (element_data(row)) && element_design.row (row).allFinite()) ++valid_rows; } + default_type condition_number = 0.0; if (valid_rows == data.rows()) { // No NaNs present Math::Stats::GLM::all_stats (element_data, element_design, contrasts, local_betas, local_abs_effect_size, local_std_effect_size, local_stdev); + condition_number = Math::condition_number (element_design); } else { // Need to reduce the data and design matrices to contain only finite data matrix_type element_data_finite (valid_rows, 1); @@ -315,9 +319,13 @@ namespace MR } } assert (output_row == valid_rows); + assert (element_data_finite.allFinite()); + assert (element_design_finite.allFinite()); Math::Stats::GLM::all_stats (element_data_finite, element_design_finite, contrasts, local_betas, local_abs_effect_size, local_std_effect_size, local_stdev); + condition_number = Math::condition_number (element_design_finite); } + global_cond[element_index] = condition_number; global_betas.col (element_index) = local_betas; global_abs_effect_size.row (element_index) = local_abs_effect_size.row (0); global_std_effect_size.row (element_index) = local_std_effect_size.row (0); @@ -329,6 +337,7 @@ namespace MR const matrix_type& design_fixed; const vector& extra_columns; const vector& contrasts; + vector_type& global_cond; matrix_type& global_betas; matrix_type& global_abs_effect_size; matrix_type& global_std_effect_size; @@ -339,7 +348,7 @@ namespace MR Source source (measurements.cols()); Functor functor (measurements, fixed_design, extra_columns, contrasts, - betas, abs_effect_size, std_effect_size, stdev); + cond, betas, abs_effect_size, std_effect_size, stdev); Thread::run_queue (source, Thread::batch (size_t()), Thread::multi (functor)); } @@ -617,44 +626,53 @@ namespace MR } assert (Mfull_masked.allFinite()); - pinvMfull_masked = Math::pinv (Mfull_masked); - - Rm.noalias() = matrix_type::Identity 
(finite_count, finite_count) - (Mfull_masked*pinvMfull_masked); - - // We now have our permutation (shuffling) matrix and design matrix prepared, - // and can commence regressing the partitioned model of each contrast - for (size_t ic = 0; ic != c.size(); ++ic) { - - const auto partition = c[ic].partition (Mfull_masked); - XtX.noalias() = partition.X.transpose()*partition.X; - - // Now that we have the individual contrast model partition for these data, - // the rest of this function should proceed similarly to the fixed - // design matrix case - //VAR (shuffling_matrix_masked.rows()); - //VAR (shuffling_matrix_masked.cols()); - //VAR (partition.Rz.rows()); - //VAR (partition.Rz.cols()); - //VAR (y_masked.rows()); - //VAR (y_masked.cols()); - Sy = shuffling_matrix_masked * partition.Rz * y_masked.matrix(); - lambda = pinvMfull_masked * Sy.matrix(); - beta.noalias() = c[ic].matrix() * lambda.matrix(); - const default_type sse = (Rm*Sy.matrix()).squaredNorm(); - - const default_type F = ((beta.transpose() * XtX * beta) (0, 0) / c[ic].rank()) / - (sse / value_type (finite_count - partition.rank_x - partition.rank_z)); - - if (!std::isfinite (F)) { - output (ie, ic) = value_type(0); - } else if (c[ic].is_F()) { - output (ie, ic) = F; - } else { - assert (beta.rows() == 1); - output (ie, ic) = std::sqrt (F) * (beta.sum() > 0 ? 1.0 : -1.0); - } + // TODO Test condition number of masked & data-filled design matrix; + // need to skip statistical testing if it is too poor + const default_type condition_number = Math::condition_number (Mfull_masked); + if (condition_number > 1e5) { + output.row (ie).fill (0.0); + } else { + + pinvMfull_masked = Math::pinv (Mfull_masked); + + Rm.noalias() = matrix_type::Identity (finite_count, finite_count) - (Mfull_masked*pinvMfull_masked); + + // We now have our permutation (shuffling) matrix and design matrix prepared, + // and can commence regressing the partitioned model of each contrast + for (size_t ic = 0; ic != c.size(); ++ic) { + + const auto partition = c[ic].partition (Mfull_masked); + XtX.noalias() = partition.X.transpose()*partition.X; + + // Now that we have the individual contrast model partition for these data, + // the rest of this function should proceed similarly to the fixed + // design matrix case + //VAR (shuffling_matrix_masked.rows()); + //VAR (shuffling_matrix_masked.cols()); + //VAR (partition.Rz.rows()); + //VAR (partition.Rz.cols()); + //VAR (y_masked.rows()); + //VAR (y_masked.cols()); + Sy = shuffling_matrix_masked * partition.Rz * y_masked.matrix(); + lambda = pinvMfull_masked * Sy.matrix(); + beta.noalias() = c[ic].matrix() * lambda.matrix(); + const default_type sse = (Rm*Sy.matrix()).squaredNorm(); + + const default_type F = ((beta.transpose() * XtX * beta) (0, 0) / c[ic].rank()) / + (sse / value_type (finite_count - partition.rank_x - partition.rank_z)); + + if (!std::isfinite (F)) { + output (ie, ic) = value_type(0); + } else if (c[ic].is_F()) { + output (ie, ic) = F; + } else { + assert (beta.rows() == 1); + output (ie, ic) = std::sqrt (F) * (beta.sum() > 0 ? 
1.0 : -1.0); + } + + } // End looping over contrasts - } // End looping over contrasts + } // End checking for adequate condition number after NaN removal } // End checking for adequate number of remaining subjects after NaN removal diff --git a/core/math/stats/glm.h b/core/math/stats/glm.h index 321b6a4417..ef8abe361b 100644 --- a/core/math/stats/glm.h +++ b/core/math/stats/glm.h @@ -18,6 +18,7 @@ #include "app.h" #include "types.h" +#include "math/condition_number.h" #include "math/least_squares.h" #include "math/stats/import.h" #include "math/stats/typedefs.h" @@ -184,8 +185,8 @@ namespace MR * @param std_effect_size the matrix containing the output standardised effect size * @param stdev the matrix containing the output standard deviation */ - void all_stats (const matrix_type& measurements, const matrix_type& design, const vector& extra_columns, - const vector& contrasts, matrix_type& betas, matrix_type& abs_effect_size, matrix_type& std_effect_size, vector_type& stdev); + void all_stats (const matrix_type& measurements, const matrix_type& design, const vector& extra_columns, const vector& contrasts, + vector_type& cond, matrix_type& betas, matrix_type& abs_effect_size, matrix_type& std_effect_size, vector_type& stdev); //! @} @@ -207,10 +208,11 @@ namespace MR // Can no longer apply this assertion here; GLMTTestVariable later // expands the number of columns in M //assert (c.cols() == M.cols()); - auto v = Eigen::JacobiSVD (design).singularValues(); - auto cond = v[0] / v[v.size()-1]; + const default_type cond = Math::condition_number (design); if (cond > 10.0) { - WARN ("Design matrix may contain collinear factors (condition number = " + str(cond) + "); recommend double-checking derivation of design matrix"); + WARN ("Design matrix conditioning is poor (condition number = " + str(cond) + "); some calculations may be unstable"); + } else if (cond > 1e5) { + throw Exception ("Design matrix may contain collinear elements (condition number = " + str(cond) + "); check derivation of design matrix"); } } diff --git a/src/dwi/gradient.h b/src/dwi/gradient.h index a367f1fe27..53a7085225 100644 --- a/src/dwi/gradient.h +++ b/src/dwi/gradient.h @@ -15,17 +15,11 @@ #ifndef __dwi_gradient_h__ #define __dwi_gradient_h__ -// These lines are to silence deprecation warnings with Eigen & GCC v5 -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wdeprecated-declarations" -#include -#pragma GCC diagnostic pop - - #include "app.h" #include "file/path.h" #include "file/config.h" #include "header.h" +#include "math/condition_number.h" #include "math/sphere.h" #include "math/SH.h" #include "dwi/shells.h" @@ -50,7 +44,7 @@ namespace MR throw Exception ("invalid diffusion gradient table dimensions"); for (ssize_t i = 0; i < grad.rows(); i++) { auto norm = grad.row(i).template head<3>().norm(); - if (norm) + if (norm) grad.row(i).template head<3>().array() /= norm; } return grad; @@ -60,20 +54,20 @@ namespace MR /*! 
\brief convert the DW encoding matrix in \a grad into a * azimuth/elevation direction set, using only the DWI volumes as per \a * dwi */ - template + template inline Eigen::MatrixXd gen_direction_matrix ( - const MatrixType& grad, + const MatrixType& grad, const IndexVectorType& dwi) { Eigen::MatrixXd dirs (dwi.size(),2); for (size_t i = 0; i < dwi.size(); i++) { dirs (i,0) = std::atan2 (grad (dwi[i],1), grad (dwi[i],0)); auto z = grad (dwi[i],2) / grad.row (dwi[i]).template head<3>().norm(); - if (z >= 1.0) + if (z >= 1.0) dirs(i,1) = 0.0; else if (z <= -1.0) dirs (i,1) = Math::pi; - else + else dirs (i,1) = std::acos (z); } return dirs; @@ -84,7 +78,7 @@ namespace MR template - default_type condition_number_for_lmax (const MatrixType& dirs, int lmax) + default_type condition_number_for_lmax (const MatrixType& dirs, int lmax) { Eigen::MatrixXd g; if (dirs.cols() == 2) // spherical coordinates: @@ -92,8 +86,7 @@ namespace MR else // Cartesian to spherical: g = Math::Sphere::cartesian2spherical (dirs).leftCols(2); - auto v = Eigen::JacobiSVD (Math::SH::init_transform (g, lmax)).singularValues(); - return v[0] / v[v.size()-1]; + return Math::condition_number (Math::SH::init_transform (g, lmax)); } @@ -119,13 +112,13 @@ namespace MR - //! scale b-values by square of gradient norm + //! scale b-values by square of gradient norm template - void scale_bvalue_by_G_squared (MatrixType& G) + void scale_bvalue_by_G_squared (MatrixType& G) { INFO ("b-values will be scaled by the square of DW gradient norm"); - for (ssize_t n = 0; n < G.rows(); ++n) - if (G(n,3)) + for (ssize_t n = 0; n < G.rows(); ++n) + if (G(n,3)) G(n,3) *= G.row(n).template head<3>().squaredNorm(); } @@ -134,9 +127,9 @@ namespace MR //! store the DW gradient encoding matrix in a header /*! this will store the DW gradient encoding matrix into the - * Header::keyval() structure of \a header, under the key 'dw_scheme'. + * Header::keyval() structure of \a header, under the key 'dw_scheme'. */ - template + template void set_DW_scheme (Header& header, const MatrixType& G) { if (!G.rows()) { @@ -154,7 +147,7 @@ namespace MR } add_line (dw_scheme, line); } - if (dw_scheme.size()) + if (dw_scheme.size()) header.keyval()["dw_scheme"] = dw_scheme; else WARN ("attempt to add empty DW scheme to header - ignored"); @@ -200,7 +193,7 @@ namespace MR //! get the DW gradient encoding matrix /*! attempts to find the DW gradient encoding matrix, using the following - * procedure: + * procedure: * - if the -grad option has been supplied, then load the matrix assuming * it is in MRtrix format, and return it; * - if the -fslgrad option has been supplied, then load and rectify the @@ -212,7 +205,7 @@ namespace MR //! check that the DW scheme matches the DWI data in \a header - template + template inline void check_DW_scheme (const Header& header, const MatrixType& grad) { if (!grad.rows()) @@ -233,18 +226,18 @@ namespace MR /*! \brief validate the DW encoding matrix \a grad and - * check that it matches the DW header in \a header + * check that it matches the DW header in \a header * * This ensures the dimensions match the corresponding DWI data, applies * b-value scaling if specified, and normalises the gradient vectors. */ void validate_DW_scheme (Eigen::MatrixXd& grad, const Header& header, bool nofail = false); /*! 
\brief get the DW encoding matrix as per get_DW_scheme(), and - * check that it matches the DW header in \a header + * check that it matches the DW header in \a header * * This is the version that should be used in any application that * processes the DWI raw data. */ - inline Eigen::MatrixXd get_valid_DW_scheme (const Header& header, bool nofail = false) + inline Eigen::MatrixXd get_valid_DW_scheme (const Header& header, bool nofail = false) { auto grad = get_DW_scheme (header); validate_DW_scheme (grad, header, nofail); @@ -267,7 +260,7 @@ namespace MR template Eigen::MatrixXd compute_SH2amp_mapping ( const MatrixType& directions, - bool lmax_from_command_line = true, + bool lmax_from_command_line = true, int default_lmax = 8) { int lmax = -1; @@ -301,12 +294,11 @@ namespace MR Eigen::MatrixXd mapping; do { mapping = Math::SH::init_transform (directions, lmax); - auto v = Eigen::JacobiSVD (mapping).singularValues(); - auto cond = v[0] / v[v.size()-1]; - if (cond < 10.0) + const default_type cond = Math::condition_number (mapping); + if (cond < 10.0) break; WARN ("directions are poorly distributed for lmax = " + str(lmax) + " (condition number = " + str (cond) + ")"); - if (cond < 100.0 || lmax_set_from_commandline) + if (cond < 100.0 || lmax_set_from_commandline) break; lmax -= 2; } while (lmax >= 0); From c92e0355017c99c7c2491118525e3a28fb855c70 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Tue, 15 May 2018 13:10:29 +1000 Subject: [PATCH 0141/1471] GLM: More tweaks based on conditioning - Do not fit GLM at any point (even generating maps of beta coefficients) if design matrix is ill-conditioned. - Additionally skip test if condition number itself is NaN. --- core/math/stats/glm.cpp | 26 ++++++++++++++++++++------ core/math/stats/glm.h | 2 +- 2 files changed, 21 insertions(+), 7 deletions(-) diff --git a/core/math/stats/glm.cpp b/core/math/stats/glm.cpp index 5ea93e4c9f..76f2bbdab5 100644 --- a/core/math/stats/glm.cpp +++ b/core/math/stats/glm.cpp @@ -303,9 +303,16 @@ namespace MR } default_type condition_number = 0.0; if (valid_rows == data.rows()) { // No NaNs present - Math::Stats::GLM::all_stats (element_data, element_design, contrasts, - local_betas, local_abs_effect_size, local_std_effect_size, local_stdev); condition_number = Math::condition_number (element_design); + if (!std::isfinite (condition_number) || condition_number > 1e5) { + local_betas = matrix_type::Zero (global_betas.rows(), 1); + local_abs_effect_size = matrix_type::Zero (1, contrasts.size()); + local_std_effect_size = matrix_type::Zero (1, contrasts.size()); + local_stdev = vector_type::Zero (1); + } else { + Math::Stats::GLM::all_stats (element_data, element_design, contrasts, + local_betas, local_abs_effect_size, local_std_effect_size, local_stdev); + } } else { // Need to reduce the data and design matrices to contain only finite data matrix_type element_data_finite (valid_rows, 1); @@ -321,9 +328,16 @@ namespace MR assert (output_row == valid_rows); assert (element_data_finite.allFinite()); assert (element_design_finite.allFinite()); - Math::Stats::GLM::all_stats (element_data_finite, element_design_finite, contrasts, - local_betas, local_abs_effect_size, local_std_effect_size, local_stdev); condition_number = Math::condition_number (element_design_finite); + if (!std::isfinite (condition_number) || condition_number > 1e5) { + local_betas = matrix_type::Zero (global_betas.rows(), 1); + local_abs_effect_size = matrix_type::Zero (1, contrasts.size()); + local_std_effect_size = matrix_type::Zero (1, 
contrasts.size()); + local_stdev = vector_type::Zero (1); + } else { + Math::Stats::GLM::all_stats (element_data_finite, element_design_finite, contrasts, + local_betas, local_abs_effect_size, local_std_effect_size, local_stdev); + } } global_cond[element_index] = condition_number; global_betas.col (element_index) = local_betas; @@ -626,10 +640,10 @@ namespace MR } assert (Mfull_masked.allFinite()); - // TODO Test condition number of masked & data-filled design matrix; + // Test condition number of NaN-masked & data-filled design matrix; // need to skip statistical testing if it is too poor + // TODO Condition number testing may be quite slow; + // would a rank calculation with tolerance be faster? const default_type condition_number = Math::condition_number (Mfull_masked); if (!std::isfinite (condition_number) || condition_number > 1e5) { output.row (ie).fill (0.0); } else { diff --git a/core/math/stats/glm.h b/core/math/stats/glm.h index ef8abe361b..9a1001e797 100644 --- a/core/math/stats/glm.h +++ b/core/math/stats/glm.h @@ -211,7 +211,7 @@ namespace MR const default_type cond = Math::condition_number (design); if (cond > 10.0) { WARN ("Design matrix conditioning is poor (condition number = " + str(cond) + "); some calculations may be unstable"); - } else if (cond > 1e5) { + } else if (!std::isfinite (cond) || cond > 1e5) { throw Exception ("Design matrix may contain collinear elements (condition number = " + str(cond) + "); check derivation of design matrix"); } } From fe5bddf903b57eb6d95c0dc13dc4522dae60aab7 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Fri, 18 May 2018 16:24:49 +1000 Subject: [PATCH 0142/1471] GLM: Additional restrictions on per-element design matrices When the number of subjects contributing to the statistical test within an element is greatly reduced due to non-finite values, it becomes possible by chance for the model to obtain an exceptionally good fit to the data; this leads to an exceptionally high t-value / F-value, which correspondingly leads to very slow execution during statistical enhancement. This change introduces an additional constraint that must be satisfied in order for statistical testing to proceed for any given element: If at least one subject is excluded in that element, the number of remaining subjects must be at least twice the number of factors in the design matrix, in order to provide adequate degrees of freedom.
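The effect of the new restriction can be sketched in a few lines of numpy: for a given element, rows with non-finite data are dropped, and the model is fitted only if either no rows were lost or the surviving rows still number at least twice the design matrix columns ("factors"); otherwise the statistic is forced to zero, precisely to avoid the spuriously good fits described above. The helper below is illustrative only (a plain one-contrast t-statistic rather than the partitioned F-test used in the actual GLM code), and its name is made up:

    import numpy as np

    def element_t_statistic (y, design, contrast):
        # y: one measurement per subject for this element (may contain NaNs)
        # design: subjects x factors fixed-effects design matrix
        finite = np.isfinite (y)
        n, num_factors = np.count_nonzero (finite), design.shape[1]
        # skip the test if any subject was dropped and fewer than 2*factors remain
        if n < len (y) and n < 2 * num_factors:
            return 0.0
        y, X = y[finite], design[finite]
        beta, sse, rank, _ = np.linalg.lstsq (X, y, rcond = None)
        dof = n - rank
        if dof < 1 or not sse.size:
            return 0.0
        stdev = np.sqrt (sse[0] / dof)                     # residual standard deviation
        effect = contrast @ beta                           # effect size for this contrast
        se = stdev * np.sqrt (contrast @ np.linalg.pinv (X.T @ X) @ contrast)
        return effect / se

    design = np.column_stack ([np.ones (8), np.r_[np.zeros (4), np.ones (4)]])
    y = np.array ([1.0, 1.2, 0.9, 1.1, 2.0, 2.1, 1.9, np.nan])    # one subject missing
    print (element_t_statistic (y, design, np.array ([0.0, 1.0])))

    y_sparse = np.where (np.arange (8) < 5, np.nan, y)            # only a couple of finite subjects remain
    print (element_t_statistic (y_sparse, design, np.array ([0.0, 1.0])))   # 0.0: too few subjects left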
--- core/math/condition_number.h | 1 + core/math/stats/glm.cpp | 93 ++++++++++++++++++++++-------------- 2 files changed, 58 insertions(+), 36 deletions(-) diff --git a/core/math/condition_number.h b/core/math/condition_number.h index df02bc5c18..146f775a4b 100644 --- a/core/math/condition_number.h +++ b/core/math/condition_number.h @@ -28,6 +28,7 @@ namespace MR template inline default_type condition_number (const M& data) { + assert (data.rows() && data.cols()); auto v = Eigen::JacobiSVD (data).singularValues(); return v[0] / v[v.size()-1]; } diff --git a/core/math/stats/glm.cpp b/core/math/stats/glm.cpp index 76f2bbdab5..bb4c7ffaf1 100644 --- a/core/math/stats/glm.cpp +++ b/core/math/stats/glm.cpp @@ -214,7 +214,11 @@ namespace MR if (progress) ++*progress; #endif - stdev = (sse / value_type(design.rows()-Math::rank (design))).sqrt(); + const ssize_t dof = design.rows()-Math::rank (design); +#ifdef GLM_ALL_STATS_DEBUG + std::cerr << "Degrees of freedom: " << design.rows() << " - " << Math:rank (design) << " = " << dof << "\n"; +#endif + stdev = (sse / value_type(dof)).sqrt(); #ifdef GLM_ALL_STATS_DEBUG std::cerr << "stdev: " << stdev.size() << ", max " << stdev.maxCoeff() << "\n"; #else @@ -305,15 +309,12 @@ namespace MR if (valid_rows == data.rows()) { // No NaNs present condition_number = Math::condition_number (element_design); if (!std::isfinite (condition_number) || condition_number > 1e5) { - local_betas = matrix_type::Zero (global_betas.rows(), 1); - local_abs_effect_size = matrix_type::Zero (1, contrasts.size()); - local_std_effect_size = matrix_type::Zero (1, contrasts.size()); - local_stdev = vector_type::Zero (1); + zero(); } else { Math::Stats::GLM::all_stats (element_data, element_design, contrasts, local_betas, local_abs_effect_size, local_std_effect_size, local_stdev); } - } else { + } else if (valid_rows >= element_design.cols()) { // Need to reduce the data and design matrices to contain only finite data matrix_type element_data_finite (valid_rows, 1); matrix_type element_design_finite (valid_rows, element_design.cols()); @@ -330,14 +331,13 @@ namespace MR assert (element_design_finite.allFinite()); condition_number = Math::condition_number (element_design_finite); if (!std::isfinite (condition_number) || condition_number > 1e5) { - local_betas = matrix_type::Zero (global_betas.rows(), 1); - local_abs_effect_size = matrix_type::Zero (1, contrasts.size()); - local_std_effect_size = matrix_type::Zero (1, contrasts.size()); - local_stdev = vector_type::Zero (1); + zero(); } else { Math::Stats::GLM::all_stats (element_data_finite, element_design_finite, contrasts, local_betas, local_abs_effect_size, local_std_effect_size, local_stdev); } + } else { // Insufficient data to fit model at all + zero(); } global_cond[element_index] = condition_number; global_betas.col (element_index) = local_betas; @@ -358,6 +358,13 @@ namespace MR vector_type& global_stdev; matrix_type local_betas, local_abs_effect_size, local_std_effect_size; vector_type local_stdev; + + void zero () { + local_betas = matrix_type::Zero (global_betas.rows(), 1); + local_abs_effect_size = matrix_type::Zero (1, contrasts.size()); + local_std_effect_size = matrix_type::Zero (1, contrasts.size()); + local_stdev = vector_type::Zero (1); + } }; Source source (measurements.cols()); @@ -582,7 +589,12 @@ namespace MR } } const size_t finite_count = element_mask.count(); - if (finite_count < num_factors()) { + // Additional rejection here: + // If the number of finite elemets is _not_ equal to the number of subjects + // 
(i.e. at least one subject has been removed), there needs to be a + // more stringent criterion met in order to proceed with the test. + // Let's do: DoF must be at least equal to the number of factors. + if (finite_count < num_subjects() && finite_count < 2 * num_factors()) { output.row (ie).setZero(); } else { @@ -642,6 +654,8 @@ namespace MR // Test condition number of NaN-masked & data-filled design matrix; // need to skip statistical testing if it is too poor + // TODO Condition number testing may be quite slow; + // would a rank calculation with tolerance be faster? const default_type condition_number = Math::condition_number (Mfull_masked); if (!std::isfinite (condition_number) || condition_number > 1e5) { output.row (ie).fill (0.0); @@ -656,33 +670,40 @@ namespace MR for (size_t ic = 0; ic != c.size(); ++ic) { const auto partition = c[ic].partition (Mfull_masked); - XtX.noalias() = partition.X.transpose()*partition.X; - - // Now that we have the individual contrast model partition for these data, - // the rest of this function should proceed similarly to the fixed - // design matrix case - //VAR (shuffling_matrix_masked.rows()); - //VAR (shuffling_matrix_masked.cols()); - //VAR (partition.Rz.rows()); - //VAR (partition.Rz.cols()); - //VAR (y_masked.rows()); - //VAR (y_masked.cols()); - Sy = shuffling_matrix_masked * partition.Rz * y_masked.matrix(); - lambda = pinvMfull_masked * Sy.matrix(); - beta.noalias() = c[ic].matrix() * lambda.matrix(); - const default_type sse = (Rm*Sy.matrix()).squaredNorm(); - - const default_type F = ((beta.transpose() * XtX * beta) (0, 0) / c[ic].rank()) / - (sse / value_type (finite_count - partition.rank_x - partition.rank_z)); - - if (!std::isfinite (F)) { + const ssize_t dof = finite_count - partition.rank_x - partition.rank_z; + if (dof < 1) { output (ie, ic) = value_type(0); - } else if (c[ic].is_F()) { - output (ie, ic) = F; } else { - assert (beta.rows() == 1); - output (ie, ic) = std::sqrt (F) * (beta.sum() > 0 ? 1.0 : -1.0); - } + + XtX.noalias() = partition.X.transpose()*partition.X; + + // Now that we have the individual contrast model partition for these data, + // the rest of this function should proceed similarly to the fixed + // design matrix case + //VAR (shuffling_matrix_masked.rows()); + //VAR (shuffling_matrix_masked.cols()); + //VAR (partition.Rz.rows()); + //VAR (partition.Rz.cols()); + //VAR (y_masked.rows()); + //VAR (y_masked.cols()); + Sy = shuffling_matrix_masked * partition.Rz * y_masked.matrix(); + lambda = pinvMfull_masked * Sy.matrix(); + beta.noalias() = c[ic].matrix() * lambda.matrix(); + const default_type sse = (Rm*Sy.matrix()).squaredNorm(); + + const default_type F = ((beta.transpose() * XtX * beta) (0, 0) / c[ic].rank()) / + (sse / value_type (dof)); + + if (!std::isfinite (F)) { + output (ie, ic) = value_type(0); + } else if (c[ic].is_F()) { + output (ie, ic) = F; + } else { + assert (beta.rows() == 1); + output (ie, ic) = std::sqrt (F) * (beta.sum() > 0 ? 1.0 : -1.0); + } + + } // End checking for sufficient degrees of freedom } // End looping over contrasts From 4e656cced95a6ea8113fe30694ba214f39e03c00 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Mon, 21 May 2018 10:21:04 +1000 Subject: [PATCH 0143/1471] run.command(): Explicitly flag when no error message If a failed command does not produce any output on stdout or stderr, rather than printing "Output of failed command:", explicitly state that the failed command did not provide any diagnostic information. 
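As an aside, the intended reporting logic can be sketched in a few lines of Python (a minimal illustration only; the function and variable names here are stand-ins, not the actual run.py internals)::

    import sys

    def report_failure(cmd, error_text):
        # Always state which command failed
        sys.stderr.write('[ERROR] Command failed: ' + cmd + '\n')
        if error_text:
            # Echo whatever the command printed, indented for readability
            sys.stderr.write('Output of failed command:\n')
            for line in error_text.splitlines():
                sys.stderr.write('  ' + line + '\n')
        else:
            # Nothing was captured on stdout or stderr: say so explicitly
            sys.stderr.write('Failed command did not provide any diagnostic information\n')
        sys.stderr.flush()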
--- lib/mrtrix3/run.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/lib/mrtrix3/run.py b/lib/mrtrix3/run.py index 1218dbda9a..e930867ebd 100644 --- a/lib/mrtrix3/run.py +++ b/lib/mrtrix3/run.py @@ -215,11 +215,14 @@ def command(cmd, exitOnError=True): #pylint: disable=unused-variable filename = caller[1] lineno = caller[2] sys.stderr.write(script_name + ': ' + app.colourError + '[ERROR] Command failed: ' + cmd + app.colourClear + app.colourDebug + ' (' + os.path.basename(filename) + ':' + str(lineno) + ')' + app.colourClear + '\n') - sys.stderr.write(script_name + ': ' + app.colourConsole + 'Output of failed command:' + app.colourClear + '\n') - for line in error_text.splitlines(): - sys.stderr.write(' ' * (len(script_name)+2) + line + '\n') - app.console('') + if error_text: + sys.stderr.write(script_name + ': ' + app.colourConsole + 'Output of failed command:' + app.colourClear + '\n') + for line in error_text.splitlines(): + sys.stderr.write(' ' * (len(script_name)+2) + line + '\n') + else: + sys.stderr.write(script_name + ': ' + app.colourConsole + 'Failed command did not provide any diagnostic information' + app.colourClear + '\n') sys.stderr.flush() + app.console('') if app.tempDir: with open(os.path.join(app.tempDir, 'error.txt'), 'w') as outfile: outfile.write(cmd + '\n\n' + error_text + '\n') From 63f51aadb84ee4c685cd3c3cbd500f0b1f43e240 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Mon, 21 May 2018 10:22:22 +1000 Subject: [PATCH 0144/1471] 5ttcheck: Friendly "Everything OK" message on success --- cmd/5ttcheck.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cmd/5ttcheck.cpp b/cmd/5ttcheck.cpp index 619cefce45..061e129e95 100644 --- a/cmd/5ttcheck.cpp +++ b/cmd/5ttcheck.cpp @@ -139,6 +139,8 @@ void run () } else { WARN ("Input image does not perfectly conform to 5TT format, but may still be applicable" + vox_option_suggestion); } + } else { + CONSOLE(std::string(argument.size() > 1 ? "All images" : "Input image") + " checked OK"); } } From 81c8167a3fe2534fa504828be729b27bf702aaf1 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Tue, 22 May 2018 12:03:44 +1000 Subject: [PATCH 0145/1471] build: Only use stderr Address issues with buffering of stdout contents as described in #1345. 
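The rationale is that stdout typically becomes block-buffered once the build output is piped or redirected, so status messages can arrive out of order relative to compiler output or be lost on abnormal termination; stderr is not buffered that way. A minimal sketch of the pattern, mirroring the disp() helper touched in the diff below (a sketch only, not the build script's exact structure)::

    import sys

    def disp(msg):
        # stderr is effectively unbuffered, so the message appears immediately
        # and interleaves correctly with compiler warnings and errors
        sys.stderr.write(msg)
        sys.stderr.flush()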
--- build | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/build b/build index f58f1aa35d..9935ee594b 100755 --- a/build +++ b/build @@ -195,7 +195,7 @@ def pipe_errors_to_less_handler(): def disp (msg): print_lock.acquire() logfile.write (msg.encode (errors='ignore')) - sys.stdout.write (msg) + sys.stderr.write (msg) print_lock.release() def log (msg): @@ -717,21 +717,21 @@ class Entry: def display (self, indent=''): show_rebuild = lambda x: x+' [REBUILD]' if todo[x].need_rebuild() else x - sys.stdout.write (indent + '[' + self.action + '] ' + show_rebuild (self.name) + ':\n') - sys.stdout.write (indent + ' timestamp: ' + str(self.timestamp)) + sys.stderr.write (indent + '[' + self.action + '] ' + show_rebuild (self.name) + ':\n') + sys.stderr.write (indent + ' timestamp: ' + str(self.timestamp)) if len(self.deps): - sys.stdout.write (', dep timestamp: ' + str(self.dep_timestamp) + ', diff: ' + str(self.timestamp-self.dep_timestamp)) - sys.stdout.write ('\n') + sys.stderr.write (', dep timestamp: ' + str(self.dep_timestamp) + ', diff: ' + str(self.timestamp-self.dep_timestamp)) + sys.stderr.write ('\n') if len(self.cmd): - sys.stdout.write (indent + ' command: ' + ' '.join(self.cmd) + '\n') + sys.stderr.write (indent + ' command: ' + ' '.join(self.cmd) + '\n') if len(self.deps): - sys.stdout.write (indent + ' deps: ') + sys.stderr.write (indent + ' deps: ') if dep_recursive: - sys.stdout.write ('\n') + sys.stderr.write ('\n') for x in self.deps: todo[x].display (indent + ' ') else: - sys.stdout.write ((indent+'\n ').join([ show_rebuild(x) for x in self.deps ]) + '\n') + sys.stderr.write ((indent+'\n ').join([ show_rebuild(x) for x in self.deps ]) + '\n') @@ -819,11 +819,11 @@ def execute (message, cmd, working_dir=None): def print_deps (current_file, indent=''): current_file = os.path.normpath (current_file) - sys.stdout.write (indent + current_file) + sys.stderr.write (indent + current_file) if current_file in file_flags: if len(file_flags[current_file]): - sys.stdout.write (' [' + file_flags[current_file] + ']') - sys.stdout.write (os.linesep) + sys.stderr.write (' [' + file_flags[current_file] + ']') + sys.stderr.write (os.linesep) if len(todo[current_file].deps): for entry in todo[current_file].deps: print_deps (entry, indent + ' ') From 14edc2110583bd1dbe9252d4e0914bce83afbd23 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Tue, 22 May 2018 15:22:28 +1000 Subject: [PATCH 0146/1471] mrhistmatch test: Relax tolerance on scale parameter --- testing/tests/mrhistmatch | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testing/tests/mrhistmatch b/testing/tests/mrhistmatch index 7a41d36158..928a650ce8 100644 --- a/testing/tests/mrhistmatch +++ b/testing/tests/mrhistmatch @@ -1,5 +1,5 @@ # Test "scale" mode -M=$(bc <<< "scale=9; $RANDOM/16384") && echo $M > tmp1.txt && mrcalc b0.nii.gz $M -mult tmp1.mif -force && mrhistmatch scale b0.nii.gz tmp1.mif tmp2.mif -force && mrinfo tmp2.mif -property mrhistmatch_scale > tmp2.txt && testing_diff_matrix tmp1.txt tmp2.txt -frac 1e-5 +M=$(bc <<< "scale=9; $RANDOM/16384") && echo $M > tmp1.txt && mrcalc b0.nii.gz $M -mult tmp1.mif -force && mrhistmatch scale b0.nii.gz tmp1.mif tmp2.mif -force && mrinfo tmp2.mif -property mrhistmatch_scale > tmp2.txt && testing_diff_matrix tmp1.txt tmp2.txt -frac 1e-4 # Test "linear" mode # Note: Use fractional tolerance for scale parameter, absolute tolerance for offset parameter -M=$(bc <<< "scale=9; $RANDOM/16384") && C=$(bc <<< "$RANDOM-16384") && echo 
$M > tmp1.txt && echo $C > tmp2.txt && mrcalc b0.nii.gz $M -mult $C -add tmp1.mif -force && mrhistmatch linear b0.nii.gz tmp1.mif tmp2.mif -force && mrinfo tmp2.mif -property mrhistmatch_scale > tmp3.txt && mrinfo tmp2.mif -property mrhistmatch_offset > tmp4.txt && testing_diff_matrix tmp1.txt tmp3.txt -frac 1e-5 && testing_diff_matrix tmp2.txt tmp4.txt -abs 0.5 +M=$(bc <<< "scale=9; $RANDOM/16384") && C=$(bc <<< "$RANDOM-16384") && echo $M > tmp1.txt && echo $C > tmp2.txt && mrcalc b0.nii.gz $M -mult $C -add tmp1.mif -force && mrhistmatch linear b0.nii.gz tmp1.mif tmp2.mif -force && mrinfo tmp2.mif -property mrhistmatch_scale > tmp3.txt && mrinfo tmp2.mif -property mrhistmatch_offset > tmp4.txt && testing_diff_matrix tmp1.txt tmp3.txt -frac 1e-4 && testing_diff_matrix tmp2.txt tmp4.txt -abs 0.5 From 0767d6dc19f1b21e15e3da79aa96483826b85afd Mon Sep 17 00:00:00 2001 From: J-Donald Tournier Date: Tue, 22 May 2018 09:47:56 +0100 Subject: [PATCH 0147/1471] strip() function now also strips null characters This should sort out dangling null characters in DICOM fields - see #1347 --- core/file/dicom/definitions.h | 10 +++++----- core/file/dicom/element.cpp | 28 +++++++++++++--------------- core/mrtrix.h | 8 ++++---- 3 files changed, 22 insertions(+), 24 deletions(-) diff --git a/core/file/dicom/definitions.h b/core/file/dicom/definitions.h index fd0a3ff074..965e404074 100644 --- a/core/file/dicom/definitions.h +++ b/core/file/dicom/definitions.h @@ -48,8 +48,8 @@ #define GROUP_BYTE_ORDER 0x0002U #define GROUP_BYTE_ORDER_SWAPPED 0x0200U -#define GROUP_SEQUENCE 0xFFFEU -#define GROUP_DATA 0x7FE0U +#define GROUP_SEQUENCE 0xFFFEU +#define GROUP_DATA 0x7FE0U #define ELEMENT_TRANSFER_SYNTAX_UID 0x0010U #define ELEMENT_SEQUENCE_ITEM 0xE000U @@ -64,7 +64,7 @@ namespace MR { inline std::string format_date (const std::string& date) { - if (date.empty() || date.size() < 8) + if (date.empty() || date.size() < 8) return date; return date.substr(6,2) + "/" + date.substr(4,2) + "/" + date.substr(0,4); } @@ -73,7 +73,7 @@ namespace MR { inline std::string format_time (const std::string& time) { - if (time.empty()) + if (time.empty()) return time; return time.substr(0,2) + ":" + time.substr(2,2) + ":" + time.substr(4,2); } @@ -82,7 +82,7 @@ namespace MR { inline std::string format_ID (const std::string& ID) { - if (ID.empty()) + if (ID.empty()) return ID; return "(" + ID + ")"; } diff --git a/core/file/dicom/element.cpp b/core/file/dicom/element.cpp index ed21308d07..57fb636be9 100644 --- a/core/file/dicom/element.cpp +++ b/core/file/dicom/element.cpp @@ -299,7 +299,7 @@ namespace MR { for (const uint8_t* p = data; p < data + size; p += sizeof (int16_t)) V.push_back (Raw::fetch_ (p, is_BE)); else if (VR == VR_IS) { - vector strings (split (std::string (reinterpret_cast (data), size), "\\", false)); + auto strings = split (std::string (reinterpret_cast (data), size), "\\", false); V.resize (strings.size()); for (size_t n = 0; n < V.size(); n++) V[n] = to (strings[n]); @@ -323,9 +323,10 @@ namespace MR { for (const uint8_t* p = data; p < data + size; p += sizeof (uint16_t)) V.push_back (Raw::fetch_ (p, is_BE)); else if (VR == VR_IS) { - vector strings (split (std::string (reinterpret_cast (data), size), "\\", false)); + auto strings = split (std::string (reinterpret_cast (data), size), "\\", false); V.resize (strings.size()); - for (size_t n = 0; n < V.size(); n++) V[n] = to (strings[n]); + for (size_t n = 0; n < V.size(); n++) + V[n] = to (strings[n]); } else report_unknown_tag_with_implicit_syntax(); @@ -344,7 
+345,7 @@ namespace MR { for (const uint8_t* p = data; p < data + size; p += sizeof (float32)) V.push_back (Raw::fetch_ (p, is_BE)); else if (VR == VR_DS || VR == VR_IS) { - vector strings (split (std::string (reinterpret_cast (data), size), "\\", false)); + auto strings = split (std::string (reinterpret_cast (data), size), "\\", false); V.resize (strings.size()); for (size_t n = 0; n < V.size(); n++) V[n] = to (strings[n]); @@ -378,16 +379,13 @@ namespace MR { vector Element::get_string () const { - if (VR == VR_AT) { - vector strings; - strings.push_back (printf ("%04X %04X", Raw::fetch_ (data, is_BE), Raw::fetch_ (data+2, is_BE))); - return strings; - } + if (VR == VR_AT) + return { printf ("%04X %04X", Raw::fetch_ (data, is_BE), Raw::fetch_ (data+2, is_BE)) }; - vector strings (split (std::string (reinterpret_cast (data), size), "\\", false)); - for (vector::iterator i = strings.begin(); i != strings.end(); ++i) { - *i = strip (*i); - replace (*i, '^', ' '); + auto strings = split (std::string (reinterpret_cast (data), size), "\\", false); + for (auto& entry: strings) { + entry = strip (entry); + replace (entry, '^', ' '); } return strings; } @@ -398,8 +396,8 @@ namespace MR { template inline void print_vec (const vector& V) { - for (size_t n = 0; n < V.size(); n++) - fprintf (stdout, "%s ", str (V[n]).c_str()); + for (const auto& entry: V) + fprintf (stdout, "%s ", str(entry).c_str()); } } diff --git a/core/mrtrix.h b/core/mrtrix.h index 75760274e7..ac11c8278f 100644 --- a/core/mrtrix.h +++ b/core/mrtrix.h @@ -162,7 +162,7 @@ namespace MR } - inline std::string strip (const std::string& string, const char* ws = " \t\n", bool left = true, bool right = true) + inline std::string strip (const std::string& string, const std::string& ws = {" \0\t\n", 4}, bool left = true, bool right = true) { std::string::size_type start = (left ? string.find_first_not_of (ws) : 0); if (start == std::string::npos) @@ -175,15 +175,15 @@ namespace MR inline void replace (std::string& string, char orig, char final) { - for (std::string::iterator i = string.begin(); i != string.end(); ++i) - if (*i == orig) *i = final; + for (auto& c: string) + if (c == orig) c = final; } inline void replace (std::string& str, const std::string& from, const std::string& to) { if (from.empty()) return; size_t start_pos = 0; - while((start_pos = str.find(from, start_pos)) != std::string::npos) { + while ((start_pos = str.find (from, start_pos)) != std::string::npos) { str.replace (start_pos, from.length(), to); start_pos += to.length(); } From 3cd40e69672c956e69e64c7a23ecd9c25ab37415 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Tue, 22 May 2018 22:48:07 +1000 Subject: [PATCH 0148/1471] mesh2voxel: Improve filling step Use an improved algorithm for determining and classifying voxels that are near the mesh but are not intersected by it. 
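In outline, the new filling step visits the voxels neighbouring each mesh vertex, accumulates a signed distance from the plane defined by that vertex's normal (weighted so that vertices whose tangent plane passes close to the voxel dominate the vote), and makes a preliminary inside/outside call from the sign of the sum. A rough NumPy sketch of that accumulation, assuming vertices are already in voxel coordinates and ignoring voxels already flagged as lying on the mesh (the real implementation operates on MRtrix scratch images)::

    import numpy as np

    def preliminary_classification(vertices, normals, shape):
        # vertices: (N, 3) vertex positions in voxel coordinates
        # normals:  (N, 3) outward-pointing unit normals
        sum_distances = np.zeros(shape, dtype=float)
        offsets = [np.array((i, j, k)) for i in (-1, 0, 1)
                   for j in (-1, 0, 1) for k in (-1, 0, 1) if (i, j, k) != (0, 0, 0)]
        for v, n in zip(vertices, normals):
            centre = np.rint(v).astype(int)
            for off in offsets:
                vox = centre + off
                if np.any(vox < 0) or np.any(vox >= shape):
                    continue
                d = vox - v                       # vertex -> voxel centre offset
                dp_normal = float(d @ n)          # signed distance from the tangent plane
                on_plane = np.linalg.norm(d - dp_normal * n)
                # the closer the in-plane projection, the stronger the vote
                sum_distances[tuple(vox)] += dp_normal / (1.0 + on_plane)
        # negative sum: preliminarily inside; positive sum: preliminarily outside
        return np.sign(sum_distances)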
--- src/surface/algo/mesh2image.cpp | 85 +++++++++++++++++---------------- 1 file changed, 44 insertions(+), 41 deletions(-) diff --git a/src/surface/algo/mesh2image.cpp b/src/surface/algo/mesh2image.cpp index 75d2bf495c..f26348b334 100644 --- a/src/surface/algo/mesh2image.cpp +++ b/src/surface/algo/mesh2image.cpp @@ -44,7 +44,7 @@ namespace MR // For initial segmentation of mesh - identify voxels on the mesh, inside & outside enum vox_mesh_t { UNDEFINED, ON_MESH, PRELIM_OUTSIDE, PRELIM_INSIDE, FILL_TEMP, OUTSIDE, INSIDE }; - ProgressBar progress ("converting mesh to partial volume image", 7); + ProgressBar progress ("converting mesh to partial volume image", 8); // For speed, want the vertex data to be in voxel positions Filter::VertexTransform transform (image); @@ -76,6 +76,8 @@ namespace MR // Stores a flag for each voxel as encoded in enum vox_mesh_t Header H (image); auto init_seg = Image::scratch (H); + for (auto l = Loop(init_seg) (init_seg); l; ++l) + init_seg.value() = vox_mesh_t::UNDEFINED; // For every voxel, stores those polygons that may intersect the voxel using Vox2Poly = std::map< Vox, vector >; @@ -196,7 +198,7 @@ namespace MR } else { // Only call this once each voxel, regardless of the number of intersecting polygons assign_pos_of (voxel).to (init_seg); - init_seg.value() = ON_MESH; + init_seg.value() = vox_mesh_t::ON_MESH; } this_voxel_polys.push_back (poly_index); voxel2poly.insert (std::make_pair (voxel, this_voxel_polys)); @@ -206,46 +208,38 @@ namespace MR ++progress; - // TODO Better implementation here // For *any* voxel not on the mesh but neighbouring a voxel in which a vertex lies, - // track a floating-point value corresponding to its distance from the normal plane. - // Each voxel not on the mesh should then be assigned as prelim_inside or prelim_outside + // track a floating-point value corresponding to its distance from the plane defined + // by the normal at the vertex. + // Each voxel not directly on the mesh should then be assigned as prelim_inside or prelim_outside // depending on whether the summed value is positive or negative - - - - // New implementation of filling in the centre of the mesh - // Rather than selecting the eight external corners and filling in outside the - // mesh (which may omit some areas), selecting anything remaining as 'inside', - // fill inwards from vertices according to their normals, and select anything - // remaining as 'outside'. 
- std::stack to_expand; + auto sum_distances = Image::scratch (H, "Sum of distances from polygon planes"); + Vox adj_voxel; for (size_t i = 0; i != mesh.num_vertices(); ++i) { - const Vox voxel (mesh.vert (i)); - Eigen::Vector3 normal (mesh.norm (i)); - // Scale the normal such that the maximum length along any individual axis is 1.0 (but may be negative) - normal /= normal.array().abs().maxCoeff(); - // Use this to select an adjacent voxel outside the structure (based on the - const Vox outside_neighbour (voxel + Vox(normal)); - // Add this to the set of exterior voxels to be expanded if appropriate - assign_pos_of (outside_neighbour).to (init_seg); - if (!is_out_of_bounds (init_seg)) { - if (init_seg.value() == vox_mesh_t::UNDEFINED) { - init_seg.value() = vox_mesh_t::PRELIM_OUTSIDE; - //to_expand.push (outside_neighbour); - } - } - // Now do the same for inside the structure - const Vox inside_neighbour (voxel - Vox(normal)); - assign_pos_of (inside_neighbour).to (init_seg); - if (!is_out_of_bounds (init_seg)) { - if (init_seg.value() == vox_mesh_t::UNDEFINED) { - init_seg.value() = vox_mesh_t::PRELIM_INSIDE; - //to_expand.push (inside_neighbour); + const Vox centre_voxel (mesh.vert(i)); + for (adj_voxel[2] = centre_voxel[2]-1; adj_voxel[2] <= centre_voxel[2]+1; ++adj_voxel[2]) { + for (adj_voxel[1] = centre_voxel[1]-1; adj_voxel[1] <= centre_voxel[1]+1; ++adj_voxel[1]) { + for (adj_voxel[0] = centre_voxel[0]-1; adj_voxel[0] <= centre_voxel[0]+1; ++adj_voxel[0]) { + if (!is_out_of_bounds (H, adj_voxel) && (adj_voxel - centre_voxel).any()) { + const Eigen::Vector3 offset (adj_voxel.cast().matrix() - mesh.vert(i)); + const default_type dp_normal = offset.dot (mesh.norm(i)); + const default_type offset_on_plane = (offset - (mesh.norm(i) * dp_normal)).norm(); + assign_pos_of (adj_voxel).to (sum_distances); + // If offset_on_plane is close to zero, this vertex should contribute strongly toward + // the sum of distances from the surface within this voxel + sum_distances.value() += (1.0 / (1.0 + offset_on_plane)) * dp_normal; + } + } } } } ++progress; + for (auto l = Loop(init_seg) (init_seg, sum_distances); l; ++l) { + if (static_cast (sum_distances.value()) != 0.0f && init_seg.value() != vox_mesh_t::ON_MESH) + init_seg.value() = sum_distances.value() < 0.0 ? 
vox_mesh_t::PRELIM_INSIDE : vox_mesh_t::PRELIM_OUTSIDE; + } + ++progress; + // Can't guarantee that mesh might have a single isolated polygon pointing the wrong way // Therefore, need to: @@ -254,6 +248,7 @@ namespace MR // - For the final region selection, assign values to voxels based on a majority vote Image seed (init_seg); vector to_fill; + std::stack to_expand; for (auto l = Loop(seed) (seed); l; ++l) { if (seed.value() == vox_mesh_t::PRELIM_INSIDE || seed.value() == vox_mesh_t::PRELIM_OUTSIDE) { size_t prelim_inside_count = 0, prelim_outside_count = 0; @@ -370,7 +365,8 @@ namespace MR Vertex p (*i_p); p += Eigen::Vector3 (voxel[0], voxel[1], voxel[2]); - default_type best_min_edge_distance = -std::numeric_limits::infinity(); + default_type best_min_edge_distance_on_plane = -std::numeric_limits::infinity(); + //default_type best_interior_distance_from_plane = std::numeric_limits::infinity(); bool best_result_inside = false; // Only test against those polygons that are near this voxel @@ -381,7 +377,14 @@ namespace MR VertexList v; bool is_inside = false; - default_type min_edge_distance = std::numeric_limits::infinity(); + default_type min_edge_distance_on_plane = std::numeric_limits::infinity(); + + // FIXME + // If point does not lie within projection of polygon, compute the + // distance of the point projected onto the plane to the nearest edge of that polygon; + // use this distance to decide which polygon classifies the point + // If point does lie within projection of polygon (potentially more than one), then the + // polygon to which the distance from the plane is minimal classifies the point if (polygon_num_vertices == 3) { @@ -402,7 +405,7 @@ namespace MR edge_distances[0] = (p_on_plane-v[0]).dot (zero); edge_distances[1] = (p_on_plane-v[2]).dot (one); edge_distances[2] = (p_on_plane-v[1]).dot (two); - min_edge_distance = std::min (edge_distances[0], std::min (edge_distances[1], edge_distances[2])); + min_edge_distance_on_plane = std::min ( { edge_distances[0], edge_distances[1], edge_distances[2] } ); } else { @@ -438,14 +441,14 @@ namespace MR // Now, how far away is the point within the plane from this edge? const default_type this_edge_distance = (p_on_plane - p1).dot (edge_normal); - min_edge_distance = std::min (min_edge_distance, this_edge_distance); + min_edge_distance_on_plane = std::min (min_edge_distance_on_plane, this_edge_distance); } } - if (min_edge_distance > best_min_edge_distance) { - best_min_edge_distance = min_edge_distance; + if (min_edge_distance_on_plane > best_min_edge_distance_on_plane) { + best_min_edge_distance_on_plane = min_edge_distance_on_plane; best_result_inside = is_inside; } From a9029946874b6883dc9716aca13b527f26768616 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Fri, 25 May 2018 11:29:02 +1000 Subject: [PATCH 0149/1471] fixel2voxel: Fix dimensionality of output image When the operator chosen to map fixel data to a voxel grid results in a single scalar value per voxel, the output image should logically be a 3D image. However, due to use of the fixel format index image as the template, many such images come out as 4D. This change explicitly sets the output image dimensionality for each choice of operator. 
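The underlying rule is simple: operators that reduce the fixels in a voxel to a single scalar should produce a 3D output image, whereas vector-valued operators (the DEC and split variants) retain a fourth axis. Schematically (illustrative operator names only; the command itself selects operations by numeric index)::

    # Scalar-per-voxel reductions -> plain 3D output;
    # anything producing several values per voxel keeps a 4th axis.
    SCALAR_OPS = {'mean', 'sum', 'min', 'max', 'count', 'complexity'}   # illustrative names

    def output_ndim(operation):
        return 3 if operation in SCALAR_OPS else 4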
--- cmd/fixel2voxel.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cmd/fixel2voxel.cpp b/cmd/fixel2voxel.cpp index fcd3b3fa3f..53ec97b21b 100644 --- a/cmd/fixel2voxel.cpp +++ b/cmd/fixel2voxel.cpp @@ -524,6 +524,7 @@ void run () H_out.datatype().set_byte_order_native(); H_out.keyval().erase (Fixel::n_fixels_key); if (op == 7) { // count + H_out.ndim() = 3; H_out.datatype() = DataType::UInt8; } else if (op == 10 || op == 11) { // dec H_out.ndim() = 4; @@ -542,6 +543,8 @@ void run () // 3 volumes per fixel if performing split_dir H_out.size(3) = (op == 13) ? (3 * max_count) : max_count; } + } else { + H_out.ndim() = 3; } if (op == 10 || op == 11 || op == 13) // dec or split_dir From b69164c9d3e7041e86fa3dc270592d2d2c58456a Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Fri, 25 May 2018 15:54:38 +1000 Subject: [PATCH 0150/1471] 5ttgen: Fix printing output of 5ttcheck --- bin/5ttgen | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/5ttgen b/bin/5ttgen index 3414b1c889..a148978b14 100755 --- a/bin/5ttgen +++ b/bin/5ttgen @@ -49,7 +49,7 @@ alg.execute() stderr = run.command('5ttcheck result.mif')[1] if stderr: app.warn('Generated image does not perfectly conform to 5TT format:') - for line in stderr: + for line in stderr.splitlines(): app.warn(line) run.command('mrconvert result.mif ' + path.fromUser(app.args.output, True) + (' -force' if app.forceOverwrite else '')) From 805641811b958051c8dde13fe8602fe5c868a99e Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Fri, 25 May 2018 18:36:45 +1000 Subject: [PATCH 0151/1471] GLM code: Prevent compiler warning warning: delete called on non-final 'SubjectConnectomeImport' that has virtual functions but non-virtual destructor [-Wdelete-non-virtual-dtor] --- core/math/stats/import.h | 1 + 1 file changed, 1 insertion(+) diff --git a/core/math/stats/import.h b/core/math/stats/import.h index 09306849cc..d438977d2e 100644 --- a/core/math/stats/import.h +++ b/core/math/stats/import.h @@ -51,6 +51,7 @@ namespace MR public: SubjectDataImportBase (const std::string& path) : path (path) { } + virtual ~SubjectDataImportBase() { } /*! * @param row the row of a matrix into which the data from this From 89687167139493ca6d9d41a7a330b354bdfb624f Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Tue, 5 Jun 2018 16:46:40 +1000 Subject: [PATCH 0152/1471] dwipreproc: Catch --slspec mis-use If a user attempts to provide slice timing information to eddy via the --slspec option, instead of the slice indices for acquisition groups, then parse the contents of this file as slice timing; but give the user a slap on the wrist while doing so. Inspired by #1360. --- bin/dwipreproc | 26 ++++++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/bin/dwipreproc b/bin/dwipreproc index f0840ed934..2943303508 100755 --- a/bin/dwipreproc +++ b/bin/dwipreproc @@ -213,12 +213,26 @@ if eddy_mporder: if os.path.isfile(slspec_file_path): # Since there's a chance that we may need to pad this info, we can't just copy this file # to the temporary directory... 
- with open(slspec_file_path, 'r') as f: - for line in f: - line = line.strip() - if line: - slice_groups.append([int(value) for value in line.split()]) - app.var(slice_groups) + try: + with open(slspec_file_path, 'r') as f: + for line in f: + line = line.strip() + if line: + slice_groups.append([int(value) for value in line.split()]) + app.var(slice_groups) + except ValueError: + try: + with open(slspec_file_path, 'r') as f: + for line in f: + line = line.strip() + if line: + slice_timing.append(float(line)) + app.warning('\"slspec\" file provided to FSL eddy should contain slice indices for slice groups, not slice timing; nevertheless, slice timing has been imported from file \"' + slspec_file_path + '\"') + app.var(slice_timing) + if len(slice_timing) != dwi_num_slices: + app.error('Cannot use slice timing information from file \"' + slspec_file_path + '\" for slice-to-volume correction: Number of entries (' + len(slice_timing) + ') does not match number of slices (' + dwi_header.size()[2] + ')') + except ValueError: + app.error('Error parsing \"slspec\" file (this should contain integer values indicating slice groups, not slice timing; please see FSL eddy help page)') # Remove this entry from eddy_manual_options; it'll be inserted later, with the # path to the new slspec file eddy_manual_options = [ s for s in eddy_manual_options if not s.startswith('--slspec') ] From e93de1e59bd4fa74aa231b9bc7fbf8d27b17fafd Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Tue, 5 Jun 2018 17:55:41 +1000 Subject: [PATCH 0153/1471] dwipreproc: Fix app.warn() call --- bin/dwipreproc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/dwipreproc b/bin/dwipreproc index 2943303508..60ba0fcae6 100755 --- a/bin/dwipreproc +++ b/bin/dwipreproc @@ -227,7 +227,7 @@ if eddy_mporder: line = line.strip() if line: slice_timing.append(float(line)) - app.warning('\"slspec\" file provided to FSL eddy should contain slice indices for slice groups, not slice timing; nevertheless, slice timing has been imported from file \"' + slspec_file_path + '\"') + app.warn('\"slspec\" file provided to FSL eddy should contain slice indices for slice groups, not slice timing; nevertheless, slice timing has been imported from file \"' + slspec_file_path + '\"') app.var(slice_timing) if len(slice_timing) != dwi_num_slices: app.error('Cannot use slice timing information from file \"' + slspec_file_path + '\" for slice-to-volume correction: Number of entries (' + len(slice_timing) + ') does not match number of slices (' + dwi_header.size()[2] + ')') From f7e9141007e0c882cfe9e7020fe29986086794a3 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Tue, 3 Jul 2018 18:50:38 +1000 Subject: [PATCH 0154/1471] New commands: mrcentre, labelcentre These commands calculate the centre of mass of either an image (which can be masked, and for which the underlying image intensities will be taken into account), or a parcellation label image (in which case the centre of mass of each parcel is reported). Closes #1370. 
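For reference, the computation both commands perform is an intensity-weighted (or, for labelcentre, per-parcel) mean of voxel indices, optionally restricted to a mask, mapped through the voxel-to-scanner transform when scanner-space output is requested. A NumPy sketch under those assumptions (not the commands' actual code)::

    import numpy as np

    def centre_of_mass(data, affine, mask=None, space='scanner'):
        # data:   3D array of image intensities
        # affine: 4x4 voxel-to-scanner transform
        weights = np.asarray(data, dtype=float)
        if mask is not None:
            weights = weights * (np.asarray(mask) > 0)
        idx = np.indices(weights.shape, dtype=float)          # shape (3, X, Y, Z)
        com = (idx * weights).sum(axis=(1, 2, 3)) / weights.sum()
        if space == 'voxel':
            return com
        return affine[:3, :3] @ com + affine[:3, 3]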
--- cmd/labelcentre.cpp | 78 +++++++++++++++++++++++ cmd/mrcentre.cpp | 82 +++++++++++++++++++++++++ docs/reference/commands/labelcentre.rst | 59 ++++++++++++++++++ docs/reference/commands/mrcentre.rst | 61 ++++++++++++++++++ docs/reference/commands_list.rst | 4 ++ 5 files changed, 284 insertions(+) create mode 100644 cmd/labelcentre.cpp create mode 100644 cmd/mrcentre.cpp create mode 100644 docs/reference/commands/labelcentre.rst create mode 100644 docs/reference/commands/mrcentre.rst diff --git a/cmd/labelcentre.cpp b/cmd/labelcentre.cpp new file mode 100644 index 0000000000..d2af7908e5 --- /dev/null +++ b/cmd/labelcentre.cpp @@ -0,0 +1,78 @@ +/* Copyright (c) 2008-2018 the MRtrix3 contributors. + * + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, you can obtain one at http://mozilla.org/MPL/2.0/. + * + * MRtrix is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * For more details, see http://www.mrtrix.org/. + */ + + +#include "command.h" +#include "header.h" +#include "image.h" +#include "image_helpers.h" +#include "transform.h" +#include "types.h" + +#include "connectome/connectome.h" + +using namespace MR; +using namespace App; + + +const char* space_options[] = { "scanner", "voxel", nullptr }; + + +void usage () +{ + AUTHOR = "Robert E. Smith (robert.smith@florey.edu.au)"; + + SYNOPSIS = "Determine the centre of mass / centre of gravity of each parcel within a label image"; + + ARGUMENTS + + Argument ("input", "the input label image").type_image_in() + + Argument ("space", "the coordinate space in which to provide the centres (options are: " + join (space_options, ",") + ")").type_choice (space_options); + +} + + +using Connectome::node_t; +using vector_type = Eigen::Array; +using matrix_type = Eigen::Matrix; + + +void run () +{ + Header H = Header::open (argument[0]); + if (H.ndim() > 3) + throw Exception ("Command does not accept images with more than 3 dimensions"); + Connectome::check (H); + Image image = H.get_image(); + + matrix_type coms; + vector_type masses; + + for (auto l = Loop(image) (image); l; ++l) { + const node_t value = image.value(); + if (value) { + if (value > coms.rows()) { + coms.conservativeResizeLike (matrix_type::Zero (value, 3)); + masses.conservativeResizeLike (vector_type::Zero (value)); + } + coms.row(value-1) += Eigen::Vector3 (image.index(0), image.index(1), image.index(2)); + masses[value-1]++; + } + } + + coms = coms.array().colwise() / masses; + + if (!int(argument[1])) + coms = (image.transform() * coms.transpose()).transpose(); + + std::cout << coms; +} \ No newline at end of file diff --git a/cmd/mrcentre.cpp b/cmd/mrcentre.cpp new file mode 100644 index 0000000000..7aa805ac4c --- /dev/null +++ b/cmd/mrcentre.cpp @@ -0,0 +1,82 @@ +/* Copyright (c) 2008-2018 the MRtrix3 contributors. + * + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, you can obtain one at http://mozilla.org/MPL/2.0/. + * + * MRtrix is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * For more details, see http://www.mrtrix.org/. 
+ */ + + +#include "command.h" +#include "image.h" +#include "image_helpers.h" +#include "transform.h" +#include "types.h" + +using namespace MR; +using namespace App; + + +const char* space_options[] = { "scanner", "voxel", nullptr }; + + +void usage () +{ + AUTHOR = "Robert E. Smith (robert.smith@florey.edu.au)"; + + SYNOPSIS = "Determine the centre of mass / centre of gravity of an image"; + + ARGUMENTS + + Argument ("input", "the input image").type_image_in() + + Argument ("space", "the coordinate space in which to provide the centre (options are: " + join (space_options, ",") + ")").type_choice (space_options); + + OPTIONS + + Option ("mask", "only include voxels within a mask in the calculation") + + Argument ("image").type_image_in(); + +} + + +typedef float value_type; + + +void run () +{ + Image image = Image::open (argument[0]); + if (image.ndim() > 3) + throw Exception ("Command does not accept images with more than 3 dimensions"); + + Image mask; + auto opt = get_options ("mask"); + if (opt.size()) { + mask = Image::open (opt[0][0]); + check_dimensions (image, mask); + } + + Eigen::Vector3 com (0.0, 0.0, 0.0); + default_type mass = 0.0; + if (mask.valid()) { + for (auto l = Loop(image) (image, mask); l; ++l) { + if (mask.value()) { + com += Eigen::Vector3 (image.index(0), image.index(1), image.index(2)) * image.value(); + mass += image.value(); + } + } + } else { + for (auto l = Loop(image) (image); l; ++l) { + com += Eigen::Vector3 (image.index(0), image.index(1), image.index(2)) * image.value(); + mass += image.value(); + } + } + + com /= mass; + if (!int(argument[1])) + com = image.transform() * com; + + std::cout << com.transpose(); +} \ No newline at end of file diff --git a/docs/reference/commands/labelcentre.rst b/docs/reference/commands/labelcentre.rst new file mode 100644 index 0000000000..cfbae07d7e --- /dev/null +++ b/docs/reference/commands/labelcentre.rst @@ -0,0 +1,59 @@ +.. _labelcentre: + +labelcentre +=================== + +Synopsis +-------- + +Determine the centre of mass / centre of gravity of each parcel within a label image + +Usage +-------- + +:: + + labelcentre [ options ] input space + +- *input*: the input label image +- *space*: the coordinate space in which to provide the centres (options are: scanner,voxel) + +Options +------- + +Standard options +^^^^^^^^^^^^^^^^ + +- **-info** display information messages. + +- **-quiet** do not display information messages or progress status. Alternatively, this can be achieved by setting the MRTRIX_QUIET environment variable to a non-empty string. + +- **-debug** display debugging messages. + +- **-force** force overwrite of output files. Caution: Using the same file as input and output might cause unexpected behaviour. + +- **-nthreads number** use this number of threads in multi-threaded applications (set to 0 to disable multi-threading). + +- **-help** display this information page and exit. + +- **-version** display version information and exit. + +-------------- + + + +**Author:** Robert E. Smith (robert.smith@florey.edu.au) + +**Copyright:** Copyright (c) 2008-2018 the MRtrix3 contributors. + +This Source Code Form is subject to the terms of the Mozilla Public +License, v. 2.0. If a copy of the MPL was not distributed with this +file, you can obtain one at http://mozilla.org/MPL/2.0/ + +MRtrix3 is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty +of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+ +For more details, see http://www.mrtrix.org/ + + diff --git a/docs/reference/commands/mrcentre.rst b/docs/reference/commands/mrcentre.rst new file mode 100644 index 0000000000..47009b3053 --- /dev/null +++ b/docs/reference/commands/mrcentre.rst @@ -0,0 +1,61 @@ +.. _mrcentre: + +mrcentre +=================== + +Synopsis +-------- + +Determine the centre of mass / centre of gravity of an image + +Usage +-------- + +:: + + mrcentre [ options ] input space + +- *input*: the input image +- *space*: the coordinate space in which to provide the centre (options are: scanner,voxel) + +Options +------- + +- **-mask image** only include voxels within a mask in the calculation + +Standard options +^^^^^^^^^^^^^^^^ + +- **-info** display information messages. + +- **-quiet** do not display information messages or progress status. Alternatively, this can be achieved by setting the MRTRIX_QUIET environment variable to a non-empty string. + +- **-debug** display debugging messages. + +- **-force** force overwrite of output files. Caution: Using the same file as input and output might cause unexpected behaviour. + +- **-nthreads number** use this number of threads in multi-threaded applications (set to 0 to disable multi-threading). + +- **-help** display this information page and exit. + +- **-version** display version information and exit. + +-------------- + + + +**Author:** Robert E. Smith (robert.smith@florey.edu.au) + +**Copyright:** Copyright (c) 2008-2018 the MRtrix3 contributors. + +This Source Code Form is subject to the terms of the Mozilla Public +License, v. 2.0. If a copy of the MPL was not distributed with this +file, you can obtain one at http://mozilla.org/MPL/2.0/ + +MRtrix3 is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty +of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+ +For more details, see http://www.mrtrix.org/ + + diff --git a/docs/reference/commands_list.rst b/docs/reference/commands_list.rst index 51a80b89a7..4fde072fd7 100644 --- a/docs/reference/commands_list.rst +++ b/docs/reference/commands_list.rst @@ -47,6 +47,7 @@ List of MRtrix3 commands commands/fod2fixel commands/label2colour commands/label2mesh + commands/labelcentre commands/labelconvert commands/maskdump commands/maskfilter @@ -56,6 +57,7 @@ List of MRtrix3 commands commands/mraverageheader commands/mrcalc commands/mrcat + commands/mrcentre commands/mrcheckerboardmask commands/mrclusterstats commands/mrconvert @@ -159,6 +161,7 @@ List of MRtrix3 commands :ref:`fod2fixel`, "Perform segmentation of continuous Fibre Orientation Distributions (FODs) to produce discrete fixels" :ref:`label2colour`, "Convert a parcellated image (where values are node indices) into a colour image" :ref:`label2mesh`, "Generate meshes from a label image" + :ref:`labelcentre`, "Determine the centre of mass / centre of gravity of each parcel within a label image" :ref:`labelconvert`, "Convert a connectome node image from one lookup table to another" :ref:`maskdump`, "Print out the locations of all non-zero voxels in a mask image" :ref:`maskfilter`, "Perform filtering operations on 3D / 4D mask images" @@ -168,6 +171,7 @@ List of MRtrix3 commands :ref:`mraverageheader`, "Calculate the average (unbiased) coordinate space of all input images" :ref:`mrcalc`, "Apply generic voxel-wise mathematical operations to images" :ref:`mrcat`, "Concatenate several images into one" + :ref:`mrcentre`, "Determine the centre of mass / centre of gravity of an image" :ref:`mrcheckerboardmask`, "Create bitwise checkerboard image" :ref:`mrclusterstats`, "Voxel-based analysis using permutation testing and threshold-free cluster enhancement" :ref:`mrconvert`, "Perform conversion between different file types and optionally extract a subset of the input image" From 874c73106062b2c6ed9a0825b5ea49eab1b63c09 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Tue, 3 Jul 2018 20:15:12 +1000 Subject: [PATCH 0155/1471] Docs: New page on handling external modules Additionally moved instructions related to the handling of external modules specific to Windows from the Windows installation page to this new page. --- docs/index.rst | 1 + docs/installation/windows_install.rst | 34 ----------- docs/tips_and_tricks/external_modules.rst | 69 +++++++++++++++++++++++ 3 files changed, 70 insertions(+), 34 deletions(-) create mode 100644 docs/tips_and_tricks/external_modules.rst diff --git a/docs/index.rst b/docs/index.rst index 53a64f95ec..574e2eaed6 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -95,6 +95,7 @@ These applications have been written from scratch in C++, using the functionalit tips_and_tricks/dicom_handling tips_and_tricks/batch_processing_with_foreach + tips_and_tricks/external_modules .. toctree:: :maxdepth: 1 diff --git a/docs/installation/windows_install.rst b/docs/installation/windows_install.rst index b20083130e..2ff76d3113 100644 --- a/docs/installation/windows_install.rst +++ b/docs/installation/windows_install.rst @@ -169,37 +169,3 @@ Keeping *MRtrix3* up to date and re-run step 1 again. -Compiling external projects with ``msys2`` ------------------------------------------- - -In ``msys2``, the ``ln -s`` command actually creates a *copy* of the -target, *not* a symbolic link. By doing so, the build script is unable -to identify the location of the MRtrix libraries when trying to compile -an external module. 
- -The simplest way around this is simply to invoke the build script of the main -*MRtrix3* install directly. For example, if compiling an external project called -``myproject``, residing in a folder alongside the main ``mrtrix3`` folder, the -build script can be invoked with:: - - # current working directory is 'myproject': - ../mrtrix3/build - -If you really want a symbolic link, one solution is to use a standard Windows -command prompt, with Administrator privileges: In the file explorer, go to -``C:\Windows\system32``, locate the file ``cmd.exe``, right-click and -select 'Run as administrator'. Within this prompt, use the ``mklink`` -command (note that the argument order passed to ``mklink`` is reversed -with respect to ``ln -s``; i.e. provide the location of the link, *then* -the target). Make sure that you provide the *full path* to both link and -target, e.g.:: - - mklink C:\msys64\home\username\src\my_project\build C:\msys64\home\username\src\MRtrix3\build - -and ``msys64`` should be able to interpret the softlink path correctly -(confirm with ``ls -la``). - -I have also found recently that the build script will not correctly detect use -of a softlink for compiling an external project when run under Python2, so -Python3 must be used explicitly. - diff --git a/docs/tips_and_tricks/external_modules.rst b/docs/tips_and_tricks/external_modules.rst new file mode 100644 index 0000000000..ab6e859a91 --- /dev/null +++ b/docs/tips_and_tricks/external_modules.rst @@ -0,0 +1,69 @@ +.. _external_modules: + +External modules +================ + +The *MRtrix3* build process allows for the easy development of separate modules, +compiled against the *MRtrix3* core (or indeed against any other *MRtrix3* module). +This allows developers to maintain their own repository, or compile stand-alone +commands provided by developers / other users, without affecting their core *MRtrix3* +installation. The obvious benefit is that developers can keep their own developments +private if they wish to, and the *MRtrix3* core can be kept as lean as possible. + +A module simply consists of a separate directory, which contains its own ``cmd/`` +folder, and potentially also its own ``src/`` folder if required. The build process +is then almost identical to that for the *MRtrix3* core, with a few differences. + +The most relevant difference is how the build script is invoked. For a module, +compilation is started by invoking the *MRtrix3* core's ``build`` script, but with +the module's top-level folder being the current working directory. For example, if +the *MRtrix3* core resides in the directory ``~/src/mrtrix/core``, and the module +resides in ``~/src/mrtrix/mymodule``, then the module can be compiled by typing:: + + $ cd ~/src/mrtrix/mymodule + $ ../core/build + +For routine use, it is more convenient to set up a symbolic link pointing to the +*MRtrix3* core's build script, and invoke that:: + + $ cd ~/src/mrtrix/mymodule + $ ln -s ../core/build + $ ./build + +Regardless of which technique is used to invoke the build script, there should now +be compiled binaries present in the newly-created directory +``~/src/mrtrix/mymodule/bin/``. You can then invoke such commands either by providing +the full path to the executable file, or by adding the location of the module's ``bin/`` +directory to your ``PATH`` environment variable. + + + + +Note for Windows users +---------------------- + +In ``msys2``, the ``ln -s`` command actually creates a *copy* of the +target, *not* a symbolic link. 
By doing so, the build script is unable +to identify the location of the *MRtrix3* core libraries when trying to compile +an external module. + +The simplest way around this is simply to invoke the build script of the main +*MRtrix3* install directly, as shown in the first example above. + +If you *really* want a symbolic link, one solution is to use a standard Windows +command prompt, with Administrator privileges: In the file explorer, go to +``C:\Windows\system32``, locate the file ``cmd.exe``, right-click and +select 'Run as administrator'. Within this prompt, use the ``mklink`` +command (note that the argument order passed to ``mklink`` is reversed +with respect to ``ln -s``; i.e. provide the location of the link, *then* +the target). Make sure that you provide the *full path* to both link and +target, e.g.:: + + mklink C:\msys64\home\username\src\mrtrix\mymodule\build C:\msys64\home\username\src\mrtrix\core\build + +and ``msys64`` should then be able to interpret the softlink path correctly +(confirm with ``ls -la``). + +I have also found recently that the build script will not correctly detect use +of a softlink for compiling an external project when run under Python2, so +Python3 must be used explicitly. \ No newline at end of file From fbdf6b8a4ca9f053d17f46cc85d6c7e453f754ae Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Wed, 4 Jul 2018 18:06:55 +1000 Subject: [PATCH 0156/1471] Scripts: Tweak message on command failure Users seem to continue reporting failure of commands executed within a wrapping script as though they are failures of the script itself. Maybe a minor rewording of how the error message is presented will reinforce the fact that in many circumstances, such commands are responsible for their own failure, rather than the script providing the command with bad data resulting in a failure. --- lib/mrtrix3/app.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/mrtrix3/app.py b/lib/mrtrix3/app.py index 1555c7708c..42781e148a 100644 --- a/lib/mrtrix3/app.py +++ b/lib/mrtrix3/app.py @@ -270,7 +270,8 @@ def complete(): #pylint: disable=unused-variable # This needs to be printed even if the -quiet option is used if os.path.isfile(os.path.join(tempDir, 'error.txt')): with open(os.path.join(tempDir, 'error.txt'), 'r') as errortext: - sys.stderr.write(os.path.basename(sys.argv[0]) + ': ' + colourWarn + 'Script failed while executing the command: ' + errortext.readline().rstrip() + colourClear + '\n') + sys.stderr.write(os.path.basename(sys.argv[0]) + ': ' + colourWarn + 'Following command failed during execution of the script:' + colourClear + '\n') + sys.stderr.write(os.path.basename(sys.argv[0]) + ': ' + colourWarn + errortext.readline().rstrip() + colourClear + '\n') sys.stderr.write(os.path.basename(sys.argv[0]) + ': ' + colourWarn + 'For debugging, inspect contents of temporary directory: ' + tempDir + colourClear + '\n') else: sys.stderr.write(os.path.basename(sys.argv[0]) + ': ' + colourConsole + 'Contents of temporary directory kept, location: ' + tempDir + colourClear + '\n') From cfc9d1951c225b230cd0d51e2b8833f4d5aca184 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Wed, 4 Jul 2018 21:10:43 +1000 Subject: [PATCH 0157/1471] mesh2voxel: Tweak to partial volume estimation In some cases, a surface may intersect a single voxel at two locations that are a long distance from one another on the surface. When this happens, the directions of the normals of those polygons may be almost directly opposing. 
In this case, one does not want to choose whether or not a particular point is inside or outside the surface based on the polygon to which the point most accurately projects to the inside of the polygon; instead, if the point projects to the inside of more than one polygon, the polygon to which the point is geometrically closest should be used to determine whether the point is inside or outside the surface. --- src/surface/algo/mesh2image.cpp | 30 ++++++++++++++++++++---------- 1 file changed, 20 insertions(+), 10 deletions(-) diff --git a/src/surface/algo/mesh2image.cpp b/src/surface/algo/mesh2image.cpp index f26348b334..69235cfaa0 100644 --- a/src/surface/algo/mesh2image.cpp +++ b/src/surface/algo/mesh2image.cpp @@ -366,8 +366,8 @@ namespace MR p += Eigen::Vector3 (voxel[0], voxel[1], voxel[2]); default_type best_min_edge_distance_on_plane = -std::numeric_limits::infinity(); - //default_type best_interior_distance_from_plane = std::numeric_limits::infinity(); bool best_result_inside = false; + default_type best_min_distance_from_interior_projection = std::numeric_limits::infinity(); // Only test against those polygons that are near this voxel for (vector::const_iterator polygon_index = in.second.begin(); polygon_index != in.second.end(); ++polygon_index) { @@ -378,6 +378,7 @@ namespace MR bool is_inside = false; default_type min_edge_distance_on_plane = std::numeric_limits::infinity(); + default_type distance_from_plane = 0.0; // FIXME // If point does not lie within projection of polygon, compute the @@ -393,17 +394,18 @@ namespace MR // First: is it aligned with the normal? const Vertex poly_centre ((v[0] + v[1] + v[2]) * (1.0/3.0)); const Vertex diff (p - poly_centre); - is_inside = (diff.dot (n) <= 0.0); + distance_from_plane = diff.dot (n); + is_inside = (distance_from_plane <= 0.0); // Second: how well does it project onto this polygon? const Vertex p_on_plane (p - (n * (diff.dot (n)))); std::array edge_distances; - Vertex zero = (v[2]-v[0]).cross (n); zero.normalize(); - Vertex one = (v[1]-v[2]).cross (n); one .normalize(); + Vertex zero = (v[1]-v[2]).cross (n); zero.normalize(); + Vertex one = (v[2]-v[0]).cross (n); one .normalize(); Vertex two = (v[0]-v[1]).cross (n); two .normalize(); - edge_distances[0] = (p_on_plane-v[0]).dot (zero); - edge_distances[1] = (p_on_plane-v[2]).dot (one); + edge_distances[0] = (p_on_plane-v[2]).dot (zero); + edge_distances[1] = (p_on_plane-v[0]).dot (one); edge_distances[2] = (p_on_plane-v[1]).dot (two); min_edge_distance_on_plane = std::min ( { edge_distances[0], edge_distances[1], edge_distances[2] } ); @@ -417,7 +419,8 @@ namespace MR // First: is it aligned with the normal? const Vertex poly_centre ((v[0] + v[1] + v[2] + v[3]) * 0.25); const Vertex diff (p - poly_centre); - is_inside = (diff.dot (n) <= 0.0); + distance_from_plane = diff.dot (n); + is_inside = (distance_from_plane <= 0.0); // Second: how well does it project onto this polygon? 
const Vertex p_on_plane (p - (n * (diff.dot (n)))); @@ -447,9 +450,16 @@ namespace MR } - if (min_edge_distance_on_plane > best_min_edge_distance_on_plane) { - best_min_edge_distance_on_plane = min_edge_distance_on_plane; - best_result_inside = is_inside; + if (min_edge_distance_on_plane > 0.0) { + if (std::abs (distance_from_plane) < std::abs (best_min_distance_from_interior_projection)) { + best_min_distance_from_interior_projection = distance_from_plane; + best_result_inside = is_inside; + } + } else if (!std::isfinite (best_min_distance_from_interior_projection)) { + if (min_edge_distance_on_plane > best_min_edge_distance_on_plane) { + best_min_edge_distance_on_plane = min_edge_distance_on_plane; + best_result_inside = is_inside; + } } } From 4959fe4b90431232fc7cc49059752c4d1f882f79 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Wed, 4 Jul 2018 21:21:52 +1000 Subject: [PATCH 0158/1471] mesh2image: Better progress information Split progress information into two stages: The voxel-based segmentation of the surface, and the partial volume estimation for edge voxels. --- src/surface/algo/mesh2image.cpp | 483 ++++++++++++++++---------------- 1 file changed, 245 insertions(+), 238 deletions(-) diff --git a/src/surface/algo/mesh2image.cpp b/src/surface/algo/mesh2image.cpp index 69235cfaa0..c7ed77b321 100644 --- a/src/surface/algo/mesh2image.cpp +++ b/src/surface/algo/mesh2image.cpp @@ -44,270 +44,274 @@ namespace MR // For initial segmentation of mesh - identify voxels on the mesh, inside & outside enum vox_mesh_t { UNDEFINED, ON_MESH, PRELIM_OUTSIDE, PRELIM_INSIDE, FILL_TEMP, OUTSIDE, INSIDE }; - ProgressBar progress ("converting mesh to partial volume image", 8); - // For speed, want the vertex data to be in voxel positions - Filter::VertexTransform transform (image); - transform.set_real2voxel(); Mesh mesh; - transform (mesh_realspace, mesh); - - // These are needed now for interior filling section of algorithm - if (!mesh.have_normals()) - mesh.calculate_normals(); - - static const Vox adj_voxels[6] = { { -1, 0, 0 }, - { +1, 0, 0 }, - { 0, -1, 0 }, - { 0, +1, 0 }, - { 0, 0, -1 }, - { 0, 0, +1 } }; - - // Compute normals for polygons vector polygon_normals; - polygon_normals.reserve (mesh.num_polygons()); - for (TriangleList::const_iterator p = mesh.get_triangles().begin(); p != mesh.get_triangles().end(); ++p) - polygon_normals.push_back (normal (mesh, *p)); - for (QuadList::const_iterator p = mesh.get_quads().begin(); p != mesh.get_quads().end(); ++p) - polygon_normals.push_back (normal (mesh, *p)); - ++progress; - - // Create some memory to work with: - // Stores a flag for each voxel as encoded in enum vox_mesh_t - Header H (image); - auto init_seg = Image::scratch (H); - for (auto l = Loop(init_seg) (init_seg); l; ++l) - init_seg.value() = vox_mesh_t::UNDEFINED; - - // For every voxel, stores those polygons that may intersect the voxel + + // For every edge voxel, stores those polygons that may intersect the voxel using Vox2Poly = std::map< Vox, vector >; Vox2Poly voxel2poly; - // Map each polygon to the underlying voxels - for (size_t poly_index = 0; poly_index != mesh.num_polygons(); ++poly_index) { - - const size_t num_vertices = (poly_index < mesh.num_triangles()) ? 
3 : 4; + { + ProgressBar progress ("Performing voxel-based segmentation of surface", 8); + + Filter::VertexTransform transform (image); + transform.set_real2voxel(); + transform (mesh_realspace, mesh); + ++progress; + + // These are needed now for interior filling section of algorithm + if (!mesh.have_normals()) + mesh.calculate_normals(); + + static const Vox adj_voxels[6] = { { -1, 0, 0 }, + { +1, 0, 0 }, + { 0, -1, 0 }, + { 0, +1, 0 }, + { 0, 0, -1 }, + { 0, 0, +1 } }; + + // Compute normals for polygons + polygon_normals.reserve (mesh.num_polygons()); + for (TriangleList::const_iterator p = mesh.get_triangles().begin(); p != mesh.get_triangles().end(); ++p) + polygon_normals.push_back (normal (mesh, *p)); + for (QuadList::const_iterator p = mesh.get_quads().begin(); p != mesh.get_quads().end(); ++p) + polygon_normals.push_back (normal (mesh, *p)); + ++progress; + + // Create some memory to work with: + // Stores a flag for each voxel as encoded in enum vox_mesh_t + Header H (image); + auto init_seg = Image::scratch (H); + for (auto l = Loop(init_seg) (init_seg); l; ++l) + init_seg.value() = vox_mesh_t::UNDEFINED; + + // Map each polygon to the underlying voxels + for (size_t poly_index = 0; poly_index != mesh.num_polygons(); ++poly_index) { + + const size_t num_vertices = (poly_index < mesh.num_triangles()) ? 3 : 4; + + // Figure out the voxel extent of this polygon in three dimensions + Vox lower_bound (H.size(0)-1, H.size(1)-1, H.size(2)-1), upper_bound (0, 0, 0); + VertexList this_poly_verts; + if (num_vertices == 3) + mesh.load_triangle_vertices (this_poly_verts, poly_index); + else + mesh.load_quad_vertices (this_poly_verts, poly_index - mesh.num_triangles()); + for (VertexList::const_iterator v = this_poly_verts.begin(); v != this_poly_verts.end(); ++v) { + for (size_t axis = 0; axis != 3; ++axis) { + const int this_axis_voxel = std::round((*v)[axis]); + lower_bound[axis] = std::min (lower_bound[axis], this_axis_voxel); + upper_bound[axis] = std::max (upper_bound[axis], this_axis_voxel); + } + } - // Figure out the voxel extent of this polygon in three dimensions - Vox lower_bound (H.size(0)-1, H.size(1)-1, H.size(2)-1), upper_bound (0, 0, 0); - VertexList this_poly_verts; - if (num_vertices == 3) - mesh.load_triangle_vertices (this_poly_verts, poly_index); - else - mesh.load_quad_vertices (this_poly_verts, poly_index - mesh.num_triangles()); - for (VertexList::const_iterator v = this_poly_verts.begin(); v != this_poly_verts.end(); ++v) { + // Constrain to lie within the dimensions of the image for (size_t axis = 0; axis != 3; ++axis) { - const int this_axis_voxel = std::round((*v)[axis]); - lower_bound[axis] = std::min (lower_bound[axis], this_axis_voxel); - upper_bound[axis] = std::max (upper_bound[axis], this_axis_voxel); + lower_bound[axis] = std::max(0, lower_bound[axis]); + upper_bound[axis] = std::min(int(H.size(axis)-1), upper_bound[axis]); } - } - // Constrain to lie within the dimensions of the image - for (size_t axis = 0; axis != 3; ++axis) { - lower_bound[axis] = std::max(0, lower_bound[axis]); - upper_bound[axis] = std::min(int(H.size(axis)-1), upper_bound[axis]); - } - - // For all voxels within this rectangular region, assign this polygon to the map - // Use the Separating Axis Theorem to be more stringent as to which voxels this - // polygon will be processed in - auto overlap = [&] (const Vox& vox, const size_t poly_index) -> bool { + // For all voxels within this rectangular region, assign this polygon to the map + // Use the Separating Axis Theorem to be more 
stringent as to which voxels this + // polygon will be processed in + auto overlap = [&] (const Vox& vox, const size_t poly_index) -> bool { + + VertexList vertices; + if (num_vertices == 3) + mesh.load_triangle_vertices (vertices, poly_index); + else + mesh.load_quad_vertices (vertices, poly_index - mesh.num_triangles()); + + // Test whether or not the two objects can be separated via projection onto an axis + auto separating_axis = [&] (const Eigen::Vector3& axis) -> bool { + default_type voxel_low = std::numeric_limits::infinity(); + default_type voxel_high = -std::numeric_limits::infinity(); + default_type poly_low = std::numeric_limits::infinity(); + default_type poly_high = -std::numeric_limits::infinity(); + + static const Eigen::Vector3 voxel_offsets[8] = { { -0.5, -0.5, -0.5 }, + { -0.5, -0.5, 0.5 }, + { -0.5, 0.5, -0.5 }, + { -0.5, 0.5, 0.5 }, + { 0.5, -0.5, -0.5 }, + { 0.5, -0.5, 0.5 }, + { 0.5, 0.5, -0.5 }, + { 0.5, 0.5, 0.5 } }; + + for (size_t i = 0; i != 8; ++i) { + const Eigen::Vector3 v (vox.matrix().cast() + voxel_offsets[i]); + const default_type projection = axis.dot (v); + voxel_low = std::min (voxel_low, projection); + voxel_high = std::max (voxel_high, projection); + } - VertexList vertices; - if (num_vertices == 3) - mesh.load_triangle_vertices (vertices, poly_index); - else - mesh.load_quad_vertices (vertices, poly_index - mesh.num_triangles()); - - // Test whether or not the two objects can be separated via projection onto an axis - auto separating_axis = [&] (const Eigen::Vector3& axis) -> bool { - default_type voxel_low = std::numeric_limits::infinity(); - default_type voxel_high = -std::numeric_limits::infinity(); - default_type poly_low = std::numeric_limits::infinity(); - default_type poly_high = -std::numeric_limits::infinity(); - - static const Eigen::Vector3 voxel_offsets[8] = { { -0.5, -0.5, -0.5 }, - { -0.5, -0.5, 0.5 }, - { -0.5, 0.5, -0.5 }, - { -0.5, 0.5, 0.5 }, - { 0.5, -0.5, -0.5 }, - { 0.5, -0.5, 0.5 }, - { 0.5, 0.5, -0.5 }, - { 0.5, 0.5, 0.5 } }; - - for (size_t i = 0; i != 8; ++i) { - const Eigen::Vector3 v (vox.matrix().cast() + voxel_offsets[i]); - const default_type projection = axis.dot (v); - voxel_low = std::min (voxel_low, projection); - voxel_high = std::max (voxel_high, projection); - } + for (const auto& v : vertices) { + const default_type projection = axis.dot (v); + poly_low = std::min (poly_low, projection); + poly_high = std::max (poly_high, projection); + } - for (const auto& v : vertices) { - const default_type projection = axis.dot (v); - poly_low = std::min (poly_low, projection); - poly_high = std::max (poly_high, projection); + // Is this a separating axis? + return (poly_low > voxel_high || voxel_low > poly_high); + }; + + // The following axes need to be tested as potential separating axes: + // x, y, z + // All cross-products between voxel and polygon edges + // Polygon normal + for (size_t i = 0; i != 3; ++i) { + Eigen::Vector3 axis (0.0, 0.0, 0.0); + axis[i] = 1.0; + if (separating_axis (axis)) + return false; + for (size_t j = 0; j != num_vertices; ++j) { + if (separating_axis (axis.cross (vertices[j+1] - vertices[j]))) + return false; + } + if (separating_axis (axis.cross (vertices[num_vertices-1] - vertices[0]))) + return false; } + if (separating_axis (polygon_normals[poly_index])) + return false; - // Is this a separating axis? 
- return (poly_low > voxel_high || voxel_low > poly_high); + // No axis has been found that separates the two objects + // Therefore, the two objects overlap + return true; }; - // The following axes need to be tested as potential separating axes: - // x, y, z - // All cross-products between voxel and polygon edges - // Polygon normal - for (size_t i = 0; i != 3; ++i) { - Eigen::Vector3 axis (0.0, 0.0, 0.0); - axis[i] = 1.0; - if (separating_axis (axis)) - return false; - for (size_t j = 0; j != num_vertices; ++j) { - if (separating_axis (axis.cross (vertices[j+1] - vertices[j]))) - return false; - } - if (separating_axis (axis.cross (vertices[num_vertices-1] - vertices[0]))) - return false; - } - if (separating_axis (polygon_normals[poly_index])) - return false; - - // No axis has been found that separates the two objects - // Therefore, the two objects overlap - return true; - }; - - Vox voxel; - for (voxel[2] = lower_bound[2]; voxel[2] <= upper_bound[2]; ++voxel[2]) { - for (voxel[1] = lower_bound[1]; voxel[1] <= upper_bound[1]; ++voxel[1]) { - for (voxel[0] = lower_bound[0]; voxel[0] <= upper_bound[0]; ++voxel[0]) { - // Rather than adding this polygon to the list of polygons to test for - // every single voxel within this 3D bounding box, only test it within - // those voxels that the polygon actually intersects - if (overlap (voxel, poly_index)) { - vector this_voxel_polys; - // Has this voxel already been intersected by at least one polygon? - // If it has, we need to concatenate this polygon to the list - // (which involves deleting the existing entry then re-writing the concatenated list); - // If it has not, we're adding a new entry to the list of voxels to be tested, - // with only one entry in the list for that voxel - Vox2Poly::const_iterator existing = voxel2poly.find (voxel); - if (existing != voxel2poly.end()) { - this_voxel_polys = existing->second; - voxel2poly.erase (existing); - } else { - // Only call this once each voxel, regardless of the number of intersecting polygons - assign_pos_of (voxel).to (init_seg); - init_seg.value() = vox_mesh_t::ON_MESH; - } - this_voxel_polys.push_back (poly_index); - voxel2poly.insert (std::make_pair (voxel, this_voxel_polys)); - } } } } + Vox voxel; + for (voxel[2] = lower_bound[2]; voxel[2] <= upper_bound[2]; ++voxel[2]) { + for (voxel[1] = lower_bound[1]; voxel[1] <= upper_bound[1]; ++voxel[1]) { + for (voxel[0] = lower_bound[0]; voxel[0] <= upper_bound[0]; ++voxel[0]) { + // Rather than adding this polygon to the list of polygons to test for + // every single voxel within this 3D bounding box, only test it within + // those voxels that the polygon actually intersects + if (overlap (voxel, poly_index)) { + vector this_voxel_polys; + // Has this voxel already been intersected by at least one polygon? 
+ // If it has, we need to concatenate this polygon to the list + // (which involves deleting the existing entry then re-writing the concatenated list); + // If it has not, we're adding a new entry to the list of voxels to be tested, + // with only one entry in the list for that voxel + Vox2Poly::const_iterator existing = voxel2poly.find (voxel); + if (existing != voxel2poly.end()) { + this_voxel_polys = existing->second; + voxel2poly.erase (existing); + } else { + // Only call this once each voxel, regardless of the number of intersecting polygons + assign_pos_of (voxel).to (init_seg); + init_seg.value() = vox_mesh_t::ON_MESH; + } + this_voxel_polys.push_back (poly_index); + voxel2poly.insert (std::make_pair (voxel, this_voxel_polys)); + } } } } - } - ++progress; - - - // For *any* voxel not on the mesh but neighbouring a voxel in which a vertex lies, - // track a floating-point value corresponding to its distance from the plane defined - // by the normal at the vertex. - // Each voxel not directly on the mesh should then be assigned as prelim_inside or prelim_outside - // depending on whether the summed value is positive or negative - auto sum_distances = Image::scratch (H, "Sum of distances from polygon planes"); - Vox adj_voxel; - for (size_t i = 0; i != mesh.num_vertices(); ++i) { - const Vox centre_voxel (mesh.vert(i)); - for (adj_voxel[2] = centre_voxel[2]-1; adj_voxel[2] <= centre_voxel[2]+1; ++adj_voxel[2]) { - for (adj_voxel[1] = centre_voxel[1]-1; adj_voxel[1] <= centre_voxel[1]+1; ++adj_voxel[1]) { - for (adj_voxel[0] = centre_voxel[0]-1; adj_voxel[0] <= centre_voxel[0]+1; ++adj_voxel[0]) { - if (!is_out_of_bounds (H, adj_voxel) && (adj_voxel - centre_voxel).any()) { - const Eigen::Vector3 offset (adj_voxel.cast().matrix() - mesh.vert(i)); - const default_type dp_normal = offset.dot (mesh.norm(i)); - const default_type offset_on_plane = (offset - (mesh.norm(i) * dp_normal)).norm(); - assign_pos_of (adj_voxel).to (sum_distances); - // If offset_on_plane is close to zero, this vertex should contribute strongly toward - // the sum of distances from the surface within this voxel - sum_distances.value() += (1.0 / (1.0 + offset_on_plane)) * dp_normal; + } + ++progress; + + + // For *any* voxel not on the mesh but neighbouring a voxel in which a vertex lies, + // track a floating-point value corresponding to its distance from the plane defined + // by the normal at the vertex. 
+ // Each voxel not directly on the mesh should then be assigned as prelim_inside or prelim_outside + // depending on whether the summed value is positive or negative + auto sum_distances = Image::scratch (H, "Sum of distances from polygon planes"); + Vox adj_voxel; + for (size_t i = 0; i != mesh.num_vertices(); ++i) { + const Vox centre_voxel (mesh.vert(i)); + for (adj_voxel[2] = centre_voxel[2]-1; adj_voxel[2] <= centre_voxel[2]+1; ++adj_voxel[2]) { + for (adj_voxel[1] = centre_voxel[1]-1; adj_voxel[1] <= centre_voxel[1]+1; ++adj_voxel[1]) { + for (adj_voxel[0] = centre_voxel[0]-1; adj_voxel[0] <= centre_voxel[0]+1; ++adj_voxel[0]) { + if (!is_out_of_bounds (H, adj_voxel) && (adj_voxel - centre_voxel).any()) { + const Eigen::Vector3 offset (adj_voxel.cast().matrix() - mesh.vert(i)); + const default_type dp_normal = offset.dot (mesh.norm(i)); + const default_type offset_on_plane = (offset - (mesh.norm(i) * dp_normal)).norm(); + assign_pos_of (adj_voxel).to (sum_distances); + // If offset_on_plane is close to zero, this vertex should contribute strongly toward + // the sum of distances from the surface within this voxel + sum_distances.value() += (1.0 / (1.0 + offset_on_plane)) * dp_normal; + } } } } } - } - ++progress; - for (auto l = Loop(init_seg) (init_seg, sum_distances); l; ++l) { - if (static_cast (sum_distances.value()) != 0.0f && init_seg.value() != vox_mesh_t::ON_MESH) - init_seg.value() = sum_distances.value() < 0.0 ? vox_mesh_t::PRELIM_INSIDE : vox_mesh_t::PRELIM_OUTSIDE; - } - ++progress; - - - // Can't guarantee that mesh might have a single isolated polygon pointing the wrong way - // Therefore, need to: - // - Select voxels both inside and outside the mesh to expand - // - When expanding each region, count the number of pre-assigned voxels both inside and outside - // - For the final region selection, assign values to voxels based on a majority vote - Image seed (init_seg); - vector to_fill; - std::stack to_expand; - for (auto l = Loop(seed) (seed); l; ++l) { - if (seed.value() == vox_mesh_t::PRELIM_INSIDE || seed.value() == vox_mesh_t::PRELIM_OUTSIDE) { - size_t prelim_inside_count = 0, prelim_outside_count = 0; - if (seed.value() == vox_mesh_t::PRELIM_INSIDE) - prelim_inside_count = 1; - else - prelim_outside_count = 1; - to_expand.push (Vox (seed.index(0), seed.index(1), seed.index(2))); - to_fill.assign (1, to_expand.top()); - do { - const Vox voxel (to_expand.top()); - to_expand.pop(); - for (size_t adj_vox_idx = 0; adj_vox_idx != 6; ++adj_vox_idx) { - const Vox adj_voxel (voxel + adj_voxels[adj_vox_idx]); - assign_pos_of (adj_voxel).to (init_seg); - if (!is_out_of_bounds (init_seg)) { - const uint8_t adj_value = init_seg.value(); - if (adj_value == vox_mesh_t::UNDEFINED || adj_value == vox_mesh_t::PRELIM_INSIDE || adj_value == vox_mesh_t::PRELIM_OUTSIDE) { - if (adj_value == vox_mesh_t::PRELIM_INSIDE) - ++prelim_inside_count; - else if (adj_value == vox_mesh_t::PRELIM_OUTSIDE) - ++prelim_outside_count; - to_expand.push (adj_voxel); - to_fill.push_back (adj_voxel); - init_seg.value() = vox_mesh_t::FILL_TEMP; + ++progress; + for (auto l = Loop(init_seg) (init_seg, sum_distances); l; ++l) { + if (static_cast (sum_distances.value()) != 0.0f && init_seg.value() != vox_mesh_t::ON_MESH) + init_seg.value() = sum_distances.value() < 0.0 ? 
vox_mesh_t::PRELIM_INSIDE : vox_mesh_t::PRELIM_OUTSIDE; + } + ++progress; + + + // Can't guarantee that mesh might have a single isolated polygon pointing the wrong way + // Therefore, need to: + // - Select voxels both inside and outside the mesh to expand + // - When expanding each region, count the number of pre-assigned voxels both inside and outside + // - For the final region selection, assign values to voxels based on a majority vote + Image seed (init_seg); + vector to_fill; + std::stack to_expand; + for (auto l = Loop(seed) (seed); l; ++l) { + if (seed.value() == vox_mesh_t::PRELIM_INSIDE || seed.value() == vox_mesh_t::PRELIM_OUTSIDE) { + size_t prelim_inside_count = 0, prelim_outside_count = 0; + if (seed.value() == vox_mesh_t::PRELIM_INSIDE) + prelim_inside_count = 1; + else + prelim_outside_count = 1; + to_expand.push (Vox (seed.index(0), seed.index(1), seed.index(2))); + to_fill.assign (1, to_expand.top()); + do { + const Vox voxel (to_expand.top()); + to_expand.pop(); + for (size_t adj_vox_idx = 0; adj_vox_idx != 6; ++adj_vox_idx) { + const Vox adj_voxel (voxel + adj_voxels[adj_vox_idx]); + assign_pos_of (adj_voxel).to (init_seg); + if (!is_out_of_bounds (init_seg)) { + const uint8_t adj_value = init_seg.value(); + if (adj_value == vox_mesh_t::UNDEFINED || adj_value == vox_mesh_t::PRELIM_INSIDE || adj_value == vox_mesh_t::PRELIM_OUTSIDE) { + if (adj_value == vox_mesh_t::PRELIM_INSIDE) + ++prelim_inside_count; + else if (adj_value == vox_mesh_t::PRELIM_OUTSIDE) + ++prelim_outside_count; + to_expand.push (adj_voxel); + to_fill.push_back (adj_voxel); + init_seg.value() = vox_mesh_t::FILL_TEMP; + } } } + } while (to_expand.size()); + if (prelim_inside_count == prelim_outside_count) + throw Exception ("Mapping mesh to image failed: Unable to label connected voxel region as inside or outside mesh"); + const vox_mesh_t fill_value = (prelim_inside_count > prelim_outside_count ? vox_mesh_t::INSIDE : vox_mesh_t::OUTSIDE); + for (auto voxel : to_fill) { + assign_pos_of (voxel).to (init_seg); + init_seg.value() = fill_value; } - } while (to_expand.size()); - if (prelim_inside_count == prelim_outside_count) - throw Exception ("Mapping mesh to image failed: Unable to label connected voxel region as inside or outside mesh"); - const vox_mesh_t fill_value = (prelim_inside_count > prelim_outside_count ? 
vox_mesh_t::INSIDE : vox_mesh_t::OUTSIDE); - for (auto voxel : to_fill) { - assign_pos_of (voxel).to (init_seg); - init_seg.value() = fill_value; + to_fill.clear(); } - to_fill.clear(); } - } - ++progress; + ++progress; - // Any voxel not yet processed must lie outside the structure(s) - for (auto l = Loop(init_seg) (init_seg); l; ++l) { - if (init_seg.value() == vox_mesh_t::UNDEFINED) - init_seg.value() = vox_mesh_t::OUTSIDE; - } - ++progress; - - // Write initial ternary segmentation - for (auto l = Loop (init_seg) (init_seg, image); l; ++l) { - switch (init_seg.value()) { - case vox_mesh_t (UNDEFINED): throw Exception ("Code error: poor filling of initial mesh estimate"); break; - case vox_mesh_t (ON_MESH): image.value() = 0.5; break; - case vox_mesh_t (OUTSIDE): image.value() = 0.0; break; - case vox_mesh_t (INSIDE): image.value() = 1.0; break; - default: assert (0); + // Any voxel not yet processed must lie outside the structure(s) + for (auto l = Loop(init_seg) (init_seg); l; ++l) { + if (init_seg.value() == vox_mesh_t::UNDEFINED) + init_seg.value() = vox_mesh_t::OUTSIDE; } + ++progress; + + // Write initial ternary segmentation + for (auto l = Loop (init_seg) (init_seg, image); l; ++l) { + switch (init_seg.value()) { + case vox_mesh_t (UNDEFINED): throw Exception ("Code error: poor filling of initial mesh estimate"); break; + case vox_mesh_t (ON_MESH): image.value() = 0.5; break; + case vox_mesh_t (OUTSIDE): image.value() = 0.0; break; + case vox_mesh_t (INSIDE): image.value() = 1.0; break; + default: assert (0); + } + } + } - ++progress; // Construct class functors necessary to calculate, for each voxel intersected by the // surface, the partial volume fraction @@ -484,25 +488,28 @@ namespace MR class Sink { MEMALIGN(Sink) public: - Sink (Image& image) : - image (image) { } + Sink (Image& image, const size_t voxel_count) : + image (image), + progress ("Calculating partial volume fractions of edge voxels", voxel_count) { } bool operator() (const std::pair& in) { assign_pos_of (in.first).to (image); assert (!is_out_of_bounds (image)); image.value() = in.second; + ++progress; return true; } private: Image image; + ProgressBar progress; }; Source source (voxel2poly); Pipe pipe (mesh, polygon_normals); - Sink sink (image); + Sink sink (image, voxel2poly.size()); Thread::run_queue (source, std::pair>(), From 4a9083fbc4f14fb8b87cac9de0042f884df9d6b6 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Wed, 4 Jul 2018 22:10:04 +1000 Subject: [PATCH 0159/1471] 5ttgen hsvs: Combine CC segments before smoothing --- lib/mrtrix3/_5ttgen/hsvs.py | 31 +++++++++++++++++++++---------- 1 file changed, 21 insertions(+), 10 deletions(-) diff --git a/lib/mrtrix3/_5ttgen/hsvs.py b/lib/mrtrix3/_5ttgen/hsvs.py index 0448b591e2..41269f0d80 100644 --- a/lib/mrtrix3/_5ttgen/hsvs.py +++ b/lib/mrtrix3/_5ttgen/hsvs.py @@ -202,16 +202,16 @@ def checkDir(dirpath): (63, 1, 'Right-choroid-plexus'), (72, 3, '5th-Ventricle'), (192, 2, 'Corpus_Callosum'), - (250, 2, 'Fornix'), - # TODO Would rather combine CC segments into a single mask before converting to mesh - (251, 2, 'CC_Posterior'), - (252, 2, 'CC_Mid_Posterior'), - (253, 2, 'CC_Central'), - (254, 2, 'CC_Mid_Anterior'), - (255, 2, 'CC_Anterior') ] - # TODO Need to do something about anterior commissure + (250, 2, 'Fornix') ] + corpus_callosum = [ (251, 'CC_Posterior'), + (252, 'CC_Mid_Posterior'), + (253, 'CC_Central'), + (254, 'CC_Mid_Anterior'), + (255, 'CC_Anterior') ] + + # TODO Need to do something about anterior commissure # Get the main cerebrum segments; 
these are already smooth # FIXME There may be some minor mismatch between the WM and pial segments within the medial section @@ -231,7 +231,7 @@ def checkDir(dirpath): progress.done() # Get other structures that need to be converted from the voxel image - progress = app.progressBar('Smoothing non-cortical structures segmented by FreeSurfer', len(structures)) + progress = app.progressBar('Smoothing non-cortical structures segmented by FreeSurfer', len(structures) + 1) for (index, tissue, name) in structures: # Don't segment anything for which we have instead obtained estimates using FIRST # Also don't segment the hippocampi from the aparc+aseg image if we're using the hippocampal subfields module @@ -252,6 +252,17 @@ def checkDir(dirpath): run.command('mesh2voxel ' + smoothed_mesh_path + ' ' + template_image + ' ' + name + '.mif') file.delTemporary(smoothed_mesh_path) progress.increment() + + # Combine corpus callosum segments before smoothing + for (index, name) in corpus_callosum: + run.command('mrcalc ' + aparc_image + ' ' + str(index) + ' -eq ' + name + '.mif -datatype bit') + cc_init_mesh_path = 'combined_corpus_callosum_init.vtk' + cc_smoothed_mesh_path = 'combined_corpus_callosum.vtk' + run.command('mrmath ' + ' '.join([ name + '.mif' for (index, name) in corpus_callosum ]) + ' sum - | mrmesh - -threshold 0.5 ' + cc_init_mesh_path) + run.command('meshfilter ' + cc_init_mesh_path + ' smooth ' + cc_smoothed_mesh_path) + file.delTemporary(cc_init_mesh_path) + run.command('mesh2voxel ' + cc_smoothed_mesh_path + ' ' + template_image + ' combined_corpus_callosum.mif') + file.delTemporary(cc_smoothed_mesh_path) progress.done() # Construct images with the partial volume of each tissue @@ -262,7 +273,7 @@ def checkDir(dirpath): if tissue == 0: image_list.extend([ 'lh.pial.mif', 'rh.pial.mif' ]) elif tissue == 2: - image_list.extend([ 'lh.white.mif', 'rh.white.mif' ]) + image_list.extend([ 'lh.white.mif', 'rh.white.mif', 'combined_corpus_callosum.mif' ]) run.command('mrmath ' + ' '.join(image_list) + ' sum - | mrcalc - 1.0 -min tissue' + str(tissue) + '_init.mif') # TODO Update file.delTemporary() to support list input for entry in image_list: From b4cb9b12a6f7d2dda5692511e2d3c7cc6abf1726 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Wed, 4 Jul 2018 22:12:10 +1000 Subject: [PATCH 0160/1471] Surface::Filter::Smooth: Skip tiny surfaces Attempting to smooth a very small surface (e.g. 1 voxel from voxel2mesh) seems to sometimes result in erroneous behaviour in a subsequent mesh2voxel call, where the entire image is labelled as being inside the surface. This is a quick fix to prevent this behaviour where it's been observed, but if a similar result is observed elsewhere then more comprehensive digging will need to be performed. 
--- src/surface/filter/smooth.cpp | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/surface/filter/smooth.cpp b/src/surface/filter/smooth.cpp index 4588d605dc..2f923ebe98 100644 --- a/src/surface/filter/smooth.cpp +++ b/src/surface/filter/smooth.cpp @@ -42,6 +42,11 @@ namespace MR const size_t T = in.num_triangles(); if (V == 3*T) throw Exception ("Cannot perform smoothing on this mesh: no triangulation information"); + if (V <= 8) { + WARN ("No mesh smoothing applied; structure is too small"); + out = in; + return; + } // Pre-compute polygon centroids and areas VertexList centroids; From 80f0f76b4a118a3313a0ef2d3d5c83a2255b769e Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Fri, 16 Mar 2018 16:47:34 +1100 Subject: [PATCH 0161/1471] mesh2voxel: Ensure output image is floating-point --- cmd/mesh2voxel.cpp | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/cmd/mesh2voxel.cpp b/cmd/mesh2voxel.cpp index 273057c7ac..fc60475ecf 100644 --- a/cmd/mesh2voxel.cpp +++ b/cmd/mesh2voxel.cpp @@ -38,7 +38,7 @@ void usage () SYNOPSIS = "Convert a mesh surface to a partial volume estimation image"; - REFERENCES + REFERENCES + "Smith, R. E.; Tournier, J.-D.; Calamante, F. & Connelly, A. " // Internal "Anatomically-constrained tractography: Improved diffusion MRI streamlines tractography through effective use of anatomical information. " "NeuroImage, 2012, 62, 1924-1938"; @@ -62,6 +62,11 @@ void run () Header template_header = Header::open (argument[1]); check_3D_nonunity (template_header); + // Ensure that a floating-point representation is used for the output image, + // as is required for representing partial volumes + template_header.datatype() = DataType::Float32; + template_header.datatype().set_byte_order_native(); + // Create the output image template_header.datatype() = DataType::Float32; template_header.datatype().set_byte_order_native(); From 0a5c6aa831ee4780c6b4d1c96c1323024548f820 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Fri, 16 Mar 2018 17:02:36 +1100 Subject: [PATCH 0162/1471] mrtrix3.fsl: Fix exeName() function --- lib/mrtrix3/fsl.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/mrtrix3/fsl.py b/lib/mrtrix3/fsl.py index 43f2e1d9d9..2399ff7d2e 100644 --- a/lib/mrtrix3/fsl.py +++ b/lib/mrtrix3/fsl.py @@ -71,14 +71,14 @@ def eddyBinary(cuda): #pylint: disable=unused-variable # makes it more convenient to locate these commands. # Note that if FSL 4 and 5 are installed side-by-side, the approach taken in this # function will select the version 5 executable. 
-def exeName(name): #pylint: disable=unused-variable +def exeName(name, required=True): #pylint: disable=unused-variable from mrtrix3 import app from distutils.spawn import find_executable if find_executable('fsl5.0-' + name): output = 'fsl5.0-' + name elif find_executable(name): output = name - else: + elif required: app.error('Could not find FSL program \"' + name + '\"; please verify FSL install') app.debug(output) return output From 7995c423165aaad6c28b9a4865d8cee5f21a1507 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Sat, 14 Apr 2018 15:14:05 +1000 Subject: [PATCH 0163/1471] Fix compilation errors in src/surface/algos/ --- src/surface/algo/image2mesh.h | 4 ++-- src/surface/algo/mesh2image.cpp | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/surface/algo/image2mesh.h b/src/surface/algo/image2mesh.h index 78468cee07..047181a3d5 100644 --- a/src/surface/algo/image2mesh.h +++ b/src/surface/algo/image2mesh.h @@ -74,7 +74,7 @@ namespace MR // refers to the lower corner of the voxel; that way searches for existing // vertices can be done using a simple map - Vox pos; + Vox pos (0, 0, 0); for (auto loop = Loop(voxel) (voxel); loop; ++loop) { if (voxel.value()) { @@ -444,7 +444,7 @@ namespace MR ImageType voxel (input_image); float in_vertex_values[8]; std::map< Vox, std::map > input_vertex_pair_to_output_vertex_index_map; - Vox lower_corner; + Vox lower_corner (-1, -1, -1); for (lower_corner[2] = -1; lower_corner[2] != voxel.size(2); ++lower_corner[2]) { for (lower_corner[1] = -1; lower_corner[1] != voxel.size(1); ++lower_corner[1]) { for (lower_corner[0] = -1; lower_corner[0] != voxel.size(0); ++lower_corner[0]) { diff --git a/src/surface/algo/mesh2image.cpp b/src/surface/algo/mesh2image.cpp index 59866b4aea..1f5211833b 100644 --- a/src/surface/algo/mesh2image.cpp +++ b/src/surface/algo/mesh2image.cpp @@ -175,7 +175,7 @@ namespace MR return true; }; - Vox voxel; + Vox voxel (lower_bound[0], lower_bound[1], lower_bound[2]); for (voxel[2] = lower_bound[2]; voxel[2] <= upper_bound[2]; ++voxel[2]) { for (voxel[1] = lower_bound[1]; voxel[1] <= upper_bound[1]; ++voxel[1]) { for (voxel[0] = lower_bound[0]; voxel[0] <= upper_bound[0]; ++voxel[0]) { From 3d31fb0e85b348beea665cfd3435b28d7fae6016 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Sat, 14 Apr 2018 17:05:56 +1000 Subject: [PATCH 0164/1471] Further fix of compilation errors in src/surface/algos Follows incomplete solution in 48995c36. 
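(A minimal standalone sketch, not MRtrix code: it mirrors the Vox class from src/surface/types.h with a hypothetical main(), and uses Eigen::Vector3d as a stand-in for the project's Eigen::Vector3 typedef, to show why the explicit default constructor added below is needed. A using-declaration that inherits base-class constructors never brings in the base's default constructor, and declaring any other constructor suppresses the implicit one, so a plain "Vox voxel;" cannot compile until a default constructor is spelled out.)

#include <cmath>
#include <Eigen/Dense>

class Vox : public Eigen::Array3i {
  public:
    using Eigen::Array3i::Array3i;   // inherits the (int, int, int) constructor, but not a default one
    Vox (const Eigen::Vector3d& p) : // any user-declared constructor suppresses the implicit default
        Eigen::Array3i (int(std::round (p[0])), int(std::round (p[1])), int(std::round (p[2]))) { }
    Vox () : Eigen::Array3i (-1, -1, -1) { } // without this, "Vox voxel;" is ill-formed
};

int main ()
{
  Vox voxel;   // compiles only because of the explicit default constructor
  return voxel[0] == -1 ? 0 : 1;
}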
--- src/surface/algo/image2mesh.h | 4 ++-- src/surface/algo/mesh2image.cpp | 2 +- src/surface/types.h | 1 + 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/src/surface/algo/image2mesh.h b/src/surface/algo/image2mesh.h index 047181a3d5..78468cee07 100644 --- a/src/surface/algo/image2mesh.h +++ b/src/surface/algo/image2mesh.h @@ -74,7 +74,7 @@ namespace MR // refers to the lower corner of the voxel; that way searches for existing // vertices can be done using a simple map - Vox pos (0, 0, 0); + Vox pos; for (auto loop = Loop(voxel) (voxel); loop; ++loop) { if (voxel.value()) { @@ -444,7 +444,7 @@ namespace MR ImageType voxel (input_image); float in_vertex_values[8]; std::map< Vox, std::map > input_vertex_pair_to_output_vertex_index_map; - Vox lower_corner (-1, -1, -1); + Vox lower_corner; for (lower_corner[2] = -1; lower_corner[2] != voxel.size(2); ++lower_corner[2]) { for (lower_corner[1] = -1; lower_corner[1] != voxel.size(1); ++lower_corner[1]) { for (lower_corner[0] = -1; lower_corner[0] != voxel.size(0); ++lower_corner[0]) { diff --git a/src/surface/algo/mesh2image.cpp b/src/surface/algo/mesh2image.cpp index 1f5211833b..59866b4aea 100644 --- a/src/surface/algo/mesh2image.cpp +++ b/src/surface/algo/mesh2image.cpp @@ -175,7 +175,7 @@ namespace MR return true; }; - Vox voxel (lower_bound[0], lower_bound[1], lower_bound[2]); + Vox voxel; for (voxel[2] = lower_bound[2]; voxel[2] <= upper_bound[2]; ++voxel[2]) { for (voxel[1] = lower_bound[1]; voxel[1] <= upper_bound[1]; ++voxel[1]) { for (voxel[0] = lower_bound[0]; voxel[0] <= upper_bound[0]; ++voxel[0]) { diff --git a/src/surface/types.h b/src/surface/types.h index 39e1df3d7c..2c29edfaaf 100644 --- a/src/surface/types.h +++ b/src/surface/types.h @@ -39,6 +39,7 @@ namespace MR { MEMALIGN (Vox) public: using Eigen::Array3i::Array3i; + Vox () : Eigen::Array3i (-1, -1, -1) { } Vox (const Eigen::Vector3& p) : Eigen::Array3i (int(std::round (p[0])), int(std::round (p[1])), int(std::round (p[2]))) { } bool operator< (const Vox& i) const { From 7f515c75142caec4bad3f8d711e3e3bf63c9ea40 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Tue, 22 May 2018 22:48:07 +1000 Subject: [PATCH 0165/1471] mesh2voxel: Improve filling step Use an improved algorithm for determining and classifying voxels that are near the mesh but are not intersected by it. 
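(A rough sketch of the weighting this patch introduces, with hypothetical names rather than the code in the diff below: every voxel adjacent to a voxel containing a mesh vertex accumulates a signed distance from that vertex's tangent plane, down-weighted by how far the voxel centre sits from the vertex within the plane; the sign of the accumulated total then gives the preliminary inside/outside label.)

#include <Eigen/Dense>

// Contribution of one (vertex, neighbouring-voxel) pair to that voxel's running sum;
// a negative total marks the voxel as preliminarily inside, a positive total as outside
double plane_distance_contribution (const Eigen::Vector3d& voxel_centre,
                                    const Eigen::Vector3d& vertex,
                                    const Eigen::Vector3d& unit_normal)
{
  const Eigen::Vector3d offset = voxel_centre - vertex;
  const double signed_distance = offset.dot (unit_normal);                        // distance along the vertex normal
  const double in_plane_offset = (offset - unit_normal * signed_distance).norm(); // distance within the tangent plane
  // Vertices whose tangent plane passes close to the voxel centre contribute more strongly
  return signed_distance / (1.0 + in_plane_offset);
}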
Conflicts: src/surface/algo/mesh2image.cpp --- src/surface/algo/mesh2image.cpp | 82 +++++++++++++++++++-------------- 1 file changed, 47 insertions(+), 35 deletions(-) diff --git a/src/surface/algo/mesh2image.cpp b/src/surface/algo/mesh2image.cpp index 59866b4aea..54d6a7d8cc 100644 --- a/src/surface/algo/mesh2image.cpp +++ b/src/surface/algo/mesh2image.cpp @@ -44,7 +44,7 @@ namespace MR // For initial segmentation of mesh - identify voxels on the mesh, inside & outside enum vox_mesh_t { UNDEFINED, ON_MESH, PRELIM_OUTSIDE, PRELIM_INSIDE, FILL_TEMP, OUTSIDE, INSIDE }; - ProgressBar progress ("converting mesh to partial volume image", 7); + ProgressBar progress ("converting mesh to partial volume image", 8); // For speed, want the vertex data to be in voxel positions Filter::VertexTransform transform (image); @@ -76,6 +76,8 @@ namespace MR // Stores a flag for each voxel as encoded in enum vox_mesh_t Header H (image); auto init_seg = Image::scratch (H); + for (auto l = Loop(init_seg) (init_seg); l; ++l) + init_seg.value() = vox_mesh_t::UNDEFINED; // For every voxel, stores those polygons that may intersect the voxel using Vox2Poly = std::map< Vox, vector >; @@ -196,7 +198,7 @@ namespace MR } else { // Only call this once each voxel, regardless of the number of intersecting polygons assign_pos_of (voxel).to (init_seg); - init_seg.value() = ON_MESH; + init_seg.value() = vox_mesh_t::ON_MESH; } this_voxel_polys.push_back (poly_index); voxel2poly.insert (std::make_pair (voxel, this_voxel_polys)); @@ -206,38 +208,39 @@ namespace MR ++progress; - // New implementation of filling in the centre of the mesh - // Rather than selecting the eight external corners and filling in outside the - // mesh (which may omit some areas), selecting anything remaining as 'inside', - // fill inwards from vertices according to their normals, and select anything - // remaining as 'outside'. - std::stack to_expand; + + // For *any* voxel not on the mesh but neighbouring a voxel in which a vertex lies, + // track a floating-point value corresponding to its distance from the plane defined + // by the normal at the vertex. 
+ // Each voxel not directly on the mesh should then be assigned as prelim_inside or prelim_outside + // depending on whether the summed value is positive or negative + auto sum_distances = Image::scratch (H, "Sum of distances from polygon planes"); + Vox adj_voxel; for (size_t i = 0; i != mesh.num_vertices(); ++i) { - const Vox voxel (mesh.vert (i)); - Eigen::Vector3 normal (mesh.norm (i)); - // Scale the normal such that the maximum length along any individual axis is 1.0 (but may be negative) - normal /= normal.array().abs().maxCoeff(); - // Use this to select an adjacent voxel outside the structure (based on the - const Vox outside_neighbour (voxel + Vox(normal)); - // Add this to the set of exterior voxels to be expanded if appropriate - assign_pos_of (outside_neighbour).to (init_seg); - if (!is_out_of_bounds (init_seg)) { - if (init_seg.value() == vox_mesh_t::UNDEFINED) { - init_seg.value() = vox_mesh_t::PRELIM_OUTSIDE; - //to_expand.push (outside_neighbour); - } - } - // Now do the same for inside the structure - const Vox inside_neighbour (voxel - Vox(normal)); - assign_pos_of (inside_neighbour).to (init_seg); - if (!is_out_of_bounds (init_seg)) { - if (init_seg.value() == vox_mesh_t::UNDEFINED) { - init_seg.value() = vox_mesh_t::PRELIM_INSIDE; - //to_expand.push (inside_neighbour); + const Vox centre_voxel (mesh.vert(i)); + for (adj_voxel[2] = centre_voxel[2]-1; adj_voxel[2] <= centre_voxel[2]+1; ++adj_voxel[2]) { + for (adj_voxel[1] = centre_voxel[1]-1; adj_voxel[1] <= centre_voxel[1]+1; ++adj_voxel[1]) { + for (adj_voxel[0] = centre_voxel[0]-1; adj_voxel[0] <= centre_voxel[0]+1; ++adj_voxel[0]) { + if (!is_out_of_bounds (H, adj_voxel) && (adj_voxel - centre_voxel).any()) { + const Eigen::Vector3 offset (adj_voxel.cast().matrix() - mesh.vert(i)); + const default_type dp_normal = offset.dot (mesh.norm(i)); + const default_type offset_on_plane = (offset - (mesh.norm(i) * dp_normal)).norm(); + assign_pos_of (adj_voxel).to (sum_distances); + // If offset_on_plane is close to zero, this vertex should contribute strongly toward + // the sum of distances from the surface within this voxel + sum_distances.value() += (1.0 / (1.0 + offset_on_plane)) * dp_normal; + } + } } } } ++progress; + for (auto l = Loop(init_seg) (init_seg, sum_distances); l; ++l) { + if (static_cast (sum_distances.value()) != 0.0f && init_seg.value() != vox_mesh_t::ON_MESH) + init_seg.value() = sum_distances.value() < 0.0 ? 
vox_mesh_t::PRELIM_INSIDE : vox_mesh_t::PRELIM_OUTSIDE; + } + ++progress; + // Can't guarantee that mesh might have a single isolated polygon pointing the wrong way // Therefore, need to: @@ -246,6 +249,7 @@ namespace MR // - For the final region selection, assign values to voxels based on a majority vote Image seed (init_seg); vector to_fill; + std::stack to_expand; for (auto l = Loop(seed) (seed); l; ++l) { if (seed.value() == vox_mesh_t::PRELIM_INSIDE || seed.value() == vox_mesh_t::PRELIM_OUTSIDE) { size_t prelim_inside_count = 0, prelim_outside_count = 0; @@ -362,7 +366,8 @@ namespace MR Vertex p (*i_p); p += Eigen::Vector3 (voxel[0], voxel[1], voxel[2]); - default_type best_min_edge_distance = -std::numeric_limits::infinity(); + default_type best_min_edge_distance_on_plane = -std::numeric_limits::infinity(); + //default_type best_interior_distance_from_plane = std::numeric_limits::infinity(); bool best_result_inside = false; // Only test against those polygons that are near this voxel @@ -373,7 +378,14 @@ namespace MR VertexList v; bool is_inside = false; - default_type min_edge_distance = std::numeric_limits::infinity(); + default_type min_edge_distance_on_plane = std::numeric_limits::infinity(); + + // FIXME + // If point does not lie within projection of polygon, compute the + // distance of the point projected onto the plane to the nearest edge of that polygon; + // use this distance to decide which polygon classifies the point + // If point does lie within projection of polygon (potentially more than one), then the + // polygon to which the distance from the plane is minimal classifies the point if (polygon_num_vertices == 3) { @@ -394,7 +406,7 @@ namespace MR edge_distances[0] = (p_on_plane-v[0]).dot (zero); edge_distances[1] = (p_on_plane-v[2]).dot (one); edge_distances[2] = (p_on_plane-v[1]).dot (two); - min_edge_distance = std::min (edge_distances[0], std::min (edge_distances[1], edge_distances[2])); + min_edge_distance_on_plane = std::min ( { edge_distances[0], edge_distances[1], edge_distances[2] } ); } else { @@ -430,14 +442,14 @@ namespace MR // Now, how far away is the point within the plane from this edge? const default_type this_edge_distance = (p_on_plane - p1).dot (edge_normal); - min_edge_distance = std::min (min_edge_distance, this_edge_distance); + min_edge_distance_on_plane = std::min (min_edge_distance_on_plane, this_edge_distance); } } - if (min_edge_distance > best_min_edge_distance) { - best_min_edge_distance = min_edge_distance; + if (min_edge_distance_on_plane > best_min_edge_distance_on_plane) { + best_min_edge_distance_on_plane = min_edge_distance_on_plane; best_result_inside = is_inside; } From c1abda0b4fadf7f58485e58cd676703e17ba2d95 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Wed, 4 Jul 2018 21:10:43 +1000 Subject: [PATCH 0166/1471] mesh2voxel: Tweak to partial volume estimation In some cases, a surface may intersect a single voxel at two locations that are a long distance from one another on the surface. When this happens, the directions of the normals of those polygons may be almost directly opposing. In this case, one does not want to choose whether or not a particular point is inside or outside the surface based on the polygon to which the point most accurately projects to the inside of the polygon; instead, if the point projects to the inside of more than one polygon, the polygon to which the point is geometrically closest should be used to determine whether the point is inside or outside the surface. 
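(A hedged sketch of the selection rule described above, using hypothetical names rather than the variables in mesh2image.cpp: polygons onto whose interior the point projects are preferred, and among those the geometrically closest plane decides inside/outside; only if the point projects inside none of the nearby polygons does the least-bad in-plane edge distance decide.)

#include <cmath>
#include <limits>
#include <vector>

struct Candidate {
  double distance_from_plane;        // signed; non-positive means behind the outward normal
  double min_edge_distance_on_plane; // positive means the projected point lies inside the polygon
};

bool point_is_inside (const std::vector<Candidate>& candidates)
{
  double best_abs_plane_distance = std::numeric_limits<double>::infinity();
  double best_edge_distance = -std::numeric_limits<double>::infinity();
  bool inside = false, interior_projection_found = false;
  for (const auto& c : candidates) {
    if (c.min_edge_distance_on_plane > 0.0) {
      // Point projects inside this polygon: the closest such plane decides
      if (std::abs (c.distance_from_plane) < best_abs_plane_distance) {
        best_abs_plane_distance = std::abs (c.distance_from_plane);
        inside = (c.distance_from_plane <= 0.0);
        interior_projection_found = true;
      }
    } else if (!interior_projection_found && c.min_edge_distance_on_plane > best_edge_distance) {
      // Fallback: the polygon whose projection the point misses by the least decides
      best_edge_distance = c.min_edge_distance_on_plane;
      inside = (c.distance_from_plane <= 0.0);
    }
  }
  return inside;
}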
--- src/surface/algo/mesh2image.cpp | 30 ++++++++++++++++++++---------- 1 file changed, 20 insertions(+), 10 deletions(-) diff --git a/src/surface/algo/mesh2image.cpp b/src/surface/algo/mesh2image.cpp index 54d6a7d8cc..d001821870 100644 --- a/src/surface/algo/mesh2image.cpp +++ b/src/surface/algo/mesh2image.cpp @@ -367,8 +367,8 @@ namespace MR p += Eigen::Vector3 (voxel[0], voxel[1], voxel[2]); default_type best_min_edge_distance_on_plane = -std::numeric_limits::infinity(); - //default_type best_interior_distance_from_plane = std::numeric_limits::infinity(); bool best_result_inside = false; + default_type best_min_distance_from_interior_projection = std::numeric_limits::infinity(); // Only test against those polygons that are near this voxel for (vector::const_iterator polygon_index = in.second.begin(); polygon_index != in.second.end(); ++polygon_index) { @@ -379,6 +379,7 @@ namespace MR bool is_inside = false; default_type min_edge_distance_on_plane = std::numeric_limits::infinity(); + default_type distance_from_plane = 0.0; // FIXME // If point does not lie within projection of polygon, compute the @@ -394,17 +395,18 @@ namespace MR // First: is it aligned with the normal? const Vertex poly_centre ((v[0] + v[1] + v[2]) * (1.0/3.0)); const Vertex diff (p - poly_centre); - is_inside = (diff.dot (n) <= 0.0); + distance_from_plane = diff.dot (n); + is_inside = (distance_from_plane <= 0.0); // Second: how well does it project onto this polygon? const Vertex p_on_plane (p - (n * (diff.dot (n)))); std::array edge_distances; - Vertex zero = (v[2]-v[0]).cross (n); zero.normalize(); - Vertex one = (v[1]-v[2]).cross (n); one .normalize(); + Vertex zero = (v[1]-v[2]).cross (n); zero.normalize(); + Vertex one = (v[2]-v[0]).cross (n); one .normalize(); Vertex two = (v[0]-v[1]).cross (n); two .normalize(); - edge_distances[0] = (p_on_plane-v[0]).dot (zero); - edge_distances[1] = (p_on_plane-v[2]).dot (one); + edge_distances[0] = (p_on_plane-v[2]).dot (zero); + edge_distances[1] = (p_on_plane-v[0]).dot (one); edge_distances[2] = (p_on_plane-v[1]).dot (two); min_edge_distance_on_plane = std::min ( { edge_distances[0], edge_distances[1], edge_distances[2] } ); @@ -418,7 +420,8 @@ namespace MR // First: is it aligned with the normal? const Vertex poly_centre ((v[0] + v[1] + v[2] + v[3]) * 0.25); const Vertex diff (p - poly_centre); - is_inside = (diff.dot (n) <= 0.0); + distance_from_plane = diff.dot (n); + is_inside = (distance_from_plane <= 0.0); // Second: how well does it project onto this polygon? 
const Vertex p_on_plane (p - (n * (diff.dot (n)))); @@ -448,9 +451,16 @@ namespace MR } - if (min_edge_distance_on_plane > best_min_edge_distance_on_plane) { - best_min_edge_distance_on_plane = min_edge_distance_on_plane; - best_result_inside = is_inside; + if (min_edge_distance_on_plane > 0.0) { + if (std::abs (distance_from_plane) < std::abs (best_min_distance_from_interior_projection)) { + best_min_distance_from_interior_projection = distance_from_plane; + best_result_inside = is_inside; + } + } else if (!std::isfinite (best_min_distance_from_interior_projection)) { + if (min_edge_distance_on_plane > best_min_edge_distance_on_plane) { + best_min_edge_distance_on_plane = min_edge_distance_on_plane; + best_result_inside = is_inside; + } } } From 0a7a202a0e55e925e7969a624fc70174caef7de7 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Wed, 4 Jul 2018 21:21:52 +1000 Subject: [PATCH 0167/1471] mesh2image: Better progress information Split progress information into two stages: The voxel-based segmentation of the surface, and the partial volume estimation for edge voxels. Conflicts: src/surface/algo/mesh2image.cpp --- src/surface/algo/mesh2image.cpp | 485 ++++++++++++++++---------------- 1 file changed, 246 insertions(+), 239 deletions(-) diff --git a/src/surface/algo/mesh2image.cpp b/src/surface/algo/mesh2image.cpp index d001821870..19c3cd7089 100644 --- a/src/surface/algo/mesh2image.cpp +++ b/src/surface/algo/mesh2image.cpp @@ -44,271 +44,275 @@ namespace MR // For initial segmentation of mesh - identify voxels on the mesh, inside & outside enum vox_mesh_t { UNDEFINED, ON_MESH, PRELIM_OUTSIDE, PRELIM_INSIDE, FILL_TEMP, OUTSIDE, INSIDE }; - ProgressBar progress ("converting mesh to partial volume image", 8); - // For speed, want the vertex data to be in voxel positions - Filter::VertexTransform transform (image); - transform.set_real2voxel(); Mesh mesh; - transform (mesh_realspace, mesh); - - // These are needed now for interior filling section of algorithm - if (!mesh.have_normals()) - mesh.calculate_normals(); - - static const Vox adj_voxels[6] = { { -1, 0, 0 }, - { +1, 0, 0 }, - { 0, -1, 0 }, - { 0, +1, 0 }, - { 0, 0, -1 }, - { 0, 0, +1 } }; - - // Compute normals for polygons vector polygon_normals; - polygon_normals.reserve (mesh.num_polygons()); - for (TriangleList::const_iterator p = mesh.get_triangles().begin(); p != mesh.get_triangles().end(); ++p) - polygon_normals.push_back (normal (mesh, *p)); - for (QuadList::const_iterator p = mesh.get_quads().begin(); p != mesh.get_quads().end(); ++p) - polygon_normals.push_back (normal (mesh, *p)); - ++progress; - - // Create some memory to work with: - // Stores a flag for each voxel as encoded in enum vox_mesh_t - Header H (image); - auto init_seg = Image::scratch (H); - for (auto l = Loop(init_seg) (init_seg); l; ++l) - init_seg.value() = vox_mesh_t::UNDEFINED; - - // For every voxel, stores those polygons that may intersect the voxel + + // For every edge voxel, stores those polygons that may intersect the voxel using Vox2Poly = std::map< Vox, vector >; Vox2Poly voxel2poly; - // Map each polygon to the underlying voxels - for (size_t poly_index = 0; poly_index != mesh.num_polygons(); ++poly_index) { - - const size_t num_vertices = (poly_index < mesh.num_triangles()) ? 
3 : 4; + { + ProgressBar progress ("Performing voxel-based segmentation of surface", 8); + + Filter::VertexTransform transform (image); + transform.set_real2voxel(); + transform (mesh_realspace, mesh); + ++progress; + + // These are needed now for interior filling section of algorithm + if (!mesh.have_normals()) + mesh.calculate_normals(); + + static const Vox adj_voxels[6] = { { -1, 0, 0 }, + { +1, 0, 0 }, + { 0, -1, 0 }, + { 0, +1, 0 }, + { 0, 0, -1 }, + { 0, 0, +1 } }; + + // Compute normals for polygons + polygon_normals.reserve (mesh.num_polygons()); + for (TriangleList::const_iterator p = mesh.get_triangles().begin(); p != mesh.get_triangles().end(); ++p) + polygon_normals.push_back (normal (mesh, *p)); + for (QuadList::const_iterator p = mesh.get_quads().begin(); p != mesh.get_quads().end(); ++p) + polygon_normals.push_back (normal (mesh, *p)); + ++progress; + + // Create some memory to work with: + // Stores a flag for each voxel as encoded in enum vox_mesh_t + Header H (image); + auto init_seg = Image::scratch (H); + for (auto l = Loop(init_seg) (init_seg); l; ++l) + init_seg.value() = vox_mesh_t::UNDEFINED; + + // Map each polygon to the underlying voxels + for (size_t poly_index = 0; poly_index != mesh.num_polygons(); ++poly_index) { + + const size_t num_vertices = (poly_index < mesh.num_triangles()) ? 3 : 4; + + // Figure out the voxel extent of this polygon in three dimensions + Vox lower_bound (H.size(0)-1, H.size(1)-1, H.size(2)-1), upper_bound (0, 0, 0); + VertexList this_poly_verts; + if (num_vertices == 3) + mesh.load_triangle_vertices (this_poly_verts, poly_index); + else + mesh.load_quad_vertices (this_poly_verts, poly_index - mesh.num_triangles()); + for (VertexList::const_iterator v = this_poly_verts.begin(); v != this_poly_verts.end(); ++v) { + for (size_t axis = 0; axis != 3; ++axis) { + const int this_axis_voxel = std::round((*v)[axis]); + lower_bound[axis] = std::min (lower_bound[axis], this_axis_voxel); + upper_bound[axis] = std::max (upper_bound[axis], this_axis_voxel); + } + } - // Figure out the voxel extent of this polygon in three dimensions - Vox lower_bound (H.size(0)-1, H.size(1)-1, H.size(2)-1), upper_bound (0, 0, 0); - VertexList this_poly_verts; - if (num_vertices == 3) - mesh.load_triangle_vertices (this_poly_verts, poly_index); - else - mesh.load_quad_vertices (this_poly_verts, poly_index - mesh.num_triangles()); - for (VertexList::const_iterator v = this_poly_verts.begin(); v != this_poly_verts.end(); ++v) { + // Constrain to lie within the dimensions of the image for (size_t axis = 0; axis != 3; ++axis) { - const int this_axis_voxel = std::round((*v)[axis]); - lower_bound[axis] = std::min (lower_bound[axis], this_axis_voxel); - upper_bound[axis] = std::max (upper_bound[axis], this_axis_voxel); + lower_bound[axis] = std::max(0, lower_bound[axis]); + upper_bound[axis] = std::min(int(H.size(axis)-1), upper_bound[axis]); } - } - // Constrain to lie within the dimensions of the image - for (size_t axis = 0; axis != 3; ++axis) { - lower_bound[axis] = std::max(0, lower_bound[axis]); - upper_bound[axis] = std::min(int(H.size(axis)-1), upper_bound[axis]); - } - - // For all voxels within this rectangular region, assign this polygon to the map - // Use the Separating Axis Theorem to be more stringent as to which voxels this - // polygon will be processed in - auto overlap = [&] (const Vox& vox, const size_t poly_index) -> bool { + // For all voxels within this rectangular region, assign this polygon to the map + // Use the Separating Axis Theorem to be more 
stringent as to which voxels this + // polygon will be processed in + auto overlap = [&] (const Vox& vox, const size_t poly_index) -> bool { + + VertexList vertices; + if (num_vertices == 3) + mesh.load_triangle_vertices (vertices, poly_index); + else + mesh.load_quad_vertices (vertices, poly_index - mesh.num_triangles()); + + // Test whether or not the two objects can be separated via projection onto an axis + auto separating_axis = [&] (const Eigen::Vector3& axis) -> bool { + default_type voxel_low = std::numeric_limits::infinity(); + default_type voxel_high = -std::numeric_limits::infinity(); + default_type poly_low = std::numeric_limits::infinity(); + default_type poly_high = -std::numeric_limits::infinity(); + + static const Eigen::Vector3 voxel_offsets[8] = { { -0.5, -0.5, -0.5 }, + { -0.5, -0.5, 0.5 }, + { -0.5, 0.5, -0.5 }, + { -0.5, 0.5, 0.5 }, + { 0.5, -0.5, -0.5 }, + { 0.5, -0.5, 0.5 }, + { 0.5, 0.5, -0.5 }, + { 0.5, 0.5, 0.5 } }; + + for (size_t i = 0; i != 8; ++i) { + const Eigen::Vector3 v (vox.matrix().cast() + voxel_offsets[i]); + const default_type projection = axis.dot (v); + voxel_low = std::min (voxel_low, projection); + voxel_high = std::max (voxel_high, projection); + } - VertexList vertices; - if (num_vertices == 3) - mesh.load_triangle_vertices (vertices, poly_index); - else - mesh.load_quad_vertices (vertices, poly_index - mesh.num_triangles()); - - // Test whether or not the two objects can be separated via projection onto an axis - auto separating_axis = [&] (const Eigen::Vector3& axis) -> bool { - default_type voxel_low = std::numeric_limits::infinity(); - default_type voxel_high = -std::numeric_limits::infinity(); - default_type poly_low = std::numeric_limits::infinity(); - default_type poly_high = -std::numeric_limits::infinity(); - - static const Eigen::Vector3 voxel_offsets[8] = { { -0.5, -0.5, -0.5 }, - { -0.5, -0.5, 0.5 }, - { -0.5, 0.5, -0.5 }, - { -0.5, 0.5, 0.5 }, - { 0.5, -0.5, -0.5 }, - { 0.5, -0.5, 0.5 }, - { 0.5, 0.5, -0.5 }, - { 0.5, 0.5, 0.5 } }; - - for (size_t i = 0; i != 8; ++i) { - const Eigen::Vector3 v (vox.matrix().cast() + voxel_offsets[i]); - const default_type projection = axis.dot (v); - voxel_low = std::min (voxel_low, projection); - voxel_high = std::max (voxel_high, projection); - } + for (const auto& v : vertices) { + const default_type projection = axis.dot (v); + poly_low = std::min (poly_low, projection); + poly_high = std::max (poly_high, projection); + } - for (const auto& v : vertices) { - const default_type projection = axis.dot (v); - poly_low = std::min (poly_low, projection); - poly_high = std::max (poly_high, projection); + // Is this a separating axis? + return (poly_low > voxel_high || voxel_low > poly_high); + }; + + // The following axes need to be tested as potential separating axes: + // x, y, z + // All cross-products between voxel and polygon edges + // Polygon normal + for (size_t i = 0; i != 3; ++i) { + Eigen::Vector3 axis (0.0, 0.0, 0.0); + axis[i] = 1.0; + if (separating_axis (axis)) + return false; + for (size_t j = 0; j != num_vertices; ++j) { + if (separating_axis (axis.cross (vertices[j+1] - vertices[j]))) + return false; + } + if (separating_axis (axis.cross (vertices[num_vertices-1] - vertices[0]))) + return false; } + if (separating_axis (polygon_normals[poly_index])) + return false; - // Is this a separating axis? 
- return (poly_low > voxel_high || voxel_low > poly_high); + // No axis has been found that separates the two objects + // Therefore, the two objects overlap + return true; }; - // The following axes need to be tested as potential separating axes: - // x, y, z - // All cross-products between voxel and polygon edges - // Polygon normal - for (size_t i = 0; i != 3; ++i) { - Eigen::Vector3 axis (0.0, 0.0, 0.0); - axis[i] = 1.0; - if (separating_axis (axis)) - return false; - for (size_t j = 0; j != num_vertices; ++j) { - if (separating_axis (axis.cross (vertices[j+1] - vertices[j]))) - return false; - } - if (separating_axis (axis.cross (vertices[num_vertices-1] - vertices[0]))) - return false; - } - if (separating_axis (polygon_normals[poly_index])) - return false; - - // No axis has been found that separates the two objects - // Therefore, the two objects overlap - return true; - }; - - Vox voxel; - for (voxel[2] = lower_bound[2]; voxel[2] <= upper_bound[2]; ++voxel[2]) { - for (voxel[1] = lower_bound[1]; voxel[1] <= upper_bound[1]; ++voxel[1]) { - for (voxel[0] = lower_bound[0]; voxel[0] <= upper_bound[0]; ++voxel[0]) { - // Rather than adding this polygon to the list of polygons to test for - // every single voxel within this 3D bounding box, only test it within - // those voxels that the polygon actually intersects - if (overlap (voxel, poly_index)) { - vector this_voxel_polys; - // Has this voxel already been intersected by at least one polygon? - // If it has, we need to concatenate this polygon to the list - // (which involves deleting the existing entry then re-writing the concatenated list); - // If it has not, we're adding a new entry to the list of voxels to be tested, - // with only one entry in the list for that voxel - Vox2Poly::const_iterator existing = voxel2poly.find (voxel); - if (existing != voxel2poly.end()) { - this_voxel_polys = existing->second; - voxel2poly.erase (existing); - } else { - // Only call this once each voxel, regardless of the number of intersecting polygons - assign_pos_of (voxel).to (init_seg); - init_seg.value() = vox_mesh_t::ON_MESH; - } - this_voxel_polys.push_back (poly_index); - voxel2poly.insert (std::make_pair (voxel, this_voxel_polys)); - } } } } + Vox voxel; + for (voxel[2] = lower_bound[2]; voxel[2] <= upper_bound[2]; ++voxel[2]) { + for (voxel[1] = lower_bound[1]; voxel[1] <= upper_bound[1]; ++voxel[1]) { + for (voxel[0] = lower_bound[0]; voxel[0] <= upper_bound[0]; ++voxel[0]) { + // Rather than adding this polygon to the list of polygons to test for + // every single voxel within this 3D bounding box, only test it within + // those voxels that the polygon actually intersects + if (overlap (voxel, poly_index)) { + vector this_voxel_polys; + // Has this voxel already been intersected by at least one polygon? 
+ // If it has, we need to concatenate this polygon to the list + // (which involves deleting the existing entry then re-writing the concatenated list); + // If it has not, we're adding a new entry to the list of voxels to be tested, + // with only one entry in the list for that voxel + Vox2Poly::const_iterator existing = voxel2poly.find (voxel); + if (existing != voxel2poly.end()) { + this_voxel_polys = existing->second; + voxel2poly.erase (existing); + } else { + // Only call this once each voxel, regardless of the number of intersecting polygons + assign_pos_of (voxel).to (init_seg); + init_seg.value() = vox_mesh_t::ON_MESH; + } + this_voxel_polys.push_back (poly_index); + voxel2poly.insert (std::make_pair (voxel, this_voxel_polys)); + } } } } - } - ++progress; - - - - // For *any* voxel not on the mesh but neighbouring a voxel in which a vertex lies, - // track a floating-point value corresponding to its distance from the plane defined - // by the normal at the vertex. - // Each voxel not directly on the mesh should then be assigned as prelim_inside or prelim_outside - // depending on whether the summed value is positive or negative - auto sum_distances = Image::scratch (H, "Sum of distances from polygon planes"); - Vox adj_voxel; - for (size_t i = 0; i != mesh.num_vertices(); ++i) { - const Vox centre_voxel (mesh.vert(i)); - for (adj_voxel[2] = centre_voxel[2]-1; adj_voxel[2] <= centre_voxel[2]+1; ++adj_voxel[2]) { - for (adj_voxel[1] = centre_voxel[1]-1; adj_voxel[1] <= centre_voxel[1]+1; ++adj_voxel[1]) { - for (adj_voxel[0] = centre_voxel[0]-1; adj_voxel[0] <= centre_voxel[0]+1; ++adj_voxel[0]) { - if (!is_out_of_bounds (H, adj_voxel) && (adj_voxel - centre_voxel).any()) { - const Eigen::Vector3 offset (adj_voxel.cast().matrix() - mesh.vert(i)); - const default_type dp_normal = offset.dot (mesh.norm(i)); - const default_type offset_on_plane = (offset - (mesh.norm(i) * dp_normal)).norm(); - assign_pos_of (adj_voxel).to (sum_distances); - // If offset_on_plane is close to zero, this vertex should contribute strongly toward - // the sum of distances from the surface within this voxel - sum_distances.value() += (1.0 / (1.0 + offset_on_plane)) * dp_normal; + + } + ++progress; + + + // For *any* voxel not on the mesh but neighbouring a voxel in which a vertex lies, + // track a floating-point value corresponding to its distance from the plane defined + // by the normal at the vertex. 
+ // Each voxel not directly on the mesh should then be assigned as prelim_inside or prelim_outside + // depending on whether the summed value is positive or negative + auto sum_distances = Image::scratch (H, "Sum of distances from polygon planes"); + Vox adj_voxel; + for (size_t i = 0; i != mesh.num_vertices(); ++i) { + const Vox centre_voxel (mesh.vert(i)); + for (adj_voxel[2] = centre_voxel[2]-1; adj_voxel[2] <= centre_voxel[2]+1; ++adj_voxel[2]) { + for (adj_voxel[1] = centre_voxel[1]-1; adj_voxel[1] <= centre_voxel[1]+1; ++adj_voxel[1]) { + for (adj_voxel[0] = centre_voxel[0]-1; adj_voxel[0] <= centre_voxel[0]+1; ++adj_voxel[0]) { + if (!is_out_of_bounds (H, adj_voxel) && (adj_voxel - centre_voxel).any()) { + const Eigen::Vector3 offset (adj_voxel.cast().matrix() - mesh.vert(i)); + const default_type dp_normal = offset.dot (mesh.norm(i)); + const default_type offset_on_plane = (offset - (mesh.norm(i) * dp_normal)).norm(); + assign_pos_of (adj_voxel).to (sum_distances); + // If offset_on_plane is close to zero, this vertex should contribute strongly toward + // the sum of distances from the surface within this voxel + sum_distances.value() += (1.0 / (1.0 + offset_on_plane)) * dp_normal; + } } } } } - } - ++progress; - for (auto l = Loop(init_seg) (init_seg, sum_distances); l; ++l) { - if (static_cast (sum_distances.value()) != 0.0f && init_seg.value() != vox_mesh_t::ON_MESH) - init_seg.value() = sum_distances.value() < 0.0 ? vox_mesh_t::PRELIM_INSIDE : vox_mesh_t::PRELIM_OUTSIDE; - } - ++progress; - - - // Can't guarantee that mesh might have a single isolated polygon pointing the wrong way - // Therefore, need to: - // - Select voxels both inside and outside the mesh to expand - // - When expanding each region, count the number of pre-assigned voxels both inside and outside - // - For the final region selection, assign values to voxels based on a majority vote - Image seed (init_seg); - vector to_fill; - std::stack to_expand; - for (auto l = Loop(seed) (seed); l; ++l) { - if (seed.value() == vox_mesh_t::PRELIM_INSIDE || seed.value() == vox_mesh_t::PRELIM_OUTSIDE) { - size_t prelim_inside_count = 0, prelim_outside_count = 0; - if (seed.value() == vox_mesh_t::PRELIM_INSIDE) - prelim_inside_count = 1; - else - prelim_outside_count = 1; - to_expand.push (Vox (seed.index(0), seed.index(1), seed.index(2))); - to_fill.assign (1, to_expand.top()); - do { - const Vox voxel (to_expand.top()); - to_expand.pop(); - for (size_t adj_vox_idx = 0; adj_vox_idx != 6; ++adj_vox_idx) { - const Vox adj_voxel (voxel + adj_voxels[adj_vox_idx]); - assign_pos_of (adj_voxel).to (init_seg); - if (!is_out_of_bounds (init_seg)) { - const uint8_t adj_value = init_seg.value(); - if (adj_value == vox_mesh_t::UNDEFINED || adj_value == vox_mesh_t::PRELIM_INSIDE || adj_value == vox_mesh_t::PRELIM_OUTSIDE) { - if (adj_value == vox_mesh_t::PRELIM_INSIDE) - ++prelim_inside_count; - else if (adj_value == vox_mesh_t::PRELIM_OUTSIDE) - ++prelim_outside_count; - to_expand.push (adj_voxel); - to_fill.push_back (adj_voxel); - init_seg.value() = vox_mesh_t::FILL_TEMP; + ++progress; + for (auto l = Loop(init_seg) (init_seg, sum_distances); l; ++l) { + if (static_cast (sum_distances.value()) != 0.0f && init_seg.value() != vox_mesh_t::ON_MESH) + init_seg.value() = sum_distances.value() < 0.0 ? 
vox_mesh_t::PRELIM_INSIDE : vox_mesh_t::PRELIM_OUTSIDE; + } + ++progress; + + + // Can't guarantee that mesh might have a single isolated polygon pointing the wrong way + // Therefore, need to: + // - Select voxels both inside and outside the mesh to expand + // - When expanding each region, count the number of pre-assigned voxels both inside and outside + // - For the final region selection, assign values to voxels based on a majority vote + Image seed (init_seg); + vector to_fill; + std::stack to_expand; + for (auto l = Loop(seed) (seed); l; ++l) { + if (seed.value() == vox_mesh_t::PRELIM_INSIDE || seed.value() == vox_mesh_t::PRELIM_OUTSIDE) { + size_t prelim_inside_count = 0, prelim_outside_count = 0; + if (seed.value() == vox_mesh_t::PRELIM_INSIDE) + prelim_inside_count = 1; + else + prelim_outside_count = 1; + to_expand.push (Vox (seed.index(0), seed.index(1), seed.index(2))); + to_fill.assign (1, to_expand.top()); + do { + const Vox voxel (to_expand.top()); + to_expand.pop(); + for (size_t adj_vox_idx = 0; adj_vox_idx != 6; ++adj_vox_idx) { + const Vox adj_voxel (voxel + adj_voxels[adj_vox_idx]); + assign_pos_of (adj_voxel).to (init_seg); + if (!is_out_of_bounds (init_seg)) { + const uint8_t adj_value = init_seg.value(); + if (adj_value == vox_mesh_t::UNDEFINED || adj_value == vox_mesh_t::PRELIM_INSIDE || adj_value == vox_mesh_t::PRELIM_OUTSIDE) { + if (adj_value == vox_mesh_t::PRELIM_INSIDE) + ++prelim_inside_count; + else if (adj_value == vox_mesh_t::PRELIM_OUTSIDE) + ++prelim_outside_count; + to_expand.push (adj_voxel); + to_fill.push_back (adj_voxel); + init_seg.value() = vox_mesh_t::FILL_TEMP; + } } } + } while (to_expand.size()); + if (prelim_inside_count == prelim_outside_count) + throw Exception ("Mapping mesh to image failed: Unable to label connected voxel region as inside or outside mesh"); + const vox_mesh_t fill_value = (prelim_inside_count > prelim_outside_count ? vox_mesh_t::INSIDE : vox_mesh_t::OUTSIDE); + for (auto voxel : to_fill) { + assign_pos_of (voxel).to (init_seg); + init_seg.value() = fill_value; } - } while (to_expand.size()); - if (prelim_inside_count == prelim_outside_count) - throw Exception ("Mapping mesh to image failed: Unable to label connected voxel region as inside or outside mesh"); - const vox_mesh_t fill_value = (prelim_inside_count > prelim_outside_count ? 
vox_mesh_t::INSIDE : vox_mesh_t::OUTSIDE); - for (auto voxel : to_fill) { - assign_pos_of (voxel).to (init_seg); - init_seg.value() = fill_value; + to_fill.clear(); } - to_fill.clear(); } - } - ++progress; + ++progress; - // Any voxel not yet processed must lie outside the structure(s) - for (auto l = Loop(init_seg) (init_seg); l; ++l) { - if (init_seg.value() == vox_mesh_t::UNDEFINED) - init_seg.value() = vox_mesh_t::OUTSIDE; - } - ++progress; - - // Write initial ternary segmentation - for (auto l = Loop (init_seg) (init_seg, image); l; ++l) { - switch (init_seg.value()) { - case vox_mesh_t (UNDEFINED): throw Exception ("Code error: poor filling of initial mesh estimate"); break; - case vox_mesh_t (ON_MESH): image.value() = 0.5; break; - case vox_mesh_t (OUTSIDE): image.value() = 0.0; break; - case vox_mesh_t (INSIDE): image.value() = 1.0; break; - default: assert (0); + // Any voxel not yet processed must lie outside the structure(s) + for (auto l = Loop(init_seg) (init_seg); l; ++l) { + if (init_seg.value() == vox_mesh_t::UNDEFINED) + init_seg.value() = vox_mesh_t::OUTSIDE; } + ++progress; + + // Write initial ternary segmentation + for (auto l = Loop (init_seg) (init_seg, image); l; ++l) { + switch (init_seg.value()) { + case vox_mesh_t (UNDEFINED): throw Exception ("Code error: poor filling of initial mesh estimate"); break; + case vox_mesh_t (ON_MESH): image.value() = 0.5; break; + case vox_mesh_t (OUTSIDE): image.value() = 0.0; break; + case vox_mesh_t (INSIDE): image.value() = 1.0; break; + default: assert (0); + } + } + } - ++progress; // Construct class functors necessary to calculate, for each voxel intersected by the // surface, the partial volume fraction @@ -485,25 +489,28 @@ namespace MR class Sink { MEMALIGN(Sink) public: - Sink (Image& image) : - image (image) { } + Sink (Image& image, const size_t voxel_count) : + image (image), + progress ("Calculating partial volume fractions of edge voxels", voxel_count) { } bool operator() (const std::pair& in) { assign_pos_of (in.first).to (image); assert (!is_out_of_bounds (image)); image.value() = in.second; + ++progress; return true; } private: Image image; + ProgressBar progress; }; Source source (voxel2poly); Pipe pipe (mesh, polygon_normals); - Sink sink (image); + Sink sink (image, voxel2poly.size()); Thread::run_queue (source, std::pair>(), From 7b2b2152e8cd9b0eb7ea1b6076da2722b9010488 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Wed, 4 Jul 2018 22:12:10 +1000 Subject: [PATCH 0168/1471] Surface::Filter::Smooth: Skip tiny surfaces Attempting to smooth a very small surface (e.g. 1 voxel from voxel2mesh) seems to sometimes result in erroneous behaviour in a subsequent mesh2voxel call, where the entire image is labelled as being inside the surface. This is a quick fix to prevent this behaviour where it's been observed, but if a similar result is observed elsewhere then more comprehensive digging will need to be performed. 
--- src/surface/filter/smooth.cpp | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/surface/filter/smooth.cpp b/src/surface/filter/smooth.cpp index 4588d605dc..2f923ebe98 100644 --- a/src/surface/filter/smooth.cpp +++ b/src/surface/filter/smooth.cpp @@ -42,6 +42,11 @@ namespace MR const size_t T = in.num_triangles(); if (V == 3*T) throw Exception ("Cannot perform smoothing on this mesh: no triangulation information"); + if (V <= 8) { + WARN ("No mesh smoothing applied; structure is too small"); + out = in; + return; + } // Pre-compute polygon centroids and areas VertexList centroids; From 52dc9126fdf5b7900dd549098970c08f38aac2ac Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Wed, 4 Jul 2018 23:39:18 +1000 Subject: [PATCH 0169/1471] Docs: More detail on modules More explicit instructions for the case where the user is provided with, or downloads, a single .cpp file, and therefore needs to construct the relevant module structure and place the .cpp file in the correct location to compile. --- docs/tips_and_tricks/external_modules.rst | 33 ++++++++++++++++++++++- 1 file changed, 32 insertions(+), 1 deletion(-) diff --git a/docs/tips_and_tricks/external_modules.rst b/docs/tips_and_tricks/external_modules.rst index ab6e859a91..36bd2c8199 100644 --- a/docs/tips_and_tricks/external_modules.rst +++ b/docs/tips_and_tricks/external_modules.rst @@ -38,6 +38,37 @@ directory to your ``PATH`` environment variable. +Single-``cpp``-file commands +---------------------------- + +In many instances, you may be provided with a single ``.cpp`` file that contains +all of the code necessary to compile a particular command that makes use of the +*MRtrix3* libraries: a developer may choose to distribute *just* the relevant +``.cpp`` file for a particular functionality, rather than enclosing it within the +requisite directory structure required for an external *MRtrix3* module. + +In such a circumstance, the steps to compile the command are as follows: + +1. Create a new directory on your file system for this 'module'; for this example, + let's suppose this is created at ``~/src/mrtrix/mymodule/``. + +2. Create a sub-directory called ``cmd/`` within this directory (so the complete + path to this new sub-directory in this instance would be: ``~/src/mrtrix/mymodule/cmd/``. + +3. Place the ``.cpp`` file provided to you by the developer into the ``cmd/`` + sub-directory. + +4. Within the root directory of this 'module', create a soft-link to the ``build`` + script that is stored within the root directory of your core *MRtrix3* + installation, as described above. + +5. Execute the ``build`` script from inside this module directory. + +The ``build`` script should *automatically* generate a sub-directory ``bin/`` +within your module directory, containing the executable file for the command +provided to you. + + Note for Windows users ---------------------- @@ -66,4 +97,4 @@ and ``msys64`` should then be able to interpret the softlink path correctly I have also found recently that the build script will not correctly detect use of a softlink for compiling an external project when run under Python2, so -Python3 must be used explicitly. \ No newline at end of file +Python3 must be used explicitly. 
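The module-construction steps documented above amount to only a handful of filesystem operations. As a rough illustration (not part of the patch), the same procedure expressed in Python, with the module location, core MRtrix3 installation path and source file name all being example values to substitute with your own:

    import os, shutil, subprocess

    module_dir = os.path.expanduser('~/src/mrtrix/mymodule')            # step 1: module root (example path)
    core_dir   = os.path.expanduser('~/src/mrtrix3')                    # root of the core MRtrix3 installation (assumed)
    source_cpp = os.path.expanduser('~/Downloads/mycommand.cpp')        # the .cpp file provided by the developer (assumed)

    os.makedirs(os.path.join(module_dir, 'cmd'))                        # step 2: cmd/ sub-directory
    shutil.copy(source_cpp, os.path.join(module_dir, 'cmd'))            # step 3: place the .cpp file in cmd/
    os.symlink(os.path.join(core_dir, 'build'),                         # step 4: soft-link to the core build script
               os.path.join(module_dir, 'build'))
    subprocess.check_call(['./build'], cwd=module_dir)                  # step 5: compile; executables appear in bin/
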
From 64dc8e83299c05c1cc93f31a794e6a80cc6c6106 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Wed, 11 Apr 2018 18:05:14 +1000 Subject: [PATCH 0170/1471] mrtrix3.file.delTemporary: Compatibility with list input --- lib/mrtrix3/file.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/lib/mrtrix3/file.py b/lib/mrtrix3/file.py index c780546527..e786bdf9b6 100644 --- a/lib/mrtrix3/file.py +++ b/lib/mrtrix3/file.py @@ -11,6 +11,24 @@ def delTemporary(path): #pylint: disable=unused-variable from mrtrix3 import app if not app.cleanup: return + if isinstance(path, list): + if len(path) == 1: + delTemporary(path[0]) + return + if app.verbosity > 2: + app.console('Deleting ' + str(len(path)) + ' temporary items: ' + str(path)) + for entry in path: + if os.path.isfile(entry): + func = os.remove + elif os.path.isdir(entry): + func = shutil.rmtree + else: + continue + try: + func(entry) + except OSError: + pass + return if os.path.isfile(path): temporary_type = 'file' func = os.remove From a8e4ec0d12420d0fde0dd7dff2d12303c639204f Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Thu, 5 Jul 2018 00:32:18 +1000 Subject: [PATCH 0171/1471] mesh2voxel: Fix code duplication Likely a residual error from large amounts of branch divergence resolution. --- cmd/mesh2voxel.cpp | 2 -- 1 file changed, 2 deletions(-) diff --git a/cmd/mesh2voxel.cpp b/cmd/mesh2voxel.cpp index fc60475ecf..ec2a727312 100644 --- a/cmd/mesh2voxel.cpp +++ b/cmd/mesh2voxel.cpp @@ -68,8 +68,6 @@ void run () template_header.datatype().set_byte_order_native(); // Create the output image - template_header.datatype() = DataType::Float32; - template_header.datatype().set_byte_order_native(); Image output = Image::create (argument[2], template_header); // Perform the partial volume estimation From 384b7b8878af86281aea572b1a2290736c4c24d7 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Thu, 5 Jul 2018 11:18:58 +1000 Subject: [PATCH 0172/1471] dwipreproc: Use file.delTemporary() list input capability --- bin/dwipreproc | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/bin/dwipreproc b/bin/dwipreproc index 60ba0fcae6..e9d296ea3c 100755 --- a/bin/dwipreproc +++ b/bin/dwipreproc @@ -798,9 +798,7 @@ if do_topup: else: run.command('mrcat ' + ' '.join(applytopup_image_list) + ' - -axis 3 | dwi2mask - - | maskfilter - dilate - | mrconvert - eddy_mask.nii -datatype float32 -strides -1,+2,+3') - for entry in applytopup_image_list: - file.delTemporary(entry) - + file.delTemporary(applytopup_image_list) eddy_in_topup_option = ' --topup=field' else: @@ -1056,8 +1054,7 @@ else: # Finally the recombined volumes must be concatenated to produce the resulting image series run.command('mrcat ' + ' '.join(combined_image_list) + ' - -axis 3 | mrconvert - result.mif' + dwi_post_eddy_crop_option + ' -fslgrad bvecs_combined bvals_combined' + stride_option) - for entry in combined_image_list: - file.delTemporary(entry) + file.delTemporary(combined_image_list) # Grab any relevant files that eddy has created, and copy them to the requested directory From f2a1263b251f2b8ad98b620c551a340de6c45cf8 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Thu, 5 Jul 2018 14:00:25 +1000 Subject: [PATCH 0173/1471] First attempt at CI for Windows using AppVeyor --- appveyor.yml | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) create mode 100644 appveyor.yml diff --git a/appveyor.yml b/appveyor.yml new file mode 100644 index 0000000000..4e663db26b --- /dev/null +++ b/appveyor.yml @@ -0,0 +1,28 @@ +version: 1.0.{build} + 
+image: Visual Studio 2017 + +platform: x64 + +configuration: Release + +init: + - git config --global core.autocrlf input + +clone_folder: c:\msys64\mrtrix3 + +shallow_clone: true +clone_depth: 5 + +matrix: + fast_finish: false # set this flag to immediately finish build once one of the jobs fails. + +environment: + test: run + matrix: + - py: python2 + - py: python3 + +test_script: + - cmd: C:\msys64\usr\bin\bash -e -l -c "cd /mrtrix3/ && ./travis.sh" + From 72cc8f99c49d5a343e2f4234f1545a3d339fd2e8 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Thu, 5 Jul 2018 14:07:57 +1000 Subject: [PATCH 0174/1471] AppVeyor: Change some directives From page: https://help.appveyor.com/discussions/problems/12705-custom-build-script-script-mode Try to explicitly disable MSBuild mode using 'build: off', and execute tests using 'build_script' instead of 'test_script' (since the latter can apparently also disable MSBuild mode, which is the error currently being produced). --- appveyor.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/appveyor.yml b/appveyor.yml index 4e663db26b..716ce78b61 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -23,6 +23,8 @@ environment: - py: python2 - py: python3 -test_script: +build: off + +build_script: - cmd: C:\msys64\usr\bin\bash -e -l -c "cd /mrtrix3/ && ./travis.sh" From 8064b350f6e660bfb29bb8bd1beb88d1fdba719d Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Thu, 5 Jul 2018 14:13:42 +1000 Subject: [PATCH 0175/1471] AppVeyor: Explicitly install dependencies within MSYS --- appveyor.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/appveyor.yml b/appveyor.yml index 716ce78b61..bce96f75c3 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -26,5 +26,6 @@ environment: build: off build_script: + - cmd: C:\msys64\usr\bin\bash -e -l -c "pacman -S --noconfirm pkg-config mingw-w64-x86_64-gcc mingw-w64-x86_64-eigen3 mingw-w64-x86_64-qt5 mingw-w64-x86_64-fftw mingw-w64-x86_64-libtiff" - cmd: C:\msys64\usr\bin\bash -e -l -c "cd /mrtrix3/ && ./travis.sh" From bd69d0917cfd68a7be865f12f9ab2540284f9236 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Thu, 5 Jul 2018 14:26:35 +1000 Subject: [PATCH 0176/1471] AppVeyor: Try a single-line build script In the previous test, the MRtrix3 configure script failed to find Eigen, even though it had been installed on the previous line. Trying having both installation of prerequisites and MRtrix3 compilation / testing on a single line, in case the environment is not persistent between commands. --- appveyor.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/appveyor.yml b/appveyor.yml index bce96f75c3..5532910a01 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -26,6 +26,5 @@ environment: build: off build_script: - - cmd: C:\msys64\usr\bin\bash -e -l -c "pacman -S --noconfirm pkg-config mingw-w64-x86_64-gcc mingw-w64-x86_64-eigen3 mingw-w64-x86_64-qt5 mingw-w64-x86_64-fftw mingw-w64-x86_64-libtiff" - - cmd: C:\msys64\usr\bin\bash -e -l -c "cd /mrtrix3/ && ./travis.sh" + - cmd: C:\msys64\usr\bin\bash -e -l -c "pacman -S --noconfirm mingw-w64-x86_64-eigen3 mingw-w64-x86_64-qt5 mingw-w64-x86_64-fftw mingw-w64-x86_64-libtiff && cd /mrtrix3/ && ./travis.sh" From a58696a8dc02c3b534074863097148dcc665fcf3 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Thu, 5 Jul 2018 14:48:38 +1000 Subject: [PATCH 0177/1471] AppVeyor: Add SSH information Only a temporary change, to enable snooping around in the VM and hopefully figure out why the MRtrix3 configure script can't find Eigen. 
--- appveyor.yml | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/appveyor.yml b/appveyor.yml index 5532910a01..016632b3b5 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -22,9 +22,14 @@ environment: matrix: - py: python2 - py: python3 + APPVEYOR_SSH_KEY: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQClwDCeYLhtrdMlMFMmTDo+xmPARbPoHaGS5/6QyIxieDDNjUdMONu5+P+XAV8R7wt9vzu3ET5xZwDauG17SDle14nP43zE6H/klk+RQpu+Eq1Z+G0eI2LbJSqtf7JqRo1L2LjBAB0YupUi3Kxj3woUSBIcl9sdhm/9hhmSM43Z8ggFIK//rZsEfZ6ZDWrZ7AjrIeyjW5aTMcOCHehiuqpd3c4Er9oKJHS9bpRFxG6OfCfFxh4vB/h24m2kASPGS4GNv3K/p64lJzDUkC2/wB1xZLpj22Moee+KQ/Fxe/vdHPfV3/d5w9Hfh9JjlFJOdBIiDyiiGM7nv9QoGeYiAqkyGi4maMt3utkiXJw1EP/V7NJjpTNe5+8xNfM/nb0umeEYfRkEk/iLDcZ4x3kNd5zviBls5tDejkEmdLvnzw0mtFWW74HWRKR1gS9gkN/5ZD/8OYOnlt7g7RLPB0O961SbzTFitcDJu209Okq7ebbXJudmMd8epv8797XoM9lIk9imITGnZ2vz2PJ/IumrTotbjamNh+jhSQUKc7qzyHQ4TJav7MiCSJV4CZ+TRX0wi08rFnGXhYZ/Cgju/Y9z/8F+A1JMMyuXecQAPIcHaJ1SRG4xWe6IPL+VBOlIM3FQSJGUYaPVr7pVJYfPO1nllvy2+ojz70q8jhH3YnohJDCS8Q== robert.smith@florey.edu.au" + APPVEYOR_SSH_BLOCK: true + build: off build_script: - - cmd: C:\msys64\usr\bin\bash -e -l -c "pacman -S --noconfirm mingw-w64-x86_64-eigen3 mingw-w64-x86_64-qt5 mingw-w64-x86_64-fftw mingw-w64-x86_64-libtiff && cd /mrtrix3/ && ./travis.sh" + - cmd: C:\msys64\usr\bin\bash -e -l -c "pacman -S --noconfirm mingw-w64-x86_64-eigen3 mingw-w64-x86_64-qt5 mingw-w64-x86_64-fftw mingw-w64-x86_64-libtiff" + - sh: curl -sflL 'https://raw.githubusercontent.com/appveyor/ci/master/scripts/enable-ssh.sh' | bash -e - + - cmd: C:\msys64\usr\bin\bash -e -l -c "cd /mrtrix3/ && ./travis.sh" From 00992c506b51867ebb5f936e117a28599fc47fa6 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Thu, 5 Jul 2018 15:02:42 +1000 Subject: [PATCH 0178/1471] AppVeyor: Second attempt at SSH access Having the curl command execute via sh in between the pacman install and MRtrix3 testing steps appeared to not execute. Therefore trying having the SSH acces call separately within the on_finish block on its own. --- appveyor.yml | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/appveyor.yml b/appveyor.yml index 016632b3b5..d82290369a 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -15,7 +15,7 @@ shallow_clone: true clone_depth: 5 matrix: - fast_finish: false # set this flag to immediately finish build once one of the jobs fails. 
+ fast_finish: false environment: test: run @@ -29,7 +29,9 @@ environment: build: off build_script: - - cmd: C:\msys64\usr\bin\bash -e -l -c "pacman -S --noconfirm mingw-w64-x86_64-eigen3 mingw-w64-x86_64-qt5 mingw-w64-x86_64-fftw mingw-w64-x86_64-libtiff" - - sh: curl -sflL 'https://raw.githubusercontent.com/appveyor/ci/master/scripts/enable-ssh.sh' | bash -e - + - cmd: C:\msys64\usr\bin\bash -e -l -c "pacman -S --noconfirm mingw-w64-x86_64-eigen3 mingw-w64-x86_64-qt5 mingw-w64-x86_64-python3 mingw-w64-x86_64-fftw mingw-w64-x86_64-libtiff" - cmd: C:\msys64\usr\bin\bash -e -l -c "cd /mrtrix3/ && ./travis.sh" +on_finish: + - sh: curl -sflL 'https://raw.githubusercontent.com/appveyor/ci/master/scripts/enable-ssh.sh' | bash -e - + From 21069305a62395b10ce17bb19adf23c846dbcbf0 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Thu, 5 Jul 2018 15:15:04 +1000 Subject: [PATCH 0179/1471] AppVeyor: Try SSH access in 'init:' block --- appveyor.yml | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/appveyor.yml b/appveyor.yml index d82290369a..1c2f36632b 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -6,9 +6,6 @@ platform: x64 configuration: Release -init: - - git config --global core.autocrlf input - clone_folder: c:\msys64\mrtrix3 shallow_clone: true @@ -25,13 +22,13 @@ environment: APPVEYOR_SSH_KEY: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQClwDCeYLhtrdMlMFMmTDo+xmPARbPoHaGS5/6QyIxieDDNjUdMONu5+P+XAV8R7wt9vzu3ET5xZwDauG17SDle14nP43zE6H/klk+RQpu+Eq1Z+G0eI2LbJSqtf7JqRo1L2LjBAB0YupUi3Kxj3woUSBIcl9sdhm/9hhmSM43Z8ggFIK//rZsEfZ6ZDWrZ7AjrIeyjW5aTMcOCHehiuqpd3c4Er9oKJHS9bpRFxG6OfCfFxh4vB/h24m2kASPGS4GNv3K/p64lJzDUkC2/wB1xZLpj22Moee+KQ/Fxe/vdHPfV3/d5w9Hfh9JjlFJOdBIiDyiiGM7nv9QoGeYiAqkyGi4maMt3utkiXJw1EP/V7NJjpTNe5+8xNfM/nb0umeEYfRkEk/iLDcZ4x3kNd5zviBls5tDejkEmdLvnzw0mtFWW74HWRKR1gS9gkN/5ZD/8OYOnlt7g7RLPB0O961SbzTFitcDJu209Okq7ebbXJudmMd8epv8797XoM9lIk9imITGnZ2vz2PJ/IumrTotbjamNh+jhSQUKc7qzyHQ4TJav7MiCSJV4CZ+TRX0wi08rFnGXhYZ/Cgju/Y9z/8F+A1JMMyuXecQAPIcHaJ1SRG4xWe6IPL+VBOlIM3FQSJGUYaPVr7pVJYfPO1nllvy2+ojz70q8jhH3YnohJDCS8Q== robert.smith@florey.edu.au" APPVEYOR_SSH_BLOCK: true +init: + - git config --global core.autocrlf input + - sh: curl -sflL 'https://raw.githubusercontent.com/appveyor/ci/master/scripts/enable-ssh.sh' | bash -e - build: off build_script: - - cmd: C:\msys64\usr\bin\bash -e -l -c "pacman -S --noconfirm mingw-w64-x86_64-eigen3 mingw-w64-x86_64-qt5 mingw-w64-x86_64-python3 mingw-w64-x86_64-fftw mingw-w64-x86_64-libtiff" + - cmd: C:\msys64\usr\bin\bash -e -l -c "pacman -S --noconfirm mingw-w64-x86_64-eigen3 mingw-w64-x86_64-qt5 mingw-w64-x86_64-fftw mingw-w64-x86_64-libtiff" - cmd: C:\msys64\usr\bin\bash -e -l -c "cd /mrtrix3/ && ./travis.sh" -on_finish: - - sh: curl -sflL 'https://raw.githubusercontent.com/appveyor/ci/master/scripts/enable-ssh.sh' | bash -e - - From 09480d06da9d30fc550dabd8eff19332a313218e Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Thu, 5 Jul 2018 16:34:11 +1000 Subject: [PATCH 0180/1471] fixelcfestats: Extra processing step multi-threaded Normalisation of fixel-fixel connectivity matrix, conversion into space-efficient format, and calculation of smoothing weights, is now multi-threaded across fixels. 
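The change below exploits the fact that each masked fixel can be normalised independently: a single source enumerates fixel indices (and drives the progress bar), while multiple sink threads perform the per-fixel work. A loose, illustrative analogue of that producer/worker split, using the Python standard library rather than the Thread::run_queue machinery employed in the actual diff, with all names being placeholders:

    from concurrent.futures import ThreadPoolExecutor

    mask = [True, False, True, True]                           # placeholder per-fixel mask

    def process_fixel(fixel_index):
        # normalise connectivity / compute smoothing weights for this one fixel
        return fixel_index

    masked_fixels = (i for i, keep in enumerate(mask) if keep)  # the "source"
    with ThreadPoolExecutor() as pool:                          # the "sinks"
        results = list(pool.map(process_fixel, masked_fixels))
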
--- cmd/fixelcfestats.cpp | 127 ++++++++++++++++++++++-------------------- 1 file changed, 68 insertions(+), 59 deletions(-) diff --git a/cmd/fixelcfestats.cpp b/cmd/fixelcfestats.cpp index 6aaa924ff4..00df0930b7 100644 --- a/cmd/fixelcfestats.cpp +++ b/cmd/fixelcfestats.cpp @@ -406,77 +406,86 @@ void run() Stats::CFE::norm_connectivity_matrix_type norm_connectivity_matrix (mask_fixels); // Also pre-compute fixel-fixel weights for smoothing. Stats::CFE::norm_connectivity_matrix_type smoothing_weights (mask_fixels); - bool do_smoothing = false; + const bool do_smoothing = (smooth_std_dev > 0.0); + const float gaussian_const1 = do_smoothing ? (1.0 / (smooth_std_dev * std::sqrt (2.0 * Math::pi))) : 1.0; const float gaussian_const2 = 2.0 * smooth_std_dev * smooth_std_dev; - float gaussian_const1 = 1.0; - if (smooth_std_dev > 0.0) { - do_smoothing = true; - gaussian_const1 = 1.0 / (smooth_std_dev * std::sqrt (2.0 * Math::pi)); - } { - // TODO This could trivially be multi-threaded; fixels are handled independently - ProgressBar progress ("Normalising and thresholding fixel-fixel connectivity matrix", num_fixels); - for (index_type fixel = 0; fixel < num_fixels; ++fixel) { - - mask.index(0) = fixel; - if (mask.value()) { - - const int32_t column = fixel2column[fixel]; - - // Here, the connectivity matrix needs to be modified to reflect the - // fact that fixel indices in the template fixel image may not - // correspond to rows in the statistical analysis - connectivity_value_type sum_weights = 0.0; - for (auto& it : connectivity_matrix[fixel]) { - -#ifndef NDEBUG - // Even if this fixel is within the mask, it should still not - // connect to any fixel that is outside the mask - mask.index(0) = it.first; - assert (mask.value()); -#endif - const connectivity_value_type connectivity = it.second.value / connectivity_value_type (fixel_TDI[fixel]); - if (connectivity >= connectivity_threshold) { - if (do_smoothing) { - const value_type distance = std::sqrt (Math::pow2 (positions[fixel][0] - positions[it.first][0]) + - Math::pow2 (positions[fixel][1] - positions[it.first][1]) + - Math::pow2 (positions[fixel][2] - positions[it.first][2])); - const connectivity_value_type smoothing_weight = connectivity * gaussian_const1 * std::exp (-Math::pow2 (distance) / gaussian_const2); - if (smoothing_weight >= connectivity_threshold) { - smoothing_weights[column].push_back (Stats::CFE::NormMatrixElement (fixel2column[it.first], smoothing_weight)); - sum_weights += smoothing_weight; - } + class Source + { MEMALIGN(Source) + public: + Source (Image& mask) : + mask (mask), + num_fixels (mask.size (0)), + counter (0), + progress ("normalising and thresholding fixel-fixel connectivity matrix", num_fixels) { } + bool operator() (size_t& fixel_index) { + while (counter < num_fixels) { + mask.index(0) = counter; + ++progress; + if (mask.value()) { + fixel_index = counter++; + return true; } - - // Here we pre-exponentiate each connectivity value by C - norm_connectivity_matrix[column].push_back (Stats::CFE::NormMatrixElement (fixel2column[it.first], std::pow (connectivity, cfe_c))); + ++counter; } + fixel_index = num_fixels; + return false; } + private: + Image mask; + const size_t num_fixels; + size_t counter; + ProgressBar progress; + }; + + auto Sink = [&] (const size_t& fixel_index) + { + assert (fixel_index < connectivity_matrix.size()); + const int32_t column = fixel2column[fixel_index]; + assert (column >= 0 && column < norm_connectivity_matrix.size()); + + // Here, the connectivity matrix needs to be modified to 
reflect the + // fact that fixel indices in the template fixel image may not + // correspond to rows in the statistical analysis + connectivity_value_type sum_weights = 0.0; + for (auto& it : connectivity_matrix[fixel_index]) { + const connectivity_value_type connectivity = it.second.value / connectivity_value_type (fixel_TDI[fixel_index]); + if (connectivity >= connectivity_threshold) { + if (do_smoothing) { + const value_type distance = std::sqrt (Math::pow2 (positions[fixel_index][0] - positions[it.first][0]) + + Math::pow2 (positions[fixel_index][1] - positions[it.first][1]) + + Math::pow2 (positions[fixel_index][2] - positions[it.first][2])); + const connectivity_value_type smoothing_weight = connectivity * gaussian_const1 * std::exp (-Math::pow2 (distance) / gaussian_const2); + if (smoothing_weight >= connectivity_threshold) { + smoothing_weights[column].push_back (Stats::CFE::NormMatrixElement (fixel2column[it.first], smoothing_weight)); + sum_weights += smoothing_weight; + } + } + // Here we pre-exponentiate each connectivity value by C + norm_connectivity_matrix[column].push_back (Stats::CFE::NormMatrixElement (fixel2column[it.first], std::pow (connectivity, cfe_c))); + } + } - // Make sure the fixel is fully connected to itself - norm_connectivity_matrix[column].push_back (Stats::CFE::NormMatrixElement (uint32_t(column), connectivity_value_type(1.0))); - smoothing_weights[column].push_back (Stats::CFE::NormMatrixElement (uint32_t(column), connectivity_value_type(gaussian_const1))); - sum_weights += connectivity_value_type(gaussian_const1); - - // Normalise smoothing weights - const connectivity_value_type norm_factor = connectivity_value_type(1.0) / sum_weights; - for (auto i : smoothing_weights[column]) - i.normalise (norm_factor); + // Make sure the fixel is fully connected to itself + norm_connectivity_matrix[column].push_back (Stats::CFE::NormMatrixElement (uint32_t(column), connectivity_value_type(1.0))); + smoothing_weights[column].push_back (Stats::CFE::NormMatrixElement (uint32_t(column), connectivity_value_type(gaussian_const1))); + sum_weights += connectivity_value_type (gaussian_const1); - // Force deallocation of memory used for this fixel in the original matrix - std::map().swap (connectivity_matrix[fixel]); + // Normalise smoothing weights + const connectivity_value_type norm_factor = connectivity_value_type(1.0) / sum_weights; + for (auto i : smoothing_weights[column]) + i.normalise (norm_factor); - } else { + // Force deallocation of memory used for this fixel in the original matrix + std::map().swap (connectivity_matrix[fixel_index]); - // If fixel is not in the mask, tract_processor should never assign - // any connections to it - assert (connectivity_matrix[fixel].empty()); + return true; + }; - } - progress++; - } + Source source (mask); + Thread::run_queue (source, size_t(), Thread::multi (Sink)); } From 6cb89f98d1427859655dbb0c58052913adc426a4 Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Fri, 6 Jul 2018 10:45:54 +1000 Subject: [PATCH 0181/1471] AppVeyor: Still trying to get SSH access Try having the SSH enable script as the only entry in the init block, since it seems to have again been skipped in the last test. 
--- appveyor.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/appveyor.yml b/appveyor.yml index 1c2f36632b..6e560e6d85 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -23,7 +23,6 @@ environment: APPVEYOR_SSH_BLOCK: true init: - - git config --global core.autocrlf input - sh: curl -sflL 'https://raw.githubusercontent.com/appveyor/ci/master/scripts/enable-ssh.sh' | bash -e - build: off From 238c096d97432f86677b17e87ed2883df41e65aa Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Fri, 6 Jul 2018 10:53:14 +1000 Subject: [PATCH 0182/1471] AppVeyor: Windows RDP instructions Turns out AppVeyor has separate instructions for remote access depending on whether the remote machine is a Windows or Linux VM: https://www.appveyor.com/docs/how-to/rdp-to-build-worker/ --- appveyor.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/appveyor.yml b/appveyor.yml index 6e560e6d85..1335f48a9e 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -19,15 +19,15 @@ environment: matrix: - py: python2 - py: python3 - APPVEYOR_SSH_KEY: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQClwDCeYLhtrdMlMFMmTDo+xmPARbPoHaGS5/6QyIxieDDNjUdMONu5+P+XAV8R7wt9vzu3ET5xZwDauG17SDle14nP43zE6H/klk+RQpu+Eq1Z+G0eI2LbJSqtf7JqRo1L2LjBAB0YupUi3Kxj3woUSBIcl9sdhm/9hhmSM43Z8ggFIK//rZsEfZ6ZDWrZ7AjrIeyjW5aTMcOCHehiuqpd3c4Er9oKJHS9bpRFxG6OfCfFxh4vB/h24m2kASPGS4GNv3K/p64lJzDUkC2/wB1xZLpj22Moee+KQ/Fxe/vdHPfV3/d5w9Hfh9JjlFJOdBIiDyiiGM7nv9QoGeYiAqkyGi4maMt3utkiXJw1EP/V7NJjpTNe5+8xNfM/nb0umeEYfRkEk/iLDcZ4x3kNd5zviBls5tDejkEmdLvnzw0mtFWW74HWRKR1gS9gkN/5ZD/8OYOnlt7g7RLPB0O961SbzTFitcDJu209Okq7ebbXJudmMd8epv8797XoM9lIk9imITGnZ2vz2PJ/IumrTotbjamNh+jhSQUKc7qzyHQ4TJav7MiCSJV4CZ+TRX0wi08rFnGXhYZ/Cgju/Y9z/8F+A1JMMyuXecQAPIcHaJ1SRG4xWe6IPL+VBOlIM3FQSJGUYaPVr7pVJYfPO1nllvy2+ojz70q8jhH3YnohJDCS8Q== robert.smith@florey.edu.au" - APPVEYOR_SSH_BLOCK: true + APPVEYOR_RDP_PASSWORD: mrtrix-guest init: - - sh: curl -sflL 'https://raw.githubusercontent.com/appveyor/ci/master/scripts/enable-ssh.sh' | bash -e - + - git config --global core.autocrlf input build: off build_script: - cmd: C:\msys64\usr\bin\bash -e -l -c "pacman -S --noconfirm mingw-w64-x86_64-eigen3 mingw-w64-x86_64-qt5 mingw-w64-x86_64-fftw mingw-w64-x86_64-libtiff" + - ps: $blockRdp = $true; iex ((new-object net.webclient).DownloadString('https://raw.githubusercontent.com/appveyor/ci/master/scripts/enable-rdp.ps1')) - cmd: C:\msys64\usr\bin\bash -e -l -c "cd /mrtrix3/ && ./travis.sh" From 343dba54b36ff2ca322444fa079079f66c0689cf Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Fri, 6 Jul 2018 13:17:27 +1000 Subject: [PATCH 0183/1471] Stats: New output: Null distribution contributions For all statistical inference commands (connectomestats, fixelcfestats, mrclusterstats, vectorstats), generate an additional output per hypothesis, that shows the number of times each element contributed to the null distribution (as the element with the maximal enhanced statistic for the permutation). This may provide some insight into non-stationarity effects. 
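Conceptually, for each hypothesis the new output is a per-element count of how often that element supplied the maximum of the enhanced statistic across permutations. A minimal sketch of that bookkeeping, assuming a (permutations x elements) matrix of enhanced statistics for a single hypothesis; the data here are placeholders, and the real code accumulates the count per shuffle as the null distribution itself is built:

    import numpy as np

    enhanced = np.random.rand(1000, 50)   # placeholder: 1000 permutations, 50 elements
    null_dist = enhanced.max(axis=1)      # maximal enhanced statistic per permutation (the null distribution)
    contributions = np.bincount(enhanced.argmax(axis=1), minlength=enhanced.shape[1])
    # contributions[e] = number of permutations in which element e supplied the maximum
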
--- cmd/connectomestats.cpp | 18 ++++++++++-------- cmd/fixelcfestats.cpp | 14 +++++++++----- cmd/mrclusterstats.cpp | 21 ++++++++++++--------- cmd/vectorstats.cpp | 5 ++++- src/stats/permtest.cpp | 14 +++++++++++--- src/stats/permtest.h | 6 +++++- 6 files changed, 51 insertions(+), 27 deletions(-) diff --git a/cmd/connectomestats.cpp b/cmd/connectomestats.cpp index d64aa4dd76..e6812ec7ba 100644 --- a/cmd/connectomestats.cpp +++ b/cmd/connectomestats.cpp @@ -37,6 +37,7 @@ using namespace MR::Math::Stats::GLM; using Math::Stats::matrix_type; using Math::Stats::vector_type; +using Stats::PermTest::count_matrix_type; const char* algorithms[] = { "nbs", "nbse", "none", nullptr }; @@ -296,31 +297,32 @@ void run() if (do_nonstationarity_adjustment) { Stats::PermTest::precompute_empirical_stat (glm_test, enhancer, empirical_statistic); for (size_t i = 0; i != num_contrasts; ++i) - save_matrix (mat2vec.V2M (empirical_statistic.col(i)), output_prefix + "_empirical" + postfix(i) + ".csv"); + save_matrix (mat2vec.V2M (empirical_statistic.col(i)), output_prefix + "empirical" + postfix(i) + ".csv"); } // Precompute default statistic and enhanced statistic matrix_type tvalue_output, enhanced_output; Stats::PermTest::precompute_default_permutation (glm_test, enhancer, empirical_statistic, enhanced_output, tvalue_output); for (size_t i = 0; i != num_contrasts; ++i) { - save_matrix (mat2vec.V2M (tvalue_output.col(i)), output_prefix + "_" + (contrasts[i].is_F() ? "F" : "t") + "value" + postfix(i) + ".csv"); - save_matrix (mat2vec.V2M (enhanced_output.col(i)), output_prefix + "_enhanced" + postfix(i) + ".csv"); + save_matrix (mat2vec.V2M (tvalue_output.col(i)), output_prefix + (contrasts[i].is_F() ? "F" : "t") + "value" + postfix(i) + ".csv"); + save_matrix (mat2vec.V2M (enhanced_output.col(i)), output_prefix + "enhanced" + postfix(i) + ".csv"); } // Perform permutation testing if (!get_options ("notest").size()) { matrix_type null_distribution, uncorrected_pvalues; + count_matrix_type null_contributions; Stats::PermTest::run_permutations (glm_test, enhancer, empirical_statistic, - enhanced_output, null_distribution, uncorrected_pvalues); - + enhanced_output, null_distribution, null_contributions, uncorrected_pvalues); for (size_t i = 0; i != num_contrasts; ++i) - save_vector (null_distribution.col(i), output_prefix + "_null_dist" + postfix(i) + ".txt"); + save_vector (null_distribution.col(i), output_prefix + "null_dist" + postfix(i) + ".txt"); const matrix_type pvalue_output = MR::Math::Stats::fwe_pvalue (null_distribution, enhanced_output); for (size_t i = 0; i != num_contrasts; ++i) { - save_matrix (mat2vec.V2M (pvalue_output.col(i)), output_prefix + "_fwe_pvalue" + postfix(i) + ".csv"); - save_matrix (mat2vec.V2M (uncorrected_pvalues.col(i)), output_prefix + "_uncorrected_pvalue" + postfix(i) + ".csv"); + save_matrix (mat2vec.V2M (pvalue_output.col(i)), output_prefix + "fwe_pvalue" + postfix(i) + ".csv"); + save_matrix (mat2vec.V2M (uncorrected_pvalues.col(i)), output_prefix + "uncorrected_pvalue" + postfix(i) + ".csv"); + save_matrix (mat2vec.V2M (null_contributions.col(i)), output_prefix + "null_contributions" + postfix(i) + ".csv"); } } diff --git a/cmd/fixelcfestats.cpp b/cmd/fixelcfestats.cpp index 00df0930b7..1d858fbedc 100644 --- a/cmd/fixelcfestats.cpp +++ b/cmd/fixelcfestats.cpp @@ -44,6 +44,7 @@ using namespace MR::Math::Stats::GLM; using Stats::CFE::direction_type; using Stats::CFE::connectivity_value_type; using Stats::CFE::index_type; +using Stats::PermTest::count_matrix_type; #define 
DEFAULT_CFE_DH 0.1 #define DEFAULT_CFE_E 2.0 @@ -620,24 +621,27 @@ void run() // Perform permutation testing if (!get_options ("notest").size()) { - matrix_type perm_distribution, uncorrected_pvalues; + matrix_type null_distribution, uncorrected_pvalues; + count_matrix_type null_contributions; Stats::PermTest::run_permutations (glm_test, cfe_integrator, empirical_cfe_statistic, - cfe_output, perm_distribution, uncorrected_pvalues); + cfe_output, null_distribution, null_contributions, uncorrected_pvalues); - ProgressBar progress ("Outputting final results", 3*num_contrasts + 1); + ProgressBar progress ("Outputting final results", 4*num_contrasts + 1); for (size_t i = 0; i != num_contrasts; ++i) { - save_vector (perm_distribution.col(i), Path::join (output_fixel_directory, "perm_dist" + postfix(i) + ".txt")); + save_vector (null_distribution.col(i), Path::join (output_fixel_directory, "perm_dist" + postfix(i) + ".txt")); ++progress; } - const matrix_type pvalue_output = MR::Math::Stats::fwe_pvalue (perm_distribution, cfe_output); + const matrix_type pvalue_output = MR::Math::Stats::fwe_pvalue (null_distribution, cfe_output); ++progress; for (size_t i = 0; i != num_contrasts; ++i) { write_fixel_output (Path::join (output_fixel_directory, "fwe_pvalue" + postfix(i) + ".mif"), pvalue_output.col(i), output_header); ++progress; write_fixel_output (Path::join (output_fixel_directory, "uncorrected_pvalue" + postfix(i) + ".mif"), uncorrected_pvalues.col(i), output_header); ++progress; + write_fixel_output (Path::join (output_fixel_directory, "null_contributions" + postfix(i) + ".mif"), null_contributions.col(i), output_header); + ++progress; } } } diff --git a/cmd/mrclusterstats.cpp b/cmd/mrclusterstats.cpp index 5928886f03..e74837aec7 100644 --- a/cmd/mrclusterstats.cpp +++ b/cmd/mrclusterstats.cpp @@ -37,6 +37,8 @@ using namespace App; using namespace MR::Math::Stats; using namespace MR::Math::Stats::GLM; +using Stats::PermTest::count_matrix_type; + #define DEFAULT_TFCE_DH 0.1 #define DEFAULT_TFCE_H 2.0 @@ -335,24 +337,25 @@ void run() { if (!get_options ("notest").size()) { - matrix_type perm_distribution, uncorrected_pvalue; + matrix_type null_distribution, uncorrected_pvalue; + count_matrix_type null_contributions; Stats::PermTest::run_permutations (glm_test, enhancer, empirical_enhanced_statistic, - default_enhanced_output, perm_distribution, uncorrected_pvalue); + default_enhanced_output, null_distribution, null_contributions, uncorrected_pvalue); for (size_t i = 0; i != num_contrasts; ++i) - save_vector (perm_distribution.col(i), prefix + "perm_dist" + postfix(i) + ".txt"); + save_vector (null_distribution.col(i), prefix + "perm_dist" + postfix(i) + ".txt"); - ProgressBar progress ("Generating output images", 1 + (2 * num_contrasts)); - for (size_t i = 0; i != num_contrasts; ++i) { - write_output (uncorrected_pvalue.col(i), *v2v, prefix + "uncorrected_pvalue" + postfix(i) + ".mif", output_header); - ++progress; - } - const matrix_type fwe_pvalue_output = MR::Math::Stats::fwe_pvalue (perm_distribution, default_enhanced_output); + ProgressBar progress ("Generating output images", 1 + (3 * num_contrasts)); + const matrix_type fwe_pvalue_output = MR::Math::Stats::fwe_pvalue (null_distribution, default_enhanced_output); ++progress; for (size_t i = 0; i != num_contrasts; ++i) { write_output (fwe_pvalue_output.col(i), *v2v, prefix + "fwe_pvalue" + postfix(i) + ".mif", output_header); ++progress; + write_output (uncorrected_pvalue.col(i), *v2v, prefix + "uncorrected_pvalue" + postfix(i) + ".mif", 
output_header); + ++progress; + write_output (null_contributions.col(i), *v2v, prefix + "null_contributions" + postfix(i) + ".mif", output_header); + ++progress; } } diff --git a/cmd/vectorstats.cpp b/cmd/vectorstats.cpp index 1eea792d71..c401e5352c 100644 --- a/cmd/vectorstats.cpp +++ b/cmd/vectorstats.cpp @@ -71,6 +71,7 @@ void usage () using Math::Stats::matrix_type; using Math::Stats::vector_type; +using Stats::PermTest::count_matrix_type; @@ -224,14 +225,16 @@ void run() std::shared_ptr enhancer; matrix_type null_distribution, uncorrected_pvalues; + count_matrix_type null_contributions; matrix_type empirical_distribution; // unused Stats::PermTest::run_permutations (glm_test, enhancer, empirical_distribution, - default_tvalues, null_distribution, uncorrected_pvalues); + default_tvalues, null_distribution, null_contributions, uncorrected_pvalues); const matrix_type fwe_pvalues = MR::Math::Stats::fwe_pvalue (null_distribution, default_tvalues); for (size_t i = 0; i != num_contrasts; ++i) { save_vector (fwe_pvalues.col(i), output_prefix + "fwe_pvalue" + postfix(i) + ".csv"); save_vector (uncorrected_pvalues.col(i), output_prefix + "uncorrected_pvalue" + postfix(i) + ".csv"); + save_vector (null_contributions.col(i), output_prefix + "null_contributions" + postfix(i) + ".csv"); } } diff --git a/src/stats/permtest.cpp b/src/stats/permtest.cpp index 0190fc8b0f..3f423c5d3f 100644 --- a/src/stats/permtest.cpp +++ b/src/stats/permtest.cpp @@ -81,6 +81,7 @@ namespace MR const matrix_type& empirical_enhanced_statistics, const matrix_type& default_enhanced_statistics, matrix_type& perm_dist, + count_matrix_type& perm_dist_contributions, count_matrix_type& global_uncorrected_pvalue_counter) : stats_calculator (stats_calculator), enhancer (enhancer), @@ -88,9 +89,11 @@ namespace MR default_enhanced_statistics (default_enhanced_statistics), statistics (stats_calculator->num_elements(), stats_calculator->num_outputs()), enhanced_statistics (stats_calculator->num_elements(), stats_calculator->num_outputs()), - uncorrected_pvalue_counter (count_matrix_type::Zero (stats_calculator->num_elements(), stats_calculator->num_outputs())), perm_dist (perm_dist), + global_perm_dist_contributions (perm_dist_contributions), + perm_dist_contribution_counter (count_matrix_type::Zero (stats_calculator->num_elements(), stats_calculator->num_outputs())), global_uncorrected_pvalue_counter (global_uncorrected_pvalue_counter), + uncorrected_pvalue_counter (count_matrix_type::Zero (stats_calculator->num_elements(), stats_calculator->num_outputs())), mutex (new std::mutex()) { assert (stats_calculator); @@ -102,6 +105,7 @@ namespace MR { std::lock_guard lock (*mutex); global_uncorrected_pvalue_counter += uncorrected_pvalue_counter; + global_perm_dist_contributions += perm_dist_contribution_counter; } @@ -117,9 +121,10 @@ namespace MR if (empirical_enhanced_statistics.size()) enhanced_statistics.array() /= empirical_enhanced_statistics.array(); - perm_dist.row(shuffle.index) = enhanced_statistics.colwise().maxCoeff(); - + ssize_t max_index; for (ssize_t contrast = 0; contrast != enhanced_statistics.cols(); ++contrast) { + perm_dist(shuffle.index, contrast) = enhanced_statistics.col (contrast).maxCoeff (&max_index); + perm_dist_contribution_counter(max_index, contrast)++; for (ssize_t element = 0; element != enhanced_statistics.rows(); ++element) { if (default_enhanced_statistics(element, contrast) > enhanced_statistics(element, contrast)) uncorrected_pvalue_counter(element, contrast)++; @@ -188,11 +193,13 @@ namespace MR 
const matrix_type& empirical_enhanced_statistic, const matrix_type& default_enhanced_statistics, matrix_type& perm_dist, + count_matrix_type& perm_dist_contributions, matrix_type& uncorrected_pvalues) { assert (stats_calculator); Math::Stats::Shuffler shuffler (stats_calculator->num_subjects(), false, "Running permutations"); perm_dist.resize (shuffler.size(), stats_calculator->num_outputs()); + perm_dist_contributions = count_matrix_type::Zero (stats_calculator->num_elements(), stats_calculator->num_outputs()); count_matrix_type global_uncorrected_pvalue_count (count_matrix_type::Zero (stats_calculator->num_elements(), stats_calculator->num_outputs())); { @@ -200,6 +207,7 @@ namespace MR empirical_enhanced_statistic, default_enhanced_statistics, perm_dist, + perm_dist_contributions, global_uncorrected_pvalue_count); Thread::run_queue (shuffler, Math::Stats::Shuffle(), Thread::multi (processor)); } diff --git a/src/stats/permtest.h b/src/stats/permtest.h index b82caac5d1..b97a65a596 100644 --- a/src/stats/permtest.h +++ b/src/stats/permtest.h @@ -87,6 +87,7 @@ namespace MR const matrix_type& empirical_enhanced_statistics, const matrix_type& default_enhanced_statistics, matrix_type& perm_dist, + count_matrix_type& global_perm_dist_contributions, count_matrix_type& global_uncorrected_pvalue_counter); ~Processor(); @@ -100,9 +101,11 @@ namespace MR const matrix_type& default_enhanced_statistics; matrix_type statistics; matrix_type enhanced_statistics; - count_matrix_type uncorrected_pvalue_counter; matrix_type& perm_dist; + count_matrix_type& global_perm_dist_contributions; + count_matrix_type perm_dist_contribution_counter; count_matrix_type& global_uncorrected_pvalue_counter; + count_matrix_type uncorrected_pvalue_counter; std::shared_ptr mutex; }; @@ -132,6 +135,7 @@ namespace MR const matrix_type& empirical_enhanced_statistic, const matrix_type& default_enhanced_statistics, matrix_type& perm_dist, + count_matrix_type& perm_dist_contributions, matrix_type& uncorrected_pvalues); //! @} From 47e7288f3a70393c6b1896924fede67e214d952d Mon Sep 17 00:00:00 2001 From: Robert Smith Date: Fri, 6 Jul 2018 18:09:21 +1000 Subject: [PATCH 0184/1471] Scripts: Better output image key-value handling Because Python scripts frequently call multiple underlying commands, the key-value field 'command_history' typically gets mangled by including a large number of steps that occur internally within the script and were not executed by the user; additionally, any requisite conversion to NIfTI within a script will result in loss of the command history, as well as any other header key-value fields. This commit adds the necessary functionalities in order for the header key-value entries of an image generated by an MRtrix3 Python script to behave identically to any C++ executable command: The key-value entries are copied from the input image to the output, and field 'command_history' is updated specifically with the command-line invocation of that particular script. This is handled internally using a new mrconvert option '-compel_keyvalues', which is appropriately hidden from users. Closes #1188. 
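The dwipreproc portion of this change boils down to a JSON round-trip: header key-value entries are exported before processing, fields that no longer apply are stripped, and the remainder is re-attached to the final output. A condensed sketch of that step (the file names and the subset of keys shown here follow the patch; the full list of removed keys is longer, and dwi.json is assumed to have been written by the earlier -json_export call):

    import json

    keys_to_remove = ['PhaseEncodingDirection', 'TotalReadoutTime', 'pe_scheme']

    with open('dwi.json', 'r') as f:       # header entries exported from the input DWI
        keyvals = json.load(f)
    for key in keys_to_remove:
        keyvals.pop(key, None)             # drop fields invalidated by pre-processing
    with open('output.json', 'w') as f:    # later attached to the result by the final mrconvert call
        json.dump(keyvals, f)
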
--- bin/5ttgen | 3 +- bin/dwi2response | 2 -- bin/dwibiascorrect | 4 +-- bin/dwiintensitynorm | 6 ++-- bin/dwipreproc | 26 ++++++++------ bin/labelsgmfix | 2 +- bin/population_template | 18 +++++++--- cmd/mrconvert.cpp | 7 +++- core/app.cpp | 48 ++++++++++++++------------ core/cmdline_option.h | 10 +++++- core/header.cpp | 41 +++++++++++++++++----- lib/mrtrix3/_5ttgen/freesurfer.py | 2 ++ lib/mrtrix3/_5ttgen/fsl.py | 6 ++-- lib/mrtrix3/_5ttgen/gif.py | 4 ++- lib/mrtrix3/app.py | 23 ++++++++++++ lib/mrtrix3/dwi2response/dhollander.py | 2 ++ lib/mrtrix3/dwi2response/fa.py | 2 ++ lib/mrtrix3/dwi2response/manual.py | 3 +- lib/mrtrix3/dwi2response/msmt_5tt.py | 2 ++ lib/mrtrix3/dwi2response/tax.py | 2 ++ lib/mrtrix3/dwi2response/tournier.py | 2 ++ lib/mrtrix3/run.py | 6 +++- 22 files changed, 158 insertions(+), 63 deletions(-) diff --git a/bin/5ttgen b/bin/5ttgen index a148978b14..cda6a6bf32 100755 --- a/bin/5ttgen +++ b/bin/5ttgen @@ -18,7 +18,7 @@ if not os.path.isdir(lib_folder): sys.path.insert(0, lib_folder) -from mrtrix3 import algorithm, app, path, run +from mrtrix3 import algorithm, app, run app.init('Robert E. Smith (robert.smith@florey.edu.au)', 'Generate a 5TT image suitable for ACT') app.cmdline.addCitation('', 'Smith, R. E.; Tournier, J.-D.; Calamante, F. & Connelly, A. Anatomically-constrained tractography: Improved diffusion MRI streamlines tractography through effective use of anatomical information. NeuroImage, 2012, 62, 1924-1938', False) @@ -52,5 +52,4 @@ if stderr: for line in stderr.splitlines(): app.warn(line) -run.command('mrconvert result.mif ' + path.fromUser(app.args.output, True) + (' -force' if app.forceOverwrite else '')) app.complete() diff --git a/bin/dwi2response b/bin/dwi2response index af0be53533..8beeeb1310 100755 --- a/bin/dwi2response +++ b/bin/dwi2response @@ -108,6 +108,4 @@ alg.execute() # Finalize for all algorithms -if app.args.voxels: - run.command('mrconvert voxels.mif ' + path.fromUser(app.args.voxels, True) + (' -force' if app.forceOverwrite else '')) app.complete() diff --git a/bin/dwibiascorrect b/bin/dwibiascorrect index b8ca354ffe..579201b622 100755 --- a/bin/dwibiascorrect +++ b/bin/dwibiascorrect @@ -140,7 +140,7 @@ elif app.args.ants: run.command('mrcalc in.mif ' + bias_path + ' -div result.mif') -run.command('mrconvert result.mif ' + path.fromUser(app.args.output, True) + (' -force' if app.forceOverwrite else '')) +run.command('mrconvert result.mif ' + path.fromUser(app.args.output, True) + app.mrconvertOutputOption(path.fromUser(app.args.input, True))) if app.args.bias: - run.command('mrconvert ' + bias_path + ' ' + path.fromUser(app.args.bias, True) + (' -force' if app.forceOverwrite else '')) + run.command('mrconvert ' + bias_path + ' ' + path.fromUser(app.args.bias, True) + app.mrconvertOutputOption(path.fromUser(app.args.input, True))) app.complete() diff --git a/bin/dwiintensitynorm b/bin/dwiintensitynorm index 046d2d596f..4ca3fa1d84 100755 --- a/bin/dwiintensitynorm +++ b/bin/dwiintensitynorm @@ -103,12 +103,12 @@ progress = app.progressBar('Intensity normalising subject images', len(input_lis file.makeDir('wm_mask_warped') for i in input_list: run.command('mrtransform template_wm_mask.mif -interp nearest -warp_full ' + os.path.join('population_template', 'warps', i.prefix + '.mif') + ' ' + os.path.join('wm_mask_warped', i.prefix + '.mif') + ' -from 2 -template ' + os.path.join('fa', i.prefix + '.mif')) - run.command('dwinormalise ' + abspath(i.directory, i.filename) + ' ' + os.path.join('wm_mask_warped', i.prefix + '.mif') + ' ' + 
path.fromUser(os.path.join(app.args.output_dir, i.filename), True) + (' -force' if app.forceOverwrite else '')) + run.command('dwinormalise ' + abspath(i.directory, i.filename) + ' ' + os.path.join('wm_mask_warped', i.prefix + '.mif') + ' ' + path.fromUser(os.path.join(app.args.output_dir, i.filename), True) + app.mrconvertOutputOption(path.fromUser(os.path.join(inputDir, i.filename), True))) progress.increment() progress.done() app.console('Exporting template images to user locations') -run.command('mrconvert template_wm_mask.mif ' + path.fromUser(app.args.wm_mask, True) + (' -force' if app.forceOverwrite else '')) -run.command('mrconvert fa_template.mif ' + path.fromUser(app.args.fa_template, True) + (' -force' if app.forceOverwrite else '')) +run.command('mrconvert template_wm_mask.mif ' + path.fromUser(app.args.wm_mask, True) + app.mrconvertOutputOption('NULL')) +run.command('mrconvert fa_template.mif ' + path.fromUser(app.args.fa_template, True) + app.mrconvertOutputOption('NULL')) app.complete() diff --git a/bin/dwipreproc b/bin/dwipreproc index e9d296ea3c..620a2f82e7 100755 --- a/bin/dwipreproc +++ b/bin/dwipreproc @@ -22,7 +22,7 @@ if not os.path.isdir(lib_folder): sys.exit(1) sys.path.insert(0, lib_folder) -import math, itertools, shutil +import itertools, json, math, shutil from mrtrix3 import app, file, fsl, image, path, phaseEncoding, run #pylint: disable=redefined-builtin @@ -156,10 +156,11 @@ if app.args.grad: grad_option = ' -grad ' + path.fromUser(app.args.grad, True) elif app.args.fslgrad: grad_option = ' -fslgrad ' + path.fromUser(app.args.fslgrad[0], True) + ' ' + path.fromUser(app.args.fslgrad[1], True) -json_option = '' +json_import_option = '' if app.args.json_import: - json_option = ' -json_import ' + path.fromUser(app.args.json_import, True) -run.command('mrconvert ' + path.fromUser(app.args.input, True) + ' ' + path.toTemp('dwi.mif', True) + grad_option + json_option) + json_import_option = ' -json_import ' + path.fromUser(app.args.json_import, True) +json_export_option = ' -json_export ' + path.toTemp('dwi.json', True) +run.command('mrconvert ' + path.fromUser(app.args.input, True) + ' ' + path.toTemp('dwi.mif', True) + grad_option + json_import_option + json_export_option) if app.args.se_epi: image.check3DNonunity(path.fromUser(app.args.se_epi, False)) run.command('mrconvert ' + path.fromUser(app.args.se_epi, True) + ' ' + path.toTemp('se_epi.mif', True)) @@ -1072,14 +1073,17 @@ if eddyqc_path: # output image, as they may have been useful for controlling pre-processing # but are no longer required, and will just bloat the key-value listings of # all subsequent derived images -# Disabled this for now: The output from eddy is a NIfTI, so all these fields -# have been lost. 
For now just neglect to re-introduce them; in the future, -# this may be combined with GitHub Issue #1188 (proper behaviour of -# command_history header key-value entry when running a Python script) -#keys_to_remove = [ 'EchoTime', 'FlipAngle', 'MultibandAccelerationFactor', 'PhaseEncodingDirection', 'RepetitionTime', 'SliceEncodingDirection', 'SliceTiming', 'TotalReadoutTime', 'pe_scheme' ] -#clear_property_options = ' ' + ' '.join(['-clear_property '+key for key in keys_to_remove if key in dwi_header.keyval() ]) +keys_to_remove = [ 'EchoTime', 'FlipAngle', 'MultibandAccelerationFactor', 'PhaseEncodingDirection', 'RepetitionTime', 'SliceEncodingDirection', 'SliceTiming', 'TotalReadoutTime', 'pe_scheme' ] +# Get the header key-value entries from the input DWI, remove those we don't wish to keep, and +# export the result to a new JSON file so that they can be inserted into the output header +with open('dwi.json', 'r') as f: + kv = json.load(f) +for key in keys_to_remove: + kv.pop(key, None) +with open('output.json', 'w') as f: + json.dump(kv, f) # Finish! -run.command('mrconvert result.mif ' + path.fromUser(app.args.output, True) + grad_export_option + (' -force' if app.forceOverwrite else '')) +run.command('mrconvert result.mif ' + path.fromUser(app.args.output, True) + grad_export_option + app.mrconvertOutputOption('output.json')) app.complete() diff --git a/bin/labelsgmfix b/bin/labelsgmfix index 92594b8312..6a85658b36 100755 --- a/bin/labelsgmfix +++ b/bin/labelsgmfix @@ -148,5 +148,5 @@ progress.done() # Insert the new delineations of all SGM structures in a single call # Enforce unsigned integer datatype of output image run.command('mrcalc sgm_new_labels.mif 0.5 -gt sgm_new_labels.mif parc.mif -if result.mif -datatype uint32') -run.command('mrconvert result.mif ' + path.fromUser(app.args.output, True) + (' -force' if app.forceOverwrite else '')) +run.command('mrconvert result.mif ' + path.fromUser(app.args.output, True) + app.mrconvertOutputOption(path.fromUser(app.args.parc, True))) app.complete() diff --git a/bin/population_template b/bin/population_template index d02425fbb4..d58f7d1f93 100755 --- a/bin/population_template +++ b/bin/population_template @@ -737,13 +737,18 @@ if dononlinear: run.function(move, os.path.join('warps_' + str(level), '%s.mif' % i.prefix), 'warps') -run.command('mrconvert ' + current_template + ' ' + path.fromUser(app.args.template, True) + (' -force' if app.forceOverwrite else '')) +run.command('mrconvert ' + current_template + ' ' + path.fromUser(app.args.template, True) + app.mrconvertOutputOption('NULL')) if app.args.warp_dir: warp_path = path.fromUser(app.args.warp_dir, False) if os.path.exists(warp_path): run.function(rmtree, warp_path) - run.function(copytree, 'warps', warp_path) + os.makedirs(warp_path) + progress = app.progressBar('Copying non-linear warps to output directory "' + warp_path + '"') + for i in inputs: + run.command('mrconvert ' + os.path.join('warps', '%s.mif' % i.prefix) + ' ' + os.path.join(warp_path, '%s.mif' % i.prefix) + app.mrconvertOutputOption(path.fromUser(os.path.join(inputDir, i.filename), True))) + progress.increment() + progress.done() if app.args.linear_transformations_dir: linear_transformations_path = path.fromUser(app.args.linear_transformations_dir, False) @@ -755,9 +760,14 @@ if app.args.transformed_dir: transformed_path = path.fromUser(app.args.transformed_dir, False) if os.path.exists(transformed_path): run.function(rmtree, transformed_path) - run.function(copytree, 'inputs_transformed', transformed_path) + 
os.makedirs(transformed_path) + progress = app.progressBar('Copying transformed images to output directory "' + transformed_path + '"') + for i in inputs: + run.command('mrconvert ' + os.path.join('inputs_transformed', '%s.mif' % i.prefix) + ' ' + os.path.join(transformed_path, '%s.mif' % i.prefix) + app.mrconvertOutputOption(path.fromUser(os.path.join(inputDir, i.filename), True))) + progress.increment() + progress.done() if app.args.template_mask: - run.command('mrconvert ' + current_template_mask + ' ' + path.fromUser(app.args.template_mask, True) + (' -force' if app.forceOverwrite else '')) + run.command('mrconvert ' + current_template_mask + ' ' + path.fromUser(app.args.template_mask, True) + app.mrconvertOutputOption('NULL')) app.complete() diff --git a/cmd/mrconvert.cpp b/cmd/mrconvert.cpp index 2ebc65250e..91a39e3e1d 100644 --- a/cmd/mrconvert.cpp +++ b/cmd/mrconvert.cpp @@ -162,7 +162,12 @@ void usage () + DWI::GradExportOptions() + PhaseEncoding::ImportOptions - + PhaseEncoding::ExportOptions; + + PhaseEncoding::ExportOptions + + + OptionGroup ("Hidden option for manipulating header key-value entries").hidden() + + Option ("compel_keyvalues", "force the header key-value contents to reflect invocation of a higher-level script of which this is the last step") + + Argument ("basis").type_text() + + Argument ("command").type_text(); } diff --git a/core/app.cpp b/core/app.cpp index 51b77b9d85..990097d64b 100644 --- a/core/app.cpp +++ b/core/app.cpp @@ -402,13 +402,15 @@ namespace MR size_t n = i; while ((*this)[n].name != group_names[i]) ++n; - s += (*this)[n].header (format); - while (n < size()) { - if ((*this)[n].name == group_names[i]) - s += (*this)[n].contents (format); - ++n; + if ((*this)[n].show) { + s += (*this)[n].header (format); + while (n < size()) { + if ((*this)[n].name == group_names[i]) + s += (*this)[n].contents (format); + ++n; + } + s += OptionGroup::footer (format); } - s += OptionGroup::footer (format); } return s; @@ -683,14 +685,16 @@ namespace MR size_t n = i; while (OPTIONS[n].name != group_names[i]) ++n; - if (OPTIONS[n].name != std::string("OPTIONS")) - s += std::string ("#### ") + OPTIONS[n].name + "\n\n"; - while (n < OPTIONS.size()) { - if (OPTIONS[n].name == group_names[i]) { - for (size_t o = 0; o < OPTIONS[n].size(); ++o) - s += format_option (OPTIONS[n][o]); + if (OPTIONS[n].show) { + if (OPTIONS[n].name != std::string("OPTIONS")) + s += std::string ("#### ") + OPTIONS[n].name + "\n\n"; + while (n < OPTIONS.size()) { + if (OPTIONS[n].name == group_names[i]) { + for (size_t o = 0; o < OPTIONS[n].size(); ++o) + s += format_option (OPTIONS[n][o]); + } + ++n; } - ++n; } } @@ -773,14 +777,12 @@ namespace MR s += std::string("- *") + ARGUMENTS[i].id + "*: " + escape_special (indent_newlines (ARGUMENTS[i].desc)) + "\n"; s += "\n"; - if (DESCRIPTION.size()) { s += "Description\n-----------\n\n"; for (size_t i = 0; i < DESCRIPTION.size(); ++i) s += indent_newlines (DESCRIPTION[i]) + "\n\n"; } - vector group_names; for (size_t i = 0; i < OPTIONS.size(); ++i) { if (std::find (group_names.begin(), group_names.end(), OPTIONS[i].name) == group_names.end()) @@ -800,14 +802,16 @@ namespace MR size_t n = i; while (OPTIONS[n].name != group_names[i]) ++n; - if (OPTIONS[n].name != std::string("OPTIONS")) - s += OPTIONS[n].name + std::string("\n") + std::string(std::strlen(OPTIONS[n].name), '^') + "\n\n"; - while (n < OPTIONS.size()) { - if (OPTIONS[n].name == group_names[i]) { - for (size_t o = 0; o < OPTIONS[n].size(); ++o) - s += format_option (OPTIONS[n][o]); + if 
(OPTIONS[n].show) { + if (OPTIONS[n].name != std::string("OPTIONS")) + s += OPTIONS[n].name + std::string("\n") + std::string(std::strlen(OPTIONS[n].name), '^') + "\n\n"; + while (n < OPTIONS.size()) { + if (OPTIONS[n].name == group_names[i]) { + for (size_t o = 0; o < OPTIONS[n].size(); ++o) + s += format_option (OPTIONS[n][o]); + } + ++n; } - ++n; } } diff --git a/core/cmdline_option.h b/core/cmdline_option.h index bab4456b92..59e6715b47 100644 --- a/core/cmdline_option.h +++ b/core/cmdline_option.h @@ -416,7 +416,7 @@ namespace MR */ class OptionGroup : public vector