diff --git a/.circleci/config.yml b/.circleci/config.yml
index bca927a36d3..93874db5441 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -444,8 +444,8 @@ jobs:
deploy:
- machine:
- image: ubuntu-2004:202111-01
+ docker:
+ - image: cimg/base:current-22.04
steps:
- attach_workspace:
at: /tmp/build
diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md
index 53e02d49867..b7ab58dc917 100644
--- a/.github/CONTRIBUTING.md
+++ b/.github/CONTRIBUTING.md
@@ -10,4 +10,4 @@ This project and everyone participating in it is governed by the [MNE-Python's C
## How to contribute
-Before contributing make sure you are familiar with [our contributing guide](https://mne.tools/dev/install/contributing.html).
+Before contributing make sure you are familiar with [our contributing guide](https://mne.tools/dev/development/contributing.html).
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index ea102484a7f..1ca19246c37 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -1,5 +1,5 @@
Thanks for contributing a pull request! Please make sure you have read the
-[contribution guidelines](https://mne.tools/dev/install/contributing.html)
+[contribution guidelines](https://mne.tools/dev/development/contributing.html)
before submitting.
Please be aware that we are a loose team of volunteers so patience is
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 42bbaba2a21..908555af797 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -23,14 +23,14 @@ jobs:
- uses: actions/setup-python@v5
with:
python-version: '3.11'
- - uses: pre-commit/action@v3.0.0
+ - uses: pre-commit/action@v3.0.1
bandit:
name: Bandit
needs: style
runs-on: ubuntu-latest
steps:
- - uses: davidslusser/actions_python_bandit@v1.0.0
+ - uses: davidslusser/actions_python_bandit@v1.0.1
with:
src: "mne"
options: "-c pyproject.toml -ll -r"
@@ -129,5 +129,7 @@ jobs:
path: ~/mne_data
- run: ./tools/github_actions_download.sh
- run: ./tools/github_actions_test.sh
- - uses: codecov/codecov-action@v3
+ - uses: codecov/codecov-action@v4
+ with:
+ token: ${{ secrets.CODECOV_TOKEN }}
if: success()
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index cfc33cc5ceb..0298815545a 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,7 +1,7 @@
repos:
# Ruff mne
- repo: https://github.com/astral-sh/ruff-pre-commit
- rev: v0.1.14
+ rev: v0.3.0
hooks:
- id: ruff
name: ruff lint mne
@@ -11,18 +11,14 @@ repos:
name: ruff lint mne preview
args: ["--fix", "--preview", "--select=NPY201"]
files: ^mne/
- - id: ruff-format
- name: ruff format mne
- files: ^mne/
- id: ruff
- name: ruff lint tutorials and examples
+ name: ruff lint doc, tutorials, and examples
# D103: missing docstring in public function
# D400: docstring first line must end with period
args: ["--ignore=D103,D400", "--fix"]
- files: ^tutorials/|^examples/
+ files: ^doc/|^tutorials/|^examples/
- id: ruff-format
- name: ruff format tutorials and examples
- files: ^tutorials/|^examples/
+ files: ^mne/|^doc/|^tutorials/|^examples/
# Codespell
- repo: https://github.com/codespell-project/codespell
@@ -36,7 +32,7 @@ repos:
# yamllint
- repo: https://github.com/adrienverge/yamllint.git
- rev: v1.33.0
+ rev: v1.35.1
hooks:
- id: yamllint
args: [--strict, -c, .yamllint.yml]
diff --git a/README.rst b/README.rst
index 433c6a1d82f..806f5469e1d 100644
--- a/README.rst
+++ b/README.rst
@@ -88,12 +88,12 @@ For full functionality, some functions require:
- `scikit-learn `__ ≥ 1.0
- `Joblib `__ ≥ 0.15 (for parallelization)
- `mne-qt-browser `__ ≥ 0.1 (for fast raw data visualization)
-- `Qt `__ ≥ 5.12 via one of the following bindings (for fast raw data visualization and interactive 3D visualization):
+- `Qt `__ ≥ 5.15 via one of the following bindings (for fast raw data visualization and interactive 3D visualization):
- `PyQt6 `__ ≥ 6.0
- `PySide6 `__ ≥ 6.0
- - `PyQt5 `__ ≥ 5.12
- - `PySide2 `__ ≥ 5.12
+ - `PyQt5 `__ ≥ 5.15
+ - `PySide2 `__ ≥ 5.15
- `Numba `__ ≥ 0.54.0
- `NiBabel `__ ≥ 3.2.1
diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index 6cac2d5990f..43cdb1db960 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -104,7 +104,7 @@ stages:
- bash: |
set -e
python -m pip install --progress-bar off --upgrade pip
- python -m pip install --progress-bar off "mne-qt-browser[opengl] @ git+https://github.com/mne-tools/mne-qt-browser.git@main" pyvista scikit-learn pytest-error-for-skips python-picard "PyQt6!=6.5.1,!=6.6.1" "PyQt6-Qt6!=6.6.1" qtpy nibabel sphinx-gallery
+ python -m pip install --progress-bar off "mne-qt-browser[opengl] @ git+https://github.com/mne-tools/mne-qt-browser.git@main" pyvista scikit-learn pytest-error-for-skips python-picard "PyQt6!=6.5.1,!=6.6.1,!=6.6.2" "PyQt6-Qt6!=6.6.1,!=6.6.2" qtpy nibabel sphinx-gallery
python -m pip uninstall -yq mne
python -m pip install --progress-bar off --upgrade -e .[test]
displayName: 'Install dependencies with pip'
@@ -183,7 +183,7 @@ stages:
displayName: 'Get test data'
- bash: |
set -e
- python -m pip install "PyQt6!=6.6.1" "PyQt6-Qt6!=6.6.1"
+ python -m pip install "PyQt6!=6.6.1,!=6.6.2" "PyQt6-Qt6!=6.6.1,!=6.6.2"
LD_DEBUG=libs python -c "from PyQt6.QtWidgets import QApplication, QWidget; app = QApplication([]); import matplotlib; matplotlib.use('QtAgg'); import matplotlib.pyplot as plt; plt.figure()"
- bash: |
mne sys_info -pd
diff --git a/doc/_includes/channel_interpolation.rst b/doc/_includes/channel_interpolation.rst
index 4639604af58..e90a763d214 100644
--- a/doc/_includes/channel_interpolation.rst
+++ b/doc/_includes/channel_interpolation.rst
@@ -59,7 +59,7 @@ where :math:`G_{ds} \in R^{M \times N}` computes :math:`g_{m}(\boldsymbol{r_i},
To interpolate bad channels, one can simply do:
- >>> evoked.interpolate_bads(reset_bads=False) # doctest: +SKIP
+ >>> evoked.interpolate_bads(reset_bads=False) # doctest: +SKIP
and the bad channel will be fixed.
@@ -67,4 +67,4 @@ and the bad channel will be fixed.
.. topic:: Examples:
- * :ref:`ex-interpolate-bad-channels`
+ * :ref:`ex-interpolate-bad-channels`
diff --git a/doc/_includes/forward.rst b/doc/_includes/forward.rst
index f92632f8220..d04eeba7b5b 100644
--- a/doc/_includes/forward.rst
+++ b/doc/_includes/forward.rst
@@ -130,26 +130,26 @@ transformation symbols (:math:`T_x`) indicate the transformations actually
present in the FreeSurfer files. Generally,
.. math:: \begin{bmatrix}
- x_2 \\
- y_2 \\
- z_2 \\
- 1
- \end{bmatrix} = T_{12} \begin{bmatrix}
- x_1 \\
- y_1 \\
- z_1 \\
- 1
- \end{bmatrix} = \begin{bmatrix}
- R_{11} & R_{12} & R_{13} & x_0 \\
- R_{21} & R_{22} & R_{23} & y_0 \\
- R_{31} & R_{32} & R_{33} & z_0 \\
- 0 & 0 & 0 & 1
- \end{bmatrix} \begin{bmatrix}
- x_1 \\
- y_1 \\
- z_1 \\
- 1
- \end{bmatrix}\ ,
+ x_2 \\
+ y_2 \\
+ z_2 \\
+ 1
+ \end{bmatrix} = T_{12} \begin{bmatrix}
+ x_1 \\
+ y_1 \\
+ z_1 \\
+ 1
+ \end{bmatrix} = \begin{bmatrix}
+ R_{11} & R_{12} & R_{13} & x_0 \\
+ R_{21} & R_{22} & R_{23} & y_0 \\
+ R_{31} & R_{32} & R_{33} & z_0 \\
+ 0 & 0 & 0 & 1
+ \end{bmatrix} \begin{bmatrix}
+ x_1 \\
+ y_1 \\
+ z_1 \\
+ 1
+ \end{bmatrix}\ ,
where :math:`x_k`, :math:`y_k`, and :math:`z_k` are the location coordinates in
two coordinate systems, :math:`T_{12}` is the coordinate transformation from
@@ -161,20 +161,20 @@ files produced by FreeSurfer and MNE.
The fixed transformations :math:`T_-` and :math:`T_+` are:
.. math:: T_{-} = \begin{bmatrix}
- 0.99 & 0 & 0 & 0 \\
- 0 & 0.9688 & 0.042 & 0 \\
- 0 & -0.0485 & 0.839 & 0 \\
- 0 & 0 & 0 & 1
- \end{bmatrix}
+ 0.99 & 0 & 0 & 0 \\
+ 0 & 0.9688 & 0.042 & 0 \\
+ 0 & -0.0485 & 0.839 & 0 \\
+ 0 & 0 & 0 & 1
+ \end{bmatrix}
and
.. math:: T_{+} = \begin{bmatrix}
- 0.99 & 0 & 0 & 0 \\
- 0 & 0.9688 & 0.046 & 0 \\
- 0 & -0.0485 & 0.9189 & 0 \\
- 0 & 0 & 0 & 1
- \end{bmatrix}
+ 0.99 & 0 & 0 & 0 \\
+ 0 & 0.9688 & 0.046 & 0 \\
+ 0 & -0.0485 & 0.9189 & 0 \\
+ 0 & 0 & 0 & 1
+ \end{bmatrix}
.. note::
This section does not discuss the transformation between the MRI voxel
@@ -352,11 +352,11 @@ coordinates (:math:`r_D`) by
where
.. math:: T = \begin{bmatrix}
- e_x & 0 \\
- e_y & 0 \\
- e_z & 0 \\
- r_{0D} & 1
- \end{bmatrix}\ .
+ e_x & 0 \\
+ e_y & 0 \\
+ e_z & 0 \\
+ r_{0D} & 1
+ \end{bmatrix}\ .
Calculation of the magnetic field
---------------------------------
diff --git a/doc/_includes/ssp.rst b/doc/_includes/ssp.rst
index 1bc860d15db..40b25a237db 100644
--- a/doc/_includes/ssp.rst
+++ b/doc/_includes/ssp.rst
@@ -101,12 +101,12 @@ The EEG average reference is the mean signal over all the sensors. It is
typical in EEG analysis to subtract the average reference from all the sensor
signals :math:`b^{1}(t), ..., b^{n}(t)`. That is:
-.. math:: {b}^{j}_{s}(t) = b^{j}(t) - \frac{1}{n}\sum_{k}{b^k(t)}
+.. math:: {b}^{j}_{s}(t) = b^{j}(t) - \frac{1}{n}\sum_{k}{b^k(t)}
:name: eeg_proj
where the noise term :math:`b_{n}^{j}(t)` is given by
-.. math:: b_{n}^{j}(t) = \frac{1}{n}\sum_{k}{b^k(t)}
+.. math:: b_{n}^{j}(t) = \frac{1}{n}\sum_{k}{b^k(t)}
:name: noise_term
Thus, the projector vector :math:`P_{\perp}` will be given by
diff --git a/doc/api/events.rst b/doc/api/events.rst
index f9447741a09..3f7159a22d5 100644
--- a/doc/api/events.rst
+++ b/doc/api/events.rst
@@ -55,4 +55,4 @@ Events
average_movements
combine_event_ids
equalize_epoch_counts
- make_metadata
\ No newline at end of file
+ make_metadata
diff --git a/doc/api/file_io.rst b/doc/api/file_io.rst
index 3b43de6ce64..2da9059deb3 100644
--- a/doc/api/file_io.rst
+++ b/doc/api/file_io.rst
@@ -63,4 +63,4 @@ Base class:
:toctree: ../generated/
:template: autosummary/class_no_members.rst
- BaseEpochs
\ No newline at end of file
+ BaseEpochs
diff --git a/doc/api/preprocessing.rst b/doc/api/preprocessing.rst
index 54d4bfa2999..1e0e9e56079 100644
--- a/doc/api/preprocessing.rst
+++ b/doc/api/preprocessing.rst
@@ -93,6 +93,7 @@ Projections:
cortical_signal_suppression
create_ecg_epochs
create_eog_epochs
+ find_bad_channels_lof
find_bad_channels_maxwell
find_ecg_events
find_eog_events
@@ -162,6 +163,8 @@ Projections:
Calibration
read_eyelink_calibration
set_channel_types_eyetrack
+ convert_units
+ get_screen_visual_angle
interpolate_blinks
EEG referencing:
diff --git a/doc/changes/devel/11234.newfeature.rst b/doc/changes/devel/11234.newfeature.rst
new file mode 100644
index 00000000000..46cc408a3d9
--- /dev/null
+++ b/doc/changes/devel/11234.newfeature.rst
@@ -0,0 +1 @@
+Detecting bad EEG/MEG channels using the local outlier factor (LOF) algorithm in :func:`mne.preprocessing.find_bad_channels_lof`, by :newcontrib:`Velu Prabhakar Kumaravel`.
\ No newline at end of file
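
A minimal usage sketch of the new detector (hedged: the file name is hypothetical, and ``picks``/``n_neighbors`` are shown as they are documented for recent MNE versions)::

    import mne
    from mne.preprocessing import find_bad_channels_lof

    raw = mne.io.read_raw_fif("sample_raw.fif", preload=True)  # hypothetical file
    # LOF compares each sensor's time series to its nearest neighbors;
    # it is typically run on a single channel type at a time
    bads = find_bad_channels_lof(raw, picks="eeg", n_neighbors=20)
    raw.info["bads"].extend(bads)  # mark for later interpolation/exclusion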
diff --git a/doc/changes/devel/12206.bugfix.rst b/doc/changes/devel/12206.bugfix.rst
new file mode 100644
index 00000000000..6cf72e266b9
--- /dev/null
+++ b/doc/changes/devel/12206.bugfix.rst
@@ -0,0 +1 @@
+Fix bug in :meth:`mne.Epochs.apply_function` where data was handed down incorrectly in parallel processing, by `Dominik Welke`_.
\ No newline at end of file
diff --git a/doc/changes/devel/12206.newfeature.rst b/doc/changes/devel/12206.newfeature.rst
new file mode 100644
index 00000000000..9ef966ed208
--- /dev/null
+++ b/doc/changes/devel/12206.newfeature.rst
@@ -0,0 +1,3 @@
+Custom functions applied via :meth:`mne.io.Raw.apply_function`, :meth:`mne.Epochs.apply_function` or :meth:`mne.Evoked.apply_function` can now use ``ch_idx`` or ``ch_name`` to get access to the currently processed channel during channel wise processing.
+
+:meth:`mne.Evoked.apply_function` can now also work on full data array instead of just channel wise, analogous to :meth:`mne.io.Raw.apply_function` and :meth:`mne.Epochs.apply_function`, by `Dominik Welke`_.
\ No newline at end of file
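
A short sketch of the channel-aware processing described above (assuming a preloaded ``raw``; the automatic injection of ``ch_name``/``ch_idx`` into the applied function is the new feature)::

    import numpy as np

    def demean_verbose(data, ch_name):
        # during channel-wise processing, ``ch_name`` is filled in automatically
        print(f"processing {ch_name}")
        return data - np.mean(data)

    raw.apply_function(demean_verbose, picks="eeg", channel_wise=True)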
diff --git a/doc/changes/devel/12207.newfeature.rst b/doc/changes/devel/12207.newfeature.rst
new file mode 100644
index 00000000000..7d741a06bf5
--- /dev/null
+++ b/doc/changes/devel/12207.newfeature.rst
@@ -0,0 +1 @@
+Allow :class:`mne.time_frequency.EpochsTFR` as input to :func:`mne.epochs.equalize_epoch_counts`, by `Carina Forster`_.
\ No newline at end of file
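
A sketch of the extended input type (``tfr_a`` and ``tfr_b`` are assumed :class:`mne.time_frequency.EpochsTFR` instances, e.g. from ``tfr_morlet(..., average=False)``)::

    import mne

    # drops epochs in place so both instances end up with matching counts
    mne.epochs.equalize_epoch_counts([tfr_a, tfr_b])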
diff --git a/doc/changes/devel/12237.newfeature.rst b/doc/changes/devel/12237.newfeature.rst
new file mode 100644
index 00000000000..e89822f27ed
--- /dev/null
+++ b/doc/changes/devel/12237.newfeature.rst
@@ -0,0 +1,2 @@
+Added a helper function :func:`mne.preprocessing.eyetracking.convert_units` to convert eyegaze data from pixel-on-screen values to radians of visual angle. Also added a helper function :func:`mne.preprocessing.eyetracking.get_screen_visual_angle` to get the visual angle that the participant screen subtends, by `Scott Huberty`_.
+
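
A sketch of the two helpers (the file name is hypothetical and ``raw`` is an assumed eye-tracking recording; the ``convert_units`` call matches the example updated later in this diff, while the ``get_screen_visual_angle`` return format is only sketched)::

    import mne
    from mne.preprocessing import eyetracking

    cal = eyetracking.read_eyelink_calibration(
        "sub-01.asc",  # hypothetical recording
        screen_resolution=(1920, 1080),
        screen_size=(0.53, 0.3),
        screen_distance=0.9,
    )[0]
    eyetracking.convert_units(raw, cal, to="radians")  # pixels -> radians
    print(eyetracking.get_screen_visual_angle(cal))  # angle subtended by the screen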
diff --git a/doc/changes/devel/12323.newfeature.rst b/doc/changes/devel/12323.newfeature.rst
new file mode 100644
index 00000000000..f10fdf5cf23
--- /dev/null
+++ b/doc/changes/devel/12323.newfeature.rst
@@ -0,0 +1 @@
+Add :meth:`~mne.SourceEstimate.savgol_filter`, :meth:`~mne.SourceEstimate.filter`, :meth:`~mne.SourceEstimate.apply_hilbert`, and :meth:`~mne.SourceEstimate.apply_function` methods to :class:`mne.SourceEstimate` and related classes, by `Hamza Abdelhedi`_.
\ No newline at end of file
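
A sketch of the new methods on an assumed ``stc`` (:class:`mne.SourceEstimate`); they mirror the corresponding ``Raw``/``Epochs`` filtering methods::

    stc.filter(l_freq=None, h_freq=40.0)  # low-pass the source time courses
    stc.apply_hilbert(envelope=True)      # analytic amplitude per vertex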
diff --git a/doc/changes/devel/12324.bugfix.rst b/doc/changes/devel/12324.bugfix.rst
new file mode 100644
index 00000000000..ec7f2c5849d
--- /dev/null
+++ b/doc/changes/devel/12324.bugfix.rst
@@ -0,0 +1 @@
+Add ``tol`` parameter to :meth:`mne.events_from_annotations` so that the user can specify the tolerance to ignore rounding errors of event onsets when ``chunk_duration`` is not None (default is 1e-8), by `Michiru Kaneda`_
diff --git a/doc/changes/devel/12382.apichange.rst b/doc/changes/devel/12382.apichange.rst
new file mode 100644
index 00000000000..aa38b436cf0
--- /dev/null
+++ b/doc/changes/devel/12382.apichange.rst
@@ -0,0 +1 @@
+Change :func:`mne.stc_near_sensors` ``surface`` default from the ``'pial'`` surface to the surface in ``src`` if ``src`` is not ``None`` in version 1.8, by `Alex Rockhill`_.
diff --git a/doc/changes/devel/12382.bugfix.rst b/doc/changes/devel/12382.bugfix.rst
new file mode 100644
index 00000000000..8409f016206
--- /dev/null
+++ b/doc/changes/devel/12382.bugfix.rst
@@ -0,0 +1 @@
+Fix bad channels not handled properly in :func:`mne.stc_near_sensors` by `Alex Rockhill`_.
diff --git a/doc/changes/devel/12393.bugfix.rst b/doc/changes/devel/12393.bugfix.rst
new file mode 100644
index 00000000000..017f81b398b
--- /dev/null
+++ b/doc/changes/devel/12393.bugfix.rst
@@ -0,0 +1 @@
+Change how samples are read when using ``data_format='auto'`` in :func:`mne.io.read_raw_cnt`, by `Jacob Woessner`_.
\ No newline at end of file
diff --git a/doc/changes/devel/12420.other.rst b/doc/changes/devel/12420.other.rst
new file mode 100644
index 00000000000..8b949d25dc7
--- /dev/null
+++ b/doc/changes/devel/12420.other.rst
@@ -0,0 +1 @@
+Clarify in the :ref:`EEG referencing tutorial ` that an average reference projector is required for inverse modeling, by :newcontrib:`Nabil Alibou`
diff --git a/doc/changes/devel/12430.bugfix.rst b/doc/changes/devel/12430.bugfix.rst
new file mode 100644
index 00000000000..688e7066fa8
--- /dev/null
+++ b/doc/changes/devel/12430.bugfix.rst
@@ -0,0 +1 @@
+Reformats channel and detector lookup in :func:`mne.io.read_raw_snirf` from array based to dictionary based. Removes incorrect assertions that every detector and source must have data associated with every registered optode position, by :newcontrib:`Alex Kiefer`.
\ No newline at end of file
diff --git a/doc/changes/devel/12436.bugfix.rst b/doc/changes/devel/12436.bugfix.rst
new file mode 100644
index 00000000000..7ddbd9f5d21
--- /dev/null
+++ b/doc/changes/devel/12436.bugfix.rst
@@ -0,0 +1 @@
+Fix :ref:`tut-working-with-seeg` use of :func:`mne.stc_near_sensors` to use the :class:`mne.VolSourceEstimate` positions and not the pial surface, by `Alex Rockhill`_
diff --git a/doc/changes/devel/12443.newfeature.rst b/doc/changes/devel/12443.newfeature.rst
new file mode 100644
index 00000000000..f704e45b4a5
--- /dev/null
+++ b/doc/changes/devel/12443.newfeature.rst
@@ -0,0 +1 @@
+Add option to pass ``image_kwargs`` to :meth:`mne.Report.add_epochs` to allow adjusting e.g. ``vmin`` and ``vmax`` of the epochs image in the report, by `Sophie Herbst`_.
\ No newline at end of file
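
A sketch of the new option (``epochs`` assumed in scope; ``vmin``/``vmax`` are forwarded to the underlying epochs-image plot)::

    import mne

    report = mne.Report(title="demo")
    report.add_epochs(epochs, title="Epochs", image_kwargs=dict(vmin=-40, vmax=40))

The follow-up entry for PR 12454 below extends this so that ``image_kwargs`` can also be given per channel type, e.g. ``image_kwargs=dict(eeg=dict(vmin=-40, vmax=40))``.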
diff --git a/doc/changes/devel/12444.bugfix.rst b/doc/changes/devel/12444.bugfix.rst
new file mode 100644
index 00000000000..c27fb5e8425
--- /dev/null
+++ b/doc/changes/devel/12444.bugfix.rst
@@ -0,0 +1 @@
+Fix validation of ``ch_type`` in :func:`mne.preprocessing.annotate_muscle_zscore`, by `Mathieu Scheltienne`_.
diff --git a/doc/changes/devel/12445.newfeature.rst b/doc/changes/devel/12445.newfeature.rst
new file mode 100644
index 00000000000..ccaef2c2c07
--- /dev/null
+++ b/doc/changes/devel/12445.newfeature.rst
@@ -0,0 +1 @@
+Add support for multiple raw instances in :func:`mne.preprocessing.compute_average_dev_head_t` by `Eric Larson`_.
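
A sketch of the multi-run usage (the list-of-``Raw`` form is the new feature; pairing it with a list of head-position arrays from :func:`mne.chpi.compute_head_pos` is an assumption that mirrors the single-run signature)::

    from mne.preprocessing import compute_average_dev_head_t

    # average the device-to-head transform across runs
    avg_dev_head_t = compute_average_dev_head_t([raw1, raw2], [pos1, pos2])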
diff --git a/doc/changes/devel/12446.newfeature.rst b/doc/changes/devel/12446.newfeature.rst
new file mode 100644
index 00000000000..734721ce628
--- /dev/null
+++ b/doc/changes/devel/12446.newfeature.rst
@@ -0,0 +1 @@
+Support partial pathlength factors for each wavelength in :func:`mne.preprocessing.nirs.beer_lambert_law`, by :newcontrib:`Richard Scholz`.
diff --git a/doc/changes/devel/12450.other.rst b/doc/changes/devel/12450.other.rst
new file mode 100644
index 00000000000..48265f87416
--- /dev/null
+++ b/doc/changes/devel/12450.other.rst
@@ -0,0 +1 @@
+Move private data preparation functions for BrainVision export from ``pybv`` to ``mne``, by `Clemens Brunner`_.
\ No newline at end of file
diff --git a/doc/changes/devel/12451.bugfix.rst b/doc/changes/devel/12451.bugfix.rst
new file mode 100644
index 00000000000..2aca44529f1
--- /dev/null
+++ b/doc/changes/devel/12451.bugfix.rst
@@ -0,0 +1 @@
+Fix errant redundant use of ``BIDSPath.split`` when writing split raw and epochs data, by `Eric Larson`_.
diff --git a/doc/changes/devel/12451.dependency.rst b/doc/changes/devel/12451.dependency.rst
new file mode 100644
index 00000000000..8227dd779ad
--- /dev/null
+++ b/doc/changes/devel/12451.dependency.rst
@@ -0,0 +1 @@
+``pytest-harvest`` is no longer used as a test dependency, by `Eric Larson`_.
diff --git a/doc/changes/devel/12454.newfeature.rst b/doc/changes/devel/12454.newfeature.rst
new file mode 100644
index 00000000000..5a4a9cc9cdb
--- /dev/null
+++ b/doc/changes/devel/12454.newfeature.rst
@@ -0,0 +1 @@
+Add option to pass ``image_kwargs`` per channel type to :meth:`mne.Report.add_epochs`, completing PR 12453.
\ No newline at end of file
diff --git a/doc/changes/devel/12456.bugfix.rst b/doc/changes/devel/12456.bugfix.rst
new file mode 100644
index 00000000000..01e15b3c22e
--- /dev/null
+++ b/doc/changes/devel/12456.bugfix.rst
@@ -0,0 +1 @@
+Disable config parser interpolation when reading BrainVision files, which allows using the percent sign as a regular character in channel units, by `Clemens Brunner`_.
\ No newline at end of file
diff --git a/doc/changes/devel/12461.other.rst b/doc/changes/devel/12461.other.rst
new file mode 100644
index 00000000000..b6fcea48fc7
--- /dev/null
+++ b/doc/changes/devel/12461.other.rst
@@ -0,0 +1 @@
+Fix dead links in ``README.rst`` documentation by :newcontrib:`Will Turner`.
\ No newline at end of file
diff --git a/doc/changes/devel/12462.newfeature.rst b/doc/changes/devel/12462.newfeature.rst
new file mode 100644
index 00000000000..4624579ba26
--- /dev/null
+++ b/doc/changes/devel/12462.newfeature.rst
@@ -0,0 +1 @@
+:func:`mne.epochs.make_metadata` now accepts strings as ``tmin`` and ``tmax`` parameter values, simplifying metadata creation based on time-varying events such as responses to a stimulus, by `Richard Höchenberger`_.
diff --git a/doc/changes/devel/12463.newfeature.rst b/doc/changes/devel/12463.newfeature.rst
new file mode 100644
index 00000000000..d041b0c912f
--- /dev/null
+++ b/doc/changes/devel/12463.newfeature.rst
@@ -0,0 +1 @@
+Include date of acquisition and filter parameters in ``raw.info`` for :func:`mne.io.read_raw_neuralynx` by `Kristijan Armeni`_.
\ No newline at end of file
diff --git a/doc/changes/devel/12470.bugfix.rst b/doc/changes/devel/12470.bugfix.rst
new file mode 100644
index 00000000000..d8d72843304
--- /dev/null
+++ b/doc/changes/devel/12470.bugfix.rst
@@ -0,0 +1 @@
+Fix the default color of :meth:`mne.viz.Brain.add_text` to properly contrast with the figure background color, by `Marijn van Vliet`_.
diff --git a/doc/changes/devel/12474.bugfix.rst b/doc/changes/devel/12474.bugfix.rst
new file mode 100644
index 00000000000..875d7574f7b
--- /dev/null
+++ b/doc/changes/devel/12474.bugfix.rst
@@ -0,0 +1 @@
+Changed default ECoG and sEEG electrode sizes in brain plots to better reflect real world sizes, by `Liberty Hamilton`_
diff --git a/doc/changes/devel/12481.bugfix.rst b/doc/changes/devel/12481.bugfix.rst
new file mode 100644
index 00000000000..a9108fe4040
--- /dev/null
+++ b/doc/changes/devel/12481.bugfix.rst
@@ -0,0 +1 @@
+Fix reading segmented recordings with :func:`mne.io.read_raw_eyelink` by `Dominik Welke`_.
\ No newline at end of file
diff --git a/doc/changes/devel/12483.bugfix.rst b/doc/changes/devel/12483.bugfix.rst
new file mode 100644
index 00000000000..601bf94838c
--- /dev/null
+++ b/doc/changes/devel/12483.bugfix.rst
@@ -0,0 +1 @@
+Improve compatibility with other Qt-based GUIs by handling theme icons better, by `Eric Larson`_.
diff --git a/doc/changes/devel/12489.bugfix.rst b/doc/changes/devel/12489.bugfix.rst
new file mode 100644
index 00000000000..9172ec64f7e
--- /dev/null
+++ b/doc/changes/devel/12489.bugfix.rst
@@ -0,0 +1 @@
+Fix cleaning of channel names for non-Vectorview or CTF datasets that include whitespace or dashes in their channel names, by `Mathieu Scheltienne`_.
diff --git a/doc/changes/devel/12491.dependency.rst b/doc/changes/devel/12491.dependency.rst
new file mode 100644
index 00000000000..423082320ca
--- /dev/null
+++ b/doc/changes/devel/12491.dependency.rst
@@ -0,0 +1 @@
+The minimum supported version of Qt bindings is 5.15, by `Eric Larson`_.
diff --git a/doc/changes/names.inc b/doc/changes/names.inc
index 0389f75e83e..d3dfd61b916 100644
--- a/doc/changes/names.inc
+++ b/doc/changes/names.inc
@@ -20,6 +20,8 @@
.. _Alex Gramfort: https://alexandre.gramfort.net
+.. _Alex Kiefer: https://home.alexk101.dev
+
.. _Alex Rockhill: https://github.com/alexrockhill/
.. _Alexander Rudiuk: https://github.com/ARudiuk
@@ -74,7 +76,7 @@
.. _Carlos de la Torre-Ortiz: https://ctorre.me
-.. _Carina Forster: https://github.com/carinafo
+.. _Carina Forster: https://github.com/CarinaFo
.. _Cathy Nangini: https://github.com/KatiRG
@@ -206,7 +208,7 @@
.. _Henrich Kolkhorst: https://github.com/hekolk
-.. _Hongjiang Ye: https://github.com/rubyyhj
+.. _Hongjiang Ye: https://github.com/hongjiang-ye
.. _Hubert Banville: https://github.com/hubertjb
@@ -390,6 +392,8 @@
.. _Motofumi Fushimi: https://github.com/motofumi-fushimi/motofumi-fushimi.github.io
+.. _Nabil Alibou: https://github.com/nabilalibou
+
.. _Natalie Klein: https://github.com/natalieklein
.. _Nathalie Gayraud: https://github.com/ngayraud
@@ -414,7 +418,7 @@
.. _Okba Bekhelifi: https://github.com/okbalefthanded
-.. _Olaf Hauk: https://www.neuroscience.cam.ac.uk/directory/profile.php?olafhauk
+.. _Olaf Hauk: https://neuroscience.cam.ac.uk/member/olafhauk
.. _Oleh Kozynets: https://github.com/OlehKSS
@@ -470,6 +474,8 @@
.. _Richard Koehler: https://github.com/richardkoehler
+.. _Richard Scholz: https://github.com/scholzri
+
.. _Riessarius Stargardsky: https://github.com/Riessarius
.. _Roan LaPlante: https://github.com/aestrivex
@@ -582,12 +588,16 @@
.. _Valerii Chirkov: https://github.com/vagechirkov
+.. _Velu Prabhakar Kumaravel: https://github.com/vpKumaravel
+
.. _Victor Ferat: https://github.com/vferat
.. _Victoria Peterson: https://github.com/vpeterson
.. _Xiaokai Xia: https://github.com/dddd1007
+.. _Will Turner: https://bootstrapbill.github.io
+
.. _Yaroslav Halchenko: http://haxbylab.dartmouth.edu/ppl/yarik.html
.. _Yiping Zuo: https://github.com/frostime
diff --git a/doc/changes/v0.10.rst b/doc/changes/v0.10.rst
index 6a0c3322e88..ac4f2e42857 100644
--- a/doc/changes/v0.10.rst
+++ b/doc/changes/v0.10.rst
@@ -91,7 +91,7 @@ BUG
- Fix dropping of events after downsampling stim channels by `Marijn van Vliet`_
-- Fix scaling in :func:``mne.viz.utils._setup_vmin_vmax`` by `Jaakko Leppakangas`_
+- Fix scaling in ``mne.viz.utils._setup_vmin_vmax`` by `Jaakko Leppakangas`_
- Fix order of component selection in :class:`mne.decoding.CSP` by `Clemens Brunner`_
diff --git a/doc/changes/v0.12.rst b/doc/changes/v0.12.rst
index cf01f8ff62c..b3b7aba1a39 100644
--- a/doc/changes/v0.12.rst
+++ b/doc/changes/v0.12.rst
@@ -129,7 +129,7 @@ BUG
- Fix bug in :func:`mne.io.Raw.save` where, in rare cases, automatically split files could end up writing an extra empty file that wouldn't be read properly by `Eric Larson`_
-- Fix :class:``mne.realtime.StimServer`` by removing superfluous argument ``ip`` used while initializing the object by `Mainak Jas`_.
+- Fix ``mne.realtime.StimServer`` by removing superfluous argument ``ip`` used while initializing the object by `Mainak Jas`_.
- Fix removal of projectors in :func:`mne.preprocessing.maxwell_filter` in ``st_only=True`` mode by `Eric Larson`_
@@ -175,37 +175,37 @@ Authors
The committer list for this release is the following (preceded by number of commits):
-* 348 Eric Larson
-* 347 Jaakko Leppakangas
-* 157 Alexandre Gramfort
-* 139 Jona Sassenhagen
-* 67 Jean-Remi King
-* 32 Chris Holdgraf
-* 31 Denis A. Engemann
-* 30 Mainak Jas
-* 16 Christopher J. Bailey
-* 13 Marijn van Vliet
-* 10 Mark Wronkiewicz
-* 9 Teon Brooks
-* 9 kaichogami
-* 8 Clément Moutard
-* 5 Camilo Lamus
-* 5 mmagnuski
-* 4 Christian Brodbeck
-* 4 Daniel McCloy
-* 4 Yousra Bekhti
-* 3 Fede Raimondo
-* 1 Jussi Nurminen
-* 1 MartinBaBer
-* 1 Mikolaj Magnuski
-* 1 Natalie Klein
-* 1 Niklas Wilming
-* 1 Richard Höchenberger
-* 1 Sagun Pai
-* 1 Sourav Singh
-* 1 Tom Dupré la Tour
-* 1 jona-sassenhagen@
-* 1 kambysese
-* 1 pbnsilva
-* 1 sviter
-* 1 zuxfoucault
+* 348 Eric Larson
+* 347 Jaakko Leppakangas
+* 157 Alexandre Gramfort
+* 139 Jona Sassenhagen
+* 67 Jean-Remi King
+* 32 Chris Holdgraf
+* 31 Denis A. Engemann
+* 30 Mainak Jas
+* 16 Christopher J. Bailey
+* 13 Marijn van Vliet
+* 10 Mark Wronkiewicz
+* 9 Teon Brooks
+* 9 kaichogami
+* 8 Clément Moutard
+* 5 Camilo Lamus
+* 5 mmagnuski
+* 4 Christian Brodbeck
+* 4 Daniel McCloy
+* 4 Yousra Bekhti
+* 3 Fede Raimondo
+* 1 Jussi Nurminen
+* 1 MartinBaBer
+* 1 Mikolaj Magnuski
+* 1 Natalie Klein
+* 1 Niklas Wilming
+* 1 Richard Höchenberger
+* 1 Sagun Pai
+* 1 Sourav Singh
+* 1 Tom Dupré la Tour
+* 1 jona-sassenhagen@
+* 1 kambysese
+* 1 pbnsilva
+* 1 sviter
+* 1 zuxfoucault
diff --git a/doc/changes/v0.13.rst b/doc/changes/v0.13.rst
index 425ba4c76a1..aee297d9d2d 100644
--- a/doc/changes/v0.13.rst
+++ b/doc/changes/v0.13.rst
@@ -198,7 +198,7 @@ API
- Deprecated ``mne.time_frequency.cwt_morlet`` and ``mne.time_frequency.single_trial_power`` in favour of :func:`mne.time_frequency.tfr_morlet` with parameter average=False, by `Jean-Remi King`_ and `Alex Gramfort`_
-- Add argument ``mask_type`` to func:`mne.read_events` and func:`mne.find_events` to support MNE-C style of trigger masking by `Teon Brooks`_ and `Eric Larson`_
+- Add argument ``mask_type`` to :func:`mne.read_events` and :func:`mne.find_events` to support MNE-C style of trigger masking by `Teon Brooks`_ and `Eric Larson`_
- Extended Infomax is now the new default in :func:`mne.preprocessing.infomax` (``extended=True``), by `Clemens Brunner`_
diff --git a/doc/changes/v0.15.rst b/doc/changes/v0.15.rst
index ada8180d4ac..e2de7301973 100644
--- a/doc/changes/v0.15.rst
+++ b/doc/changes/v0.15.rst
@@ -226,7 +226,7 @@ API
- ``mne.viz.decoding.plot_gat_times``, ``mne.viz.decoding.plot_gat_matrix`` are now deprecated. Use matplotlib instead as shown in the examples, by `Jean-Remi King`_ and `Alex Gramfort`_
-- Add ``norm_trace`` parameter to control single-epoch covariance normalization in :class:mne.decoding.CSP, by `Jean-Remi King`_
+- Add ``norm_trace`` parameter to control single-epoch covariance normalization in :class:`mne.decoding.CSP`, by `Jean-Remi King`_
- Allow passing a list of channel names as ``show_names`` in function :func:`mne.viz.plot_sensors` and methods :meth:`mne.Evoked.plot_sensors`, :meth:`mne.Epochs.plot_sensors` and :meth:`mne.io.Raw.plot_sensors` to show only a subset of channel names by `Jaakko Leppakangas`_
diff --git a/doc/changes/v0.17.rst b/doc/changes/v0.17.rst
index 40896b6f383..49e722c584d 100644
--- a/doc/changes/v0.17.rst
+++ b/doc/changes/v0.17.rst
@@ -234,7 +234,7 @@ API
In 0.19
The ``stim_channel`` keyword arguments will be removed from ``read_raw_...`` functions.
-- Calling :meth:``mne.io.pick.pick_info`` removing channels that are needed by compensation matrices (``info['comps']``) no longer raises ``RuntimeException`` but instead logs an info level message. By `Luke Bloy`_
+- Calling ``mne.io.pick.pick_info`` removing channels that are needed by compensation matrices (``info['comps']``) no longer raises ``RuntimeException`` but instead logs an info level message. By `Luke Bloy`_
- :meth:`mne.Epochs.save` now has the parameter ``fmt`` to specify the desired format (precision) saving epoched data, by `Stefan Repplinger`_, `Eric Larson`_ and `Alex Gramfort`_
@@ -274,44 +274,44 @@ Authors
People who contributed to this release (in alphabetical order):
-* Alexandre Gramfort
-* Antoine Gauthier
-* Britta Westner
-* Christian Brodbeck
-* Clemens Brunner
-* Daniel McCloy
-* David Sabbagh
-* Denis A. Engemann
-* Eric Larson
-* Ezequiel Mikulan
-* Henrich Kolkhorst
-* Hubert Banville
-* Jasper J.F. van den Bosch
-* Jen Evans
-* Joan Massich
-* Johan van der Meer
-* Jona Sassenhagen
-* Kambiz Tavabi
-* Lorenz Esch
-* Luke Bloy
-* Mainak Jas
-* Manu Sutela
-* Marcin Koculak
-* Marijn van Vliet
-* Mikolaj Magnuski
-* Peter J. Molfese
-* Sam Perry
-* Sara Sommariva
-* Sergey Antopolskiy
-* Sheraz Khan
-* Stefan Appelhoff
-* Stefan Repplinger
-* Steven Bethard
-* Teekuningas
-* Teon Brooks
-* Thomas Hartmann
-* Thomas Jochmann
-* Tom Dupré la Tour
-* Tristan Stenner
-* buildqa
-* jeythekey
+* Alexandre Gramfort
+* Antoine Gauthier
+* Britta Westner
+* Christian Brodbeck
+* Clemens Brunner
+* Daniel McCloy
+* David Sabbagh
+* Denis A. Engemann
+* Eric Larson
+* Ezequiel Mikulan
+* Henrich Kolkhorst
+* Hubert Banville
+* Jasper J.F. van den Bosch
+* Jen Evans
+* Joan Massich
+* Johan van der Meer
+* Jona Sassenhagen
+* Kambiz Tavabi
+* Lorenz Esch
+* Luke Bloy
+* Mainak Jas
+* Manu Sutela
+* Marcin Koculak
+* Marijn van Vliet
+* Mikolaj Magnuski
+* Peter J. Molfese
+* Sam Perry
+* Sara Sommariva
+* Sergey Antopolskiy
+* Sheraz Khan
+* Stefan Appelhoff
+* Stefan Repplinger
+* Steven Bethard
+* Teekuningas
+* Teon Brooks
+* Thomas Hartmann
+* Thomas Jochmann
+* Tom Dupré la Tour
+* Tristan Stenner
+* buildqa
+* jeythekey
diff --git a/doc/changes/v1.2.rst b/doc/changes/v1.2.rst
index b6a8b5a8edf..e292b472b03 100644
--- a/doc/changes/v1.2.rst
+++ b/doc/changes/v1.2.rst
@@ -63,7 +63,7 @@ Bugs
API changes
~~~~~~~~~~~
-- In meth:`mne.Evoked.plot`, the default value of the ``spatial_colors`` parameter has been changed to ``'auto'``, which will use spatial colors if channel locations are available (:gh:`11201` by :newcontrib:`Hüseyin Orkun Elmas` and `Daniel McCloy`_)
+- In :meth:`mne.Evoked.plot`, the default value of the ``spatial_colors`` parameter has been changed to ``'auto'``, which will use spatial colors if channel locations are available (:gh:`11201` by :newcontrib:`Hüseyin Orkun Elmas` and `Daniel McCloy`_)
- Starting with this release we now follow the Python convention of using ``FutureWarning`` instead of ``DeprecationWarning`` to signal user-facing changes to our API (:gh:`11120` by `Daniel McCloy`_)
- The ``names`` parameter of :func:`mne.viz.plot_arrowmap` and :func:`mne.viz.plot_regression_weights` has been deprecated; sensor names will be automatically drawn from the ``info_from`` or ``model`` parameter (respectively), and can be hidden, shown, or altered via the ``show_names`` parameter (:gh:`11123` by `Daniel McCloy`_)
- The ``bands`` parameter of :meth:`mne.Epochs.plot_psd_topomap` now accepts :class:`dict` input; legacy :class:`tuple` input is supported, but discouraged for new code (:gh:`11050` by `Daniel McCloy`_)
diff --git a/doc/conf.py b/doc/conf.py
index 7773be834fd..a00a34debc3 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -29,12 +29,13 @@
from mne.tests.test_docstring_parameters import error_ignores
from mne.utils import (
_assert_no_instances,
- linkcode_resolve, # noqa, analysis:ignore
+ linkcode_resolve,
run_subprocess,
sizeof_fmt,
)
from mne.viz import Brain # noqa
+assert linkcode_resolve is not None # avoid flake warnings, used by numpydoc
matplotlib.use("agg")
faulthandler.enable()
os.environ["_MNE_BROWSER_NO_BLOCK"] = "true"
@@ -62,12 +63,12 @@
# We need to triage which date type we use so that incremental builds work
# (Sphinx looks at variable changes and rewrites all files if some change)
-copyright = (
+copyright = ( # noqa: A001
f'2012–{td.year}, MNE Developers. Last updated \n' # noqa: E501
'' # noqa: E501
)
if os.getenv("MNE_FULL_DATE", "false").lower() != "true":
- copyright = f"2012–{td.year}, MNE Developers. Last updated locally."
+ copyright = f"2012–{td.year}, MNE Developers. Last updated locally." # noqa: A001
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
@@ -293,6 +294,7 @@
"RawNIRX": "mne.io.Raw",
"RawPersyst": "mne.io.Raw",
"RawSNIRF": "mne.io.Raw",
+ "Calibration": "mne.preprocessing.eyetracking.Calibration",
# dipy
"dipy.align.AffineMap": "dipy.align.imaffine.AffineMap",
"dipy.align.DiffeomorphicMap": "dipy.align.imwarp.DiffeomorphicMap",
@@ -445,16 +447,18 @@
# -- Sphinx-gallery configuration --------------------------------------------
-class Resetter(object):
+class Resetter:
"""Simple class to make the str(obj) static for Sphinx build env hash."""
def __init__(self):
self.t0 = time.time()
def __repr__(self):
+ """Make a stable repr."""
return f"<{self.__class__.__name__}>"
def __call__(self, gallery_conf, fname, when):
+ """Do the reset."""
import matplotlib.pyplot as plt
try:
@@ -744,6 +748,8 @@ def append_attr_meth_examples(app, what, name, obj, options, lines):
# Too slow
"https://speakerdeck.com/dengemann/",
"https://www.dtu.dk/english/service/phonebook/person",
+ # SSL problems sometimes
+ "http://ilabs.washington.edu",
]
linkcheck_anchors = False # saves a bit of time
linkcheck_timeout = 15 # some can be quite slow
@@ -1188,21 +1194,21 @@ def append_attr_meth_examples(app, what, name, obj, options, lines):
"carousel": [
dict(
title="Source Estimation",
- text="Distributed, sparse, mixed-norm, beam\u00ADformers, dipole fitting, and more.", # noqa E501
+ text="Distributed, sparse, mixed-norm, beam\u00adformers, dipole fitting, and more.", # noqa E501
url="auto_tutorials/inverse/index.html",
img="sphx_glr_30_mne_dspm_loreta_008.gif",
alt="dSPM",
),
dict(
title="Machine Learning",
- text="Advanced decoding models including time general\u00ADiza\u00ADtion.", # noqa E501
+ text="Advanced decoding models including time general\u00adiza\u00adtion.", # noqa E501
url="auto_tutorials/machine-learning/50_decoding.html",
img="sphx_glr_50_decoding_006.png",
alt="Decoding",
),
dict(
title="Encoding Models",
- text="Receptive field estima\u00ADtion with optional smooth\u00ADness priors.", # noqa E501
+ text="Receptive field estima\u00adtion with optional smooth\u00adness priors.", # noqa E501
url="auto_tutorials/machine-learning/30_strf.html",
img="sphx_glr_30_strf_001.png",
alt="STRF",
@@ -1216,7 +1222,7 @@ def append_attr_meth_examples(app, what, name, obj, options, lines):
),
dict(
title="Connectivity",
- text="All-to-all spectral and effective connec\u00ADtivity measures.", # noqa E501
+ text="All-to-all spectral and effective connec\u00adtivity measures.", # noqa E501
url="https://mne.tools/mne-connectivity/stable/auto_examples/mne_inverse_label_connectivity.html", # noqa E501
img="https://mne.tools/mne-connectivity/stable/_images/sphx_glr_mne_inverse_label_connectivity_001.png", # noqa E501
alt="Connectivity",
@@ -1753,7 +1759,7 @@ def reset_warnings(gallery_conf, fname):
def check_existing_redirect(path):
"""Make sure existing HTML files are redirects, before overwriting."""
if path.is_file():
- with open(path, "r") as fid:
+ with open(path) as fid:
for _ in range(8):
next(fid)
line = fid.readline()
diff --git a/doc/documentation/datasets.rst b/doc/documentation/datasets.rst
index 063d06da363..70da39cccd8 100644
--- a/doc/documentation/datasets.rst
+++ b/doc/documentation/datasets.rst
@@ -516,7 +516,7 @@ Contains both EEG (EGI) and eye-tracking (ASCII format) data recorded from a
pupillary light reflex experiment, stored in separate files. 1 participant fixated
on the screen while short light flashes appeared. Event onsets were recorded by a
photodiode attached to the screen and were sent to both the EEG and eye-tracking
-systems.
+systems.
.. topic:: Examples
diff --git a/doc/references.bib b/doc/references.bib
index 9263379209a..7a992b2c1fa 100644
--- a/doc/references.bib
+++ b/doc/references.bib
@@ -2450,6 +2450,37 @@ @article{TierneyEtAl2022
author = {Tierney, Tim M. and Mellor, Stephanie and O'Neill, George C. and Timms, Ryan C. and Barnes, Gareth R.},
}
+@article{KumaravelEtAl2022,
+ doi = {10.3390/s22197314},
+ url = {https://doi.org/10.3390/s22197314},
+ year = {2022},
+ month = sep,
+ publisher = {{MDPI} {AG}},
+ volume = {22},
+ number = {19},
+ pages = {7314},
+ author = {Velu Prabhakar Kumaravel and Marco Buiatti and Eugenio Parise and Elisabetta Farella},
+ title = {Adaptable and Robust {EEG} Bad Channel Detection Using Local Outlier Factor ({LOF})},
+ journal = {Sensors}
+}
+
+@article{BreunigEtAl2000,
+ author = {Breunig, Markus M. and Kriegel, Hans-Peter and Ng, Raymond T. and Sander, J\"{o}rg},
+ title = {LOF: Identifying Density-Based Local Outliers},
+ year = {2000},
+ issue_date = {June 2000},
+ publisher = {Association for Computing Machinery},
+ address = {New York, NY, USA},
+ volume = {29},
+ number = {2},
+ url = {https://doi.org/10.1145/335191.335388},
+ doi = {10.1145/335191.335388},
+ journal = {SIGMOD Rec.},
+ month = {may},
+ pages = {93–104},
+ numpages = {12},
+ keywords = {outlier detection, database mining}
+}
@article{OyamaEtAl2015,
title = {Dry phantom for magnetoencephalography —{Configuration}, calibration, and contribution},
diff --git a/doc/sphinxext/contrib_avatars.py b/doc/sphinxext/contrib_avatars.py
index 5082618a9be..04583ac4c77 100644
--- a/doc/sphinxext/contrib_avatars.py
+++ b/doc/sphinxext/contrib_avatars.py
@@ -15,9 +15,9 @@ def generate_contrib_avatars(app, config):
MNE_ADD_CONTRIBUTOR_IMAGE=true in your environment to generate it.
"""
else:
from selenium import webdriver
+ from selenium.common.exceptions import WebDriverException
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
- from selenium.common.exceptions import WebDriverException
try:
options = webdriver.ChromeOptions()
diff --git a/doc/sphinxext/gen_commands.py b/doc/sphinxext/gen_commands.py
index 5fa9cd7418a..e50e243eb48 100644
--- a/doc/sphinxext/gen_commands.py
+++ b/doc/sphinxext/gen_commands.py
@@ -2,10 +2,9 @@
# Copyright the MNE-Python contributors.
import glob
from importlib import import_module
-import os
from pathlib import Path
-from mne.utils import _replace_md5, ArgvSetter
+from mne.utils import ArgvSetter, _replace_md5
def setup(app):
diff --git a/doc/sphinxext/gen_names.py b/doc/sphinxext/gen_names.py
index 1871ae0068c..fd667ec0951 100644
--- a/doc/sphinxext/gen_names.py
+++ b/doc/sphinxext/gen_names.py
@@ -25,7 +25,7 @@ def generate_name_links_rst(app=None):
)
with open(out_fname, "w", encoding="utf8") as fout:
fout.write(":orphan:\n\n")
- with open(names_path, "r") as fin:
+ with open(names_path) as fin:
for line in fin:
if line.startswith(".. _"):
fout.write(f"- {line[4:]}")
diff --git a/doc/sphinxext/gh_substitutions.py b/doc/sphinxext/gh_substitutions.py
index bccc16d13d0..890a71f1c47 100644
--- a/doc/sphinxext/gh_substitutions.py
+++ b/doc/sphinxext/gh_substitutions.py
@@ -4,7 +4,7 @@
from docutils.parsers.rst.roles import set_classes
-def gh_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
+def gh_role(name, rawtext, text, lineno, inliner, options={}, content=[]): # noqa: B006
"""Link to a GitHub issue.
adapted from
diff --git a/doc/sphinxext/mne_substitutions.py b/doc/sphinxext/mne_substitutions.py
index 6a5cdbb6797..bd415fc67f9 100644
--- a/doc/sphinxext/mne_substitutions.py
+++ b/doc/sphinxext/mne_substitutions.py
@@ -4,12 +4,12 @@
from docutils.parsers.rst import Directive
from docutils.statemachine import StringList
-from mne.defaults import DEFAULTS
from mne._fiff.pick import (
- _PICK_TYPES_DATA_DICT,
- _DATA_CH_TYPES_SPLIT,
_DATA_CH_TYPES_ORDER_DEFAULT,
+ _DATA_CH_TYPES_SPLIT,
+ _PICK_TYPES_DATA_DICT,
)
+from mne.defaults import DEFAULTS
class MNESubstitution(Directive): # noqa: D101
diff --git a/doc/sphinxext/newcontrib_substitutions.py b/doc/sphinxext/newcontrib_substitutions.py
index 41cf348c7c4..c38aeb86219 100644
--- a/doc/sphinxext/newcontrib_substitutions.py
+++ b/doc/sphinxext/newcontrib_substitutions.py
@@ -3,7 +3,7 @@
from docutils.nodes import reference, strong, target
-def newcontrib_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
+def newcontrib_role(name, rawtext, text, lineno, inliner, options={}, content=[]): # noqa: B006
"""Create a role to highlight new contributors in changelog entries."""
newcontrib = f"new contributor {text}"
alias_text = f" <{text}_>"
diff --git a/doc/sphinxext/unit_role.py b/doc/sphinxext/unit_role.py
index b882aedc6b1..4d9c9d94252 100644
--- a/doc/sphinxext/unit_role.py
+++ b/doc/sphinxext/unit_role.py
@@ -3,7 +3,7 @@
from docutils import nodes
-def unit_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
+def unit_role(name, rawtext, text, lineno, inliner, options={}, content=[]): # noqa: B006
parts = text.split()
def pass_error_to_sphinx(rawtext, text, lineno, inliner):
@@ -24,7 +24,7 @@ def pass_error_to_sphinx(rawtext, text, lineno, inliner):
except ValueError:
return pass_error_to_sphinx(rawtext, text, lineno, inliner)
# input is well-formatted: proceed
- node = nodes.Text("\u202F".join(parts))
+ node = nodes.Text("\u202f".join(parts))
return [node], []
diff --git a/examples/datasets/hf_sef_data.py b/examples/datasets/hf_sef_data.py
index ec6ef61bcb2..44aa6e8f9a4 100644
--- a/examples/datasets/hf_sef_data.py
+++ b/examples/datasets/hf_sef_data.py
@@ -14,7 +14,6 @@
# %%
-
import os
import mne
diff --git a/examples/decoding/decoding_csp_eeg.py b/examples/decoding/decoding_csp_eeg.py
index 893e7969c7a..6120bd5e5dd 100644
--- a/examples/decoding/decoding_csp_eeg.py
+++ b/examples/decoding/decoding_csp_eeg.py
@@ -20,7 +20,6 @@
# %%
-
import matplotlib.pyplot as plt
import numpy as np
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
diff --git a/examples/decoding/decoding_csp_timefreq.py b/examples/decoding/decoding_csp_timefreq.py
index 2f36064b615..6f13175846e 100644
--- a/examples/decoding/decoding_csp_timefreq.py
+++ b/examples/decoding/decoding_csp_timefreq.py
@@ -21,7 +21,6 @@
# %%
-
import matplotlib.pyplot as plt
import numpy as np
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
diff --git a/examples/decoding/ssd_spatial_filters.py b/examples/decoding/ssd_spatial_filters.py
index 5f4ea3fbcf7..b7c8c4f2c94 100644
--- a/examples/decoding/ssd_spatial_filters.py
+++ b/examples/decoding/ssd_spatial_filters.py
@@ -20,7 +20,6 @@
# %%
-
import matplotlib.pyplot as plt
import mne
diff --git a/examples/io/elekta_epochs.py b/examples/io/elekta_epochs.py
index 5619a0e5174..4afa0ad888d 100644
--- a/examples/io/elekta_epochs.py
+++ b/examples/io/elekta_epochs.py
@@ -15,7 +15,6 @@
# %%
-
import os
import mne
diff --git a/examples/preprocessing/css.py b/examples/preprocessing/css.py
index 9095094d93c..ba4e2385d0c 100644
--- a/examples/preprocessing/css.py
+++ b/examples/preprocessing/css.py
@@ -75,9 +75,9 @@ def subcortical_waveform(times):
labels=[postcenlab, hiplab],
data_fun=cortical_waveform,
)
-stc.data[
- np.where(np.isin(stc.vertices[0], hiplab.vertices))[0], :
-] = subcortical_waveform(times)
+stc.data[np.where(np.isin(stc.vertices[0], hiplab.vertices))[0], :] = (
+ subcortical_waveform(times)
+)
evoked = simulate_evoked(fwd, stc, raw.info, cov, nave=15)
###############################################################################
diff --git a/examples/preprocessing/eog_artifact_histogram.py b/examples/preprocessing/eog_artifact_histogram.py
index d883fa427f8..8a89f9d8a44 100644
--- a/examples/preprocessing/eog_artifact_histogram.py
+++ b/examples/preprocessing/eog_artifact_histogram.py
@@ -15,7 +15,6 @@
# %%
-
import matplotlib.pyplot as plt
import numpy as np
diff --git a/examples/preprocessing/epochs_metadata.py b/examples/preprocessing/epochs_metadata.py
new file mode 100644
index 00000000000..d1ea9a85996
--- /dev/null
+++ b/examples/preprocessing/epochs_metadata.py
@@ -0,0 +1,171 @@
+"""
+.. _epochs-metadata:
+
+===============================================================
+Automated epochs metadata generation with variable time windows
+===============================================================
+
+When working with :class:`~mne.Epochs`, :ref:`metadata ` can be
+invaluable. There is an extensive tutorial on
+:ref:`how it can be generated automatically `.
+In the brief examples below, we will demonstrate different ways to bound the time
+windows used to generate the metadata.
+
+"""
+# Authors: Richard Höchenberger
+#
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
+
+# %%
+# We will use data from an EEG recording during an Eriksen flanker task. For the
+# purpose of demonstration, we'll only load the first 60 seconds of data.
+
+import mne
+
+data_dir = mne.datasets.erp_core.data_path()
+infile = data_dir / "ERP-CORE_Subject-001_Task-Flankers_eeg.fif"
+
+raw = mne.io.read_raw(infile, preload=True)
+raw.crop(tmax=60).filter(l_freq=0.1, h_freq=40)
+
+# %%
+# Visualizing the events
+# ^^^^^^^^^^^^^^^^^^^^^^
+#
+# All experimental events are stored in the :class:`~mne.io.Raw` instance as
+# :class:`~mne.Annotations`. We first need to convert these to events and the
+# corresponding mapping from event codes to event names (``event_id``). We then
+# visualize the events.
+all_events, all_event_id = mne.events_from_annotations(raw)
+mne.viz.plot_events(events=all_events, event_id=all_event_id, sfreq=raw.info["sfreq"])
+
+
+# %%
+# As you can see, there are four types of ``stimulus`` and two types of ``response``
+# events.
+#
+# Declaring "row events"
+# ^^^^^^^^^^^^^^^^^^^^^^
+#
+# For the sake of this example, we will assume that during analysis our epochs will be
+# time-locked to the stimulus onset events. Hence, we would like to create metadata with
+# one row per ``stimulus``. We can achieve this by specifying all stimulus event names
+# as ``row_events``.
+
+row_events = [
+ "stimulus/compatible/target_left",
+ "stimulus/compatible/target_right",
+ "stimulus/incompatible/target_left",
+ "stimulus/incompatible/target_right",
+]
+
+# %%
+# Specifying metadata time windows
+# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+#
+# Now, we will explore different ways of specifying the time windows around the
+# ``row_events`` when generating metadata. Any events falling within the same time
+# window will be added to the same row in the metadata table.
+#
+# Fixed time window
+# ~~~~~~~~~~~~~~~~~
+#
+# A simple way to specify the time window extent is by specifying the time in seconds
+# relative to the row event. In the following example, the time window spans from the
+# row event (time point zero) up until three seconds later.
+
+metadata_tmin = 0.0
+metadata_tmax = 3.0
+
+metadata, events, event_id = mne.epochs.make_metadata(
+ events=all_events,
+ event_id=all_event_id,
+ tmin=metadata_tmin,
+ tmax=metadata_tmax,
+ sfreq=raw.info["sfreq"],
+ row_events=row_events,
+)
+
+metadata
+
+# %%
+# This looks good at first glance. However, in the 2nd and 3rd rows, for example, we
+# have two responses listed (left and right). This is because the 3-second time window
+# is obviously a bit too wide and captures more than one trial. While we could make it
+# narrower, that could lead to a loss of events if the window becomes **too**
+# narrow. Ultimately, this problem arises because the response time varies from trial
+# to trial, so it's difficult for us to set a fixed upper bound for the time window.
+#
+# Fixed time window with ``keep_first``
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#
+# One workaround is using the ``keep_first`` parameter, which will create a new column
+# containing the first event of the specified type.
+
+metadata_tmin = 0.0
+metadata_tmax = 3.0
+keep_first = "response" # <-- new
+
+metadata, events, event_id = mne.epochs.make_metadata(
+ events=all_events,
+ event_id=all_event_id,
+ tmin=metadata_tmin,
+ tmax=metadata_tmax,
+ sfreq=raw.info["sfreq"],
+ row_events=row_events,
+ keep_first=keep_first, # <-- new
+)
+
+metadata
+
+# %%
+# As you can see, a new column ``response`` was created with the time of the first
+# response event falling inside the time window. The ``first_response`` column specifies
+# **which** response occurred first (left or right).
+#
+# Variable time window
+# ~~~~~~~~~~~~~~~~~~~~
+#
+# Another way to address the challenge of variable time windows **without** the need to
+# create new columns is by specifying ``tmin`` and ``tmax`` as event names. In this
+# example, we use ``tmin=row_events``, because we want the time window to start
+# with the time-locked event. ``tmax``, on the other hand, is set to the response events:
+# The first response event following ``tmin`` will be used to determine the duration of
+# the time window.
+
+metadata_tmin = row_events
+metadata_tmax = ["response/left", "response/right"]
+
+metadata, events, event_id = mne.epochs.make_metadata(
+ events=all_events,
+ event_id=all_event_id,
+ tmin=metadata_tmin,
+ tmax=metadata_tmax,
+ sfreq=raw.info["sfreq"],
+ row_events=row_events,
+)
+
+metadata
+
+# %%
+# Variable time window (simplified)
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#
+# We can slightly simplify the above code: Since ``tmin`` shall be set to the
+# ``row_events``, we can pass ``tmin=None``, which is a more convenient way to express
+# ``tmin=row_events``. The resulting metadata looks the same as in the previous example.
+
+metadata_tmin = None # <-- new
+metadata_tmax = ["response/left", "response/right"]
+
+metadata, events, event_id = mne.epochs.make_metadata(
+ events=all_events,
+ event_id=all_event_id,
+ tmin=metadata_tmin,
+ tmax=metadata_tmax,
+ sfreq=raw.info["sfreq"],
+ row_events=row_events,
+)
+
+metadata
diff --git a/examples/preprocessing/xdawn_denoising.py b/examples/preprocessing/xdawn_denoising.py
index 6fc38a55b94..20a6abc72fb 100644
--- a/examples/preprocessing/xdawn_denoising.py
+++ b/examples/preprocessing/xdawn_denoising.py
@@ -25,7 +25,6 @@
# %%
-
from mne import Epochs, compute_raw_covariance, io, pick_types, read_events
from mne.datasets import sample
from mne.preprocessing import Xdawn
diff --git a/examples/visualization/eyetracking_plot_heatmap.py b/examples/visualization/eyetracking_plot_heatmap.py
index e1826efb6f7..9225493ef88 100644
--- a/examples/visualization/eyetracking_plot_heatmap.py
+++ b/examples/visualization/eyetracking_plot_heatmap.py
@@ -24,7 +24,6 @@
# :ref:`example data `: eye-tracking data recorded from SR research's
# ``'.asc'`` file format.
-
import matplotlib.pyplot as plt
import mne
@@ -35,6 +34,12 @@
stim_fpath = task_fpath / "stim" / "naturalistic.png"
raw = mne.io.read_raw_eyelink(et_fpath)
+calibration = mne.preprocessing.eyetracking.read_eyelink_calibration(
+ et_fpath,
+ screen_resolution=(1920, 1080),
+ screen_size=(0.53, 0.3),
+ screen_distance=0.9,
+)[0]
# %%
# Process and epoch the data
@@ -58,9 +63,8 @@
# screen resolution of the participant screen (1920x1080) as the width and height. We
# can also use the sigma parameter to smooth the plot.
-px_width, px_height = 1920, 1080
cmap = plt.get_cmap("viridis")
-plot_gaze(epochs["natural"], width=px_width, height=px_height, cmap=cmap, sigma=50)
+plot_gaze(epochs["natural"], calibration=calibration, cmap=cmap, sigma=50)
# %%
# Overlaying plots with images
@@ -77,10 +81,26 @@
ax.imshow(plt.imread(stim_fpath))
plot_gaze(
epochs["natural"],
- width=px_width,
- height=px_height,
+ calibration=calibration,
vlim=(0.0003, None),
sigma=50,
cmap=cmap,
axes=ax,
)
+
+# %%
+# Displaying the heatmap in units of visual angle
+# -----------------------------------------------
+#
+# In scientific publications it is common to report gaze data as the visual angle
+# from the participant's eye to the screen. We can convert the units of our gaze data to
+# radians of visual angle before plotting the heatmap:
+
+# %%
+epochs.load_data()
+mne.preprocessing.eyetracking.convert_units(epochs, calibration, to="radians")
+plot_gaze(
+ epochs["natural"],
+ calibration=calibration,
+ sigma=50,
+)
diff --git a/examples/visualization/topo_compare_conditions.py b/examples/visualization/topo_compare_conditions.py
index 7572eab47e5..3ab4e46d5f2 100644
--- a/examples/visualization/topo_compare_conditions.py
+++ b/examples/visualization/topo_compare_conditions.py
@@ -19,7 +19,6 @@
# %%
-
import matplotlib.pyplot as plt
import mne
diff --git a/examples/visualization/topo_customized.py b/examples/visualization/topo_customized.py
index 2d3c6662ebc..2303961f9da 100644
--- a/examples/visualization/topo_customized.py
+++ b/examples/visualization/topo_customized.py
@@ -19,7 +19,6 @@
# %%
-
import matplotlib.pyplot as plt
import numpy as np
diff --git a/mne/_fiff/meas_info.py b/mne/_fiff/meas_info.py
index 462a34cb6d6..797e3d4bbaa 100644
--- a/mne/_fiff/meas_info.py
+++ b/mne/_fiff/meas_info.py
@@ -1104,9 +1104,9 @@ class Info(dict, SetChannelsMixin, MontageMixin, ContainsMixin):
The transformation from 4D/CTF head coordinates to Neuromag head
coordinates. This is only present in 4D/CTF data.
custom_ref_applied : int
- Whether a custom (=other than average) reference has been applied to
- the EEG data. This flag is checked by some algorithms that require an
- average reference to be set.
+ Whether a custom (=other than an average projector) reference has been
+ applied to the EEG data. This flag is checked by some algorithms that
+ require an average reference to be set.
description : str | None
String description of the recording.
dev_ctf_t : Transform | None
diff --git a/mne/_fiff/tests/test_meas_info.py b/mne/_fiff/tests/test_meas_info.py
index 3cf1f79cceb..8552585eec4 100644
--- a/mne/_fiff/tests/test_meas_info.py
+++ b/mne/_fiff/tests/test_meas_info.py
@@ -350,8 +350,9 @@ def test_read_write_info(tmp_path):
@testing.requires_testing_data
def test_dir_warning():
"""Test that trying to read a bad filename emits a warning before an error."""
- with pytest.raises(OSError, match="directory"), pytest.warns(
- RuntimeWarning, match="does not conform"
+ with (
+ pytest.raises(OSError, match="directory"),
+ pytest.warns(RuntimeWarning, match="does not conform"),
):
read_info(ctf_fname)
diff --git a/mne/annotations.py b/mne/annotations.py
index f0f88783b68..a6be1f7a62d 100644
--- a/mne/annotations.py
+++ b/mne/annotations.py
@@ -1529,6 +1529,7 @@ def events_from_annotations(
regexp=r"^(?![Bb][Aa][Dd]|[Ee][Dd][Gg][Ee]).*$",
use_rounding=True,
chunk_duration=None,
+ tol=1e-8,
verbose=None,
):
"""Get :term:`events` and ``event_id`` from an Annotations object.
@@ -1572,6 +1573,11 @@ def events_from_annotations(
they fit within the annotation duration spaced according to
``chunk_duration``. As a consequence annotations with duration shorter
than ``chunk_duration`` will not contribute events.
+ tol : float
+ The tolerance used to check if a chunk fits within an annotation when
+ ``chunk_duration`` is not ``None``. If the duration from a computed
+ chunk onset to the end of the annotation is smaller than
+ ``chunk_duration`` minus ``tol``, the onset will be discarded.
%(verbose)s
Returns
@@ -1617,7 +1623,7 @@ def events_from_annotations(
for annot in annotations[event_sel]:
annot_offset = annot["onset"] + annot["duration"]
_onsets = np.arange(annot["onset"], annot_offset, chunk_duration)
- good_events = annot_offset - _onsets >= chunk_duration
+ good_events = annot_offset - _onsets >= chunk_duration - tol
if good_events.any():
_onsets = _onsets[good_events]
_inds = raw.time_as_index(
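
A usage sketch for the ``tol`` parameter added above (``raw`` assumed to carry long annotations to be chunked into fixed-length events)::

    import mne

    # chunk each annotation into 1 s events, tolerating tiny rounding
    # errors where the last chunk would otherwise be dropped
    events, event_id = mne.events_from_annotations(raw, chunk_duration=1.0, tol=1e-8)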
diff --git a/mne/beamformer/resolution_matrix.py b/mne/beamformer/resolution_matrix.py
index 108fb7a4dbf..ce55a09584b 100644
--- a/mne/beamformer/resolution_matrix.py
+++ b/mne/beamformer/resolution_matrix.py
@@ -1,4 +1,5 @@
"""Compute resolution matrix for beamformers."""
+
# Authors: olaf.hauk@mrc-cbu.cam.ac.uk
#
# License: BSD-3-Clause
diff --git a/mne/beamformer/tests/test_lcmv.py b/mne/beamformer/tests/test_lcmv.py
index f6c7ef20492..509afbcf79e 100644
--- a/mne/beamformer/tests/test_lcmv.py
+++ b/mne/beamformer/tests/test_lcmv.py
@@ -589,9 +589,11 @@ def test_make_lcmv_sphere(pick_ori, weight_norm):
fwd_sphere = mne.make_forward_solution(evoked.info, None, src, sphere)
# Test that we get an error if not reducing rank
- with pytest.raises(
- ValueError, match="Singular matrix detected"
- ), _record_warnings(), pytest.warns(RuntimeWarning, match="positive semidefinite"):
+ with (
+ pytest.raises(ValueError, match="Singular matrix detected"),
+ _record_warnings(),
+ pytest.warns(RuntimeWarning, match="positive semidefinite"),
+ ):
make_lcmv(
evoked.info,
fwd_sphere,
diff --git a/mne/channels/channels.py b/mne/channels/channels.py
index aee085891c4..0d0af8279cb 100644
--- a/mne/channels/channels.py
+++ b/mne/channels/channels.py
@@ -839,6 +839,8 @@ def interpolate_bads(
- ``"meg"`` channels support ``"MNE"`` (default) and ``"nan"``
- ``"eeg"`` channels support ``"spline"`` (default), ``"MNE"`` and ``"nan"``
- ``"fnirs"`` channels support ``"nearest"`` (default) and ``"nan"``
+ - ``"ecog"`` channels support ``"spline"`` (default) and ``"nan"``
+ - ``"seeg"`` channels support ``"spline"`` (default) and ``"nan"``
None is an alias for::
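
For context, a sketch of how the per-type ``method`` mapping reaches this code from the public API (the file path is hypothetical):

```python
import mne

# mark a bad sEEG contact and use the spline interpolation that this change
# documents for the "ecog" and "seeg" channel types
raw = mne.io.read_raw_fif("ieeg_raw.fif", preload=True)  # hypothetical file
raw.info["bads"] = ["SEEG 021"]
raw.interpolate_bads(method={"seeg": "spline"})
```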
diff --git a/mne/commands/mne_bti2fiff.py b/mne/commands/mne_bti2fiff.py
index c8664ca5a35..2c4e4083df1 100644
--- a/mne/commands/mne_bti2fiff.py
+++ b/mne/commands/mne_bti2fiff.py
@@ -30,7 +30,6 @@
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.
-
import sys
import mne
diff --git a/mne/commands/mne_clean_eog_ecg.py b/mne/commands/mne_clean_eog_ecg.py
index 8f18f16f6cb..10b84540756 100644
--- a/mne/commands/mne_clean_eog_ecg.py
+++ b/mne/commands/mne_clean_eog_ecg.py
@@ -14,7 +14,6 @@
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.
-
import sys
import mne
diff --git a/mne/commands/mne_make_scalp_surfaces.py b/mne/commands/mne_make_scalp_surfaces.py
index 91ed2fdae60..85b7acd2883 100644
--- a/mne/commands/mne_make_scalp_surfaces.py
+++ b/mne/commands/mne_make_scalp_surfaces.py
@@ -17,6 +17,7 @@
$ mne make_scalp_surfaces --overwrite --subject sample
"""
+
import os
import sys
diff --git a/mne/conftest.py b/mne/conftest.py
index 4bab9dc1186..2d153f92f40 100644
--- a/mne/conftest.py
+++ b/mne/conftest.py
@@ -10,6 +10,7 @@
import shutil
import sys
import warnings
+from collections import defaultdict
from contextlib import contextmanager
from pathlib import Path
from textwrap import dedent
@@ -199,6 +200,7 @@ def pytest_configure(config):
ignore:Python 3\.14 will, by default, filter extracted tar archives.*:DeprecationWarning
# pandas
ignore:\n*Pyarrow will become a required dependency of pandas.*:DeprecationWarning
+ ignore:np\.find_common_type is deprecated.*:DeprecationWarning
# pyvista <-> NumPy 2.0
ignore:__array_wrap__ must accept context and return_scalar arguments.*:DeprecationWarning
""" # noqa: E501
@@ -796,14 +798,13 @@ def mixed_fwd_cov_evoked(_evoked_cov_sphere, _all_src_types_fwd):
@pytest.fixture(scope="session")
-@pytest.mark.slowtest
-@pytest.mark.parametrize(params=[testing._pytest_param()])
def src_volume_labels():
"""Create a 7mm source space with labels."""
pytest.importorskip("nibabel")
volume_labels = mne.get_volume_labels_from_aseg(fname_aseg)
- with _record_warnings(), pytest.warns(
- RuntimeWarning, match="Found no usable.*t-vessel.*"
+ with (
+ _record_warnings(),
+ pytest.warns(RuntimeWarning, match="Found no usable.*t-vessel.*"),
):
src = mne.setup_volume_source_space(
"sample",
@@ -901,11 +902,10 @@ def protect_config():
def _test_passed(request):
- try:
- outcome = request.node.harvest_rep_call
- except Exception:
- outcome = "passed"
- return outcome == "passed"
+ if _phase_report_key not in request.node.stash:
+ return True
+ report = request.node.stash[_phase_report_key]
+ return "call" in report and report["call"].outcome == "passed"
@pytest.fixture()
@@ -932,7 +932,6 @@ def brain_gc(request):
ignore = set(id(o) for o in gc.get_objects())
yield
close_func()
- # no need to warn if the test itself failed, pytest-harvest helps us here
if not _test_passed(request):
return
_assert_no_instances(Brain, "after")
@@ -961,16 +960,14 @@ def pytest_sessionfinish(session, exitstatus):
if n is None:
return
print("\n")
- try:
- import pytest_harvest
- except ImportError:
- print("Module-level timings require pytest-harvest")
- return
# get the number to print
- res = pytest_harvest.get_session_synthesis_dct(session)
- files = dict()
- for key, val in res.items():
- parts = Path(key.split(":")[0]).parts
+ files = defaultdict(lambda: 0.0)
+ for item in session.items:
+ if _phase_report_key not in item.stash:
+ continue
+ report = item.stash[_phase_report_key]
+ dur = sum(x.duration for x in report.values())
+ parts = Path(item.nodeid.split(":")[0]).parts
# split mne/tests/test_whatever.py into separate categories since these
# are essentially submodule-level tests. Keeping just [:3] works,
         # except for mne/viz where we want level-4 granularity
@@ -979,7 +976,7 @@ def pytest_sessionfinish(session, exitstatus):
if not parts[-1].endswith(".py"):
parts = parts + ("",)
file_key = "/".join(parts)
- files[file_key] = files.get(file_key, 0) + val["pytest_duration_s"]
+ files[file_key] += dur
files = sorted(list(files.items()), key=lambda x: x[1])[::-1]
# print
_files[:] = files[:n]
@@ -1000,7 +997,7 @@ def pytest_terminal_summary(terminalreporter, exitstatus, config):
writer.line(f"{timing.ljust(15)}{name}")
-def pytest_report_header(config, startdir):
+def pytest_report_header(config, startdir=None):
"""Add information to the pytest run header."""
return f"MNE {mne.__version__} -- {str(Path(mne.__file__).parent)}"
@@ -1123,7 +1120,6 @@ def run(nbexec=nbexec, code=code):
return
-@pytest.mark.filterwarnings("ignore:.*Extraction of measurement.*:")
@pytest.fixture(
params=(
[nirsport2, nirsport2_snirf, testing._pytest_param()],
@@ -1161,8 +1157,7 @@ def qt_windows_closed(request):
if "allow_unclosed_pyside2" in marks and API_NAME.lower() == "pyside2":
return
# Don't check when the test fails
- report = request.node.stash[_phase_report_key]
- if ("call" not in report) or report["call"].failed:
+ if not _test_passed(request):
return
widgets = app.topLevelWidgets()
n_after = len(widgets)
@@ -1179,3 +1174,53 @@ def pytest_runtest_makereport(item, call):
outcome = yield
rep = outcome.get_result()
item.stash.setdefault(_phase_report_key, {})[rep.when] = rep
+
+
+@pytest.fixture(scope="function")
+def eyetrack_cal():
+ """Create a toy calibration instance."""
+ screen_size = (0.4, 0.225) # width, height in meters
+ screen_resolution = (1920, 1080)
+ screen_distance = 0.7 # meters
+ onset = 0
+ model = "HV9"
+ eye = "R"
+ avg_error = 0.5
+ max_error = 1.0
+ positions = np.zeros((9, 2))
+ offsets = np.zeros((9,))
+ gaze = np.zeros((9, 2))
+ cal = mne.preprocessing.eyetracking.Calibration(
+ screen_size=screen_size,
+ screen_distance=screen_distance,
+ screen_resolution=screen_resolution,
+ eye=eye,
+ model=model,
+ positions=positions,
+ offsets=offsets,
+ gaze=gaze,
+ onset=onset,
+ avg_error=avg_error,
+ max_error=max_error,
+ )
+ return cal
+
+
+@pytest.fixture(scope="function")
+def eyetrack_raw():
+ """Create a toy raw instance with eyetracking channels."""
+ # simulate a steady fixation at the center pixel of a 1920x1080 resolution screen
+ shape = (1, 100) # x or y, time
+ data = np.vstack([np.full(shape, 960), np.full(shape, 540), np.full(shape, 0)])
+
+    info = mne.create_info(
+ ch_names=["xpos", "ypos", "pupil"], sfreq=100, ch_types="eyegaze"
+ )
+ more_info = dict(
+ xpos=("eyegaze", "px", "right", "x"),
+ ypos=("eyegaze", "px", "right", "y"),
+ pupil=("pupil", "au", "right"),
+ )
+ raw = mne.io.RawArray(data, info)
+ raw = mne.preprocessing.eyetracking.set_channel_types_eyetrack(raw, more_info)
+ return raw
diff --git a/mne/datasets/__init__.pyi b/mne/datasets/__init__.pyi
index 22cb6acce7b..44cee84fe7f 100644
--- a/mne/datasets/__init__.pyi
+++ b/mne/datasets/__init__.pyi
@@ -66,7 +66,7 @@ from . import (
)
from ._fetch import fetch_dataset
from ._fsaverage.base import fetch_fsaverage
-from ._infant.base import fetch_infant_template
+from ._infant import fetch_infant_template
from ._phantom.base import fetch_phantom
from .utils import (
_download_all_example_data,
diff --git a/mne/datasets/_infant/__init__.py b/mne/datasets/_infant/__init__.py
new file mode 100644
index 00000000000..7347d36fcd0
--- /dev/null
+++ b/mne/datasets/_infant/__init__.py
@@ -0,0 +1 @@
+from .base import fetch_infant_template
diff --git a/mne/datasets/config.py b/mne/datasets/config.py
index 238b61998d6..fb9a04e1e40 100644
--- a/mne/datasets/config.py
+++ b/mne/datasets/config.py
@@ -89,7 +89,7 @@
# update the checksum in the MNE_DATASETS dict below, and change version
# here: ↓↓↓↓↓↓↓↓
RELEASES = dict(
- testing="0.151",
+ testing="0.152",
misc="0.27",
phantom_kit="0.2",
)
@@ -116,7 +116,7 @@
# Testing and misc are at the top as they're updated most often
MNE_DATASETS["testing"] = dict(
archive_name=f"{TESTING_VERSIONED}.tar.gz",
- hash="md5:5832b4d44f0423d22305fa61cb75bc25",
+ hash="md5:df48cdabcf13ebeaafc617cb8e55b6fc",
url=(
"https://codeload.github.com/mne-tools/mne-testing-data/"
f'tar.gz/{RELEASES["testing"]}'
diff --git a/mne/decoding/base.py b/mne/decoding/base.py
index e44fcd13f29..8e36ee412a8 100644
--- a/mne/decoding/base.py
+++ b/mne/decoding/base.py
@@ -11,6 +11,7 @@
import numbers
import numpy as np
+from scipy.sparse import issparse
from ..fixes import BaseEstimator, _check_fit_params, _get_check_scoring
from ..parallel import parallel_func
@@ -106,6 +107,12 @@ def fit(self, X, y, **fit_params):
self : instance of LinearModel
Returns the modified instance.
"""
+ # Once we require sklearn 1.1+ we should do:
+ # from sklearn.utils import check_array
+ # X = check_array(X, input_name="X")
+ # y = check_array(y, dtype=None, ensure_2d=False, input_name="y")
+ if issparse(X):
+ raise TypeError("X should be a dense array, got sparse instead.")
X, y = np.asarray(X), np.asarray(y)
if X.ndim != 2:
raise ValueError(
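
A quick sketch of what the new guard means for callers (the scikit-learn estimator choice here is arbitrary):

```python
import numpy as np
from scipy.sparse import csr_matrix
from sklearn.linear_model import LogisticRegression

from mne.decoding import LinearModel

rng = np.random.RandomState(0)
X = rng.randn(20, 4)
y = np.array([0, 1] * 10)

LinearModel(LogisticRegression()).fit(X, y)  # dense input is accepted
try:
    LinearModel(LogisticRegression()).fit(csr_matrix(X), y)
except TypeError as err:
    print(err)  # X should be a dense array, got sparse instead.
```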
diff --git a/mne/decoding/search_light.py b/mne/decoding/search_light.py
index 369efd7bba3..c8d56b88d6e 100644
--- a/mne/decoding/search_light.py
+++ b/mne/decoding/search_light.py
@@ -5,6 +5,7 @@
import logging
import numpy as np
+from scipy.sparse import issparse
from ..fixes import _get_check_scoring
from ..parallel import parallel_func
@@ -254,6 +255,12 @@ def decision_function(self, X):
def _check_Xy(self, X, y=None):
"""Aux. function to check input data."""
+ # Once we require sklearn 1.1+ we should do something like:
+ # from sklearn.utils import check_array
+ # X = check_array(X, ensure_2d=False, input_name="X")
+ # y = check_array(y, dtype=None, ensure_2d=False, input_name="y")
+ if issparse(X):
+ raise TypeError("X should be a dense array, got sparse instead.")
X = np.asarray(X)
if y is not None:
y = np.asarray(y)
diff --git a/mne/defaults.py b/mne/defaults.py
index b9e6702edec..31fc53299e9 100644
--- a/mne/defaults.py
+++ b/mne/defaults.py
@@ -235,8 +235,8 @@
eeg_scale=4e-3,
eegp_scale=20e-3,
eegp_height=0.1,
- ecog_scale=5e-3,
- seeg_scale=5e-3,
+ ecog_scale=2e-3,
+ seeg_scale=2e-3,
meg_scale=1.0, # sensors are already in SI units
ref_meg_scale=1.0,
dbs_scale=5e-3,
diff --git a/mne/epochs.py b/mne/epochs.py
index 952f0b27d96..83b427ac394 100644
--- a/mne/epochs.py
+++ b/mne/epochs.py
@@ -16,6 +16,7 @@
from collections import Counter
from copy import deepcopy
from functools import partial
+from inspect import getfullargspec
import numpy as np
from scipy.interpolate import interp1d
@@ -74,6 +75,7 @@
from .html_templates import _get_html_template
from .parallel import parallel_func
from .time_frequency.spectrum import EpochsSpectrum, SpectrumMixin, _validate_method
+from .time_frequency.tfr import EpochsTFR
from .utils import (
ExtendedTimeMixin,
GetEpochsMixin,
@@ -1972,22 +1974,52 @@ def apply_function(
if dtype is not None and dtype != self._data.dtype:
self._data = self._data.astype(dtype)
+ args = getfullargspec(fun).args + getfullargspec(fun).kwonlyargs
+ if channel_wise is False:
+ if ("ch_idx" in args) or ("ch_name" in args):
+ raise ValueError(
+ "apply_function cannot access ch_idx or ch_name "
+ "when channel_wise=False"
+ )
+ if "ch_idx" in args:
+ logger.info("apply_function requested to access ch_idx")
+ if "ch_name" in args:
+ logger.info("apply_function requested to access ch_name")
+
if channel_wise:
parallel, p_fun, n_jobs = parallel_func(_check_fun, n_jobs)
if n_jobs == 1:
- _fun = partial(_check_fun, fun, **kwargs)
+ _fun = partial(_check_fun, fun)
# modify data inplace to save memory
- for idx in picks:
- self._data[:, idx, :] = np.apply_along_axis(
- _fun, -1, data_in[:, idx, :]
+ for ch_idx in picks:
+ if "ch_idx" in args:
+ kwargs.update(ch_idx=ch_idx)
+ if "ch_name" in args:
+ kwargs.update(ch_name=self.info["ch_names"][ch_idx])
+ self._data[:, ch_idx, :] = np.apply_along_axis(
+ _fun, -1, data_in[:, ch_idx, :], **kwargs
)
else:
# use parallel function
+ _fun = partial(np.apply_along_axis, fun, -1)
data_picks_new = parallel(
- p_fun(fun, data_in[:, p, :], **kwargs) for p in picks
+ p_fun(
+ _fun,
+ data_in[:, ch_idx, :],
+ **kwargs,
+ **{
+ k: v
+ for k, v in [
+ ("ch_name", self.info["ch_names"][ch_idx]),
+ ("ch_idx", ch_idx),
+ ]
+ if k in args
+ },
+ )
+ for ch_idx in picks
)
- for pp, p in enumerate(picks):
- self._data[:, p, :] = data_picks_new[pp]
+ for run_idx, ch_idx in enumerate(picks):
+ self._data[:, ch_idx, :] = data_picks_new[run_idx]
else:
self._data = _check_fun(fun, data_in, **kwargs)
@@ -2180,7 +2212,14 @@ def save(
)
# check for file existence and expand `~` if present
- fname = str(_check_fname(fname=fname, overwrite=overwrite))
+ fname = str(
+ _check_fname(
+ fname=fname,
+ overwrite=overwrite,
+ check_bids_split=True,
+ name="fname",
+ )
+ )
split_size_bytes = _get_split_size(split_size)
@@ -2448,8 +2487,8 @@ def equalize_event_counts(self, event_ids=None, method="mintime"):
for eq in event_ids:
eq_inds.append(self._keys_to_idx(eq))
- event_times = [self.events[e, 0] for e in eq_inds]
- indices = _get_drop_indices(event_times, method)
+ sample_nums = [self.events[e, 0] for e in eq_inds]
+ indices = _get_drop_indices(sample_nums, method)
# need to re-index indices
indices = np.concatenate([e[idx] for e, idx in zip(eq_inds, indices)])
self.drop(indices, reason="EQUALIZED_COUNT")
@@ -2790,14 +2829,15 @@ def make_metadata(
A mapping from event names (keys) to event IDs (values). The event
names will be incorporated as columns of the returned metadata
:class:`~pandas.DataFrame`.
- tmin, tmax : float | None
- Start and end of the time interval for metadata generation in seconds, relative
- to the time-locked event of the respective time window (the "row events").
+ tmin, tmax : float | str | list of str | None
+ If float, start and end of the time interval for metadata generation in seconds,
+ relative to the time-locked event of the respective time window (the "row
+ events").
.. note::
If you are planning to attach the generated metadata to
`~mne.Epochs` and intend to include only events that fall inside
- your epochs time interval, pass the same ``tmin`` and ``tmax``
+ your epoch's time interval, pass the same ``tmin`` and ``tmax``
values here as you use for your epochs.
If ``None``, the time window used for metadata generation is bounded by the
@@ -2810,8 +2850,17 @@ def make_metadata(
the first row event. If ``tmax=None``, the last time window for metadata
generation ends with the last event in ``events``.
+ If a string or a list of strings, the events bounding the metadata around each
+ "row event". For ``tmin``, the events are assumed to occur **before** the row
+ event, and for ``tmax``, the events are assumed to occur **after** – unless
+ ``tmin`` or ``tmax`` are equal to a row event, in which case the row event
+ serves as the bound.
+
.. versionchanged:: 1.6.0
Added support for ``None``.
+
+ .. versionadded:: 1.7.0
+ Added support for strings.
sfreq : float
The sampling frequency of the data from which the events array was
extracted.
@@ -2897,8 +2946,8 @@ def make_metadata(
be attached; it may well be much shorter or longer, or not overlap at all,
if desired. This can be useful, for example, to include events that
occurred before or after an epoch, e.g. during the inter-trial interval.
- If either ``tmin``, ``tmax``, or both are ``None``, the time window will
- typically vary, too.
+    If either ``tmin``, ``tmax``, or both are ``None`` or a string referring, e.g., to
+    a response event, the time window will typically vary, too.
.. versionadded:: 0.23
@@ -2911,11 +2960,11 @@ def make_metadata(
_validate_type(events, types=("array-like",), item_name="events")
_validate_type(event_id, types=(dict,), item_name="event_id")
_validate_type(sfreq, types=("numeric",), item_name="sfreq")
- _validate_type(tmin, types=("numeric", None), item_name="tmin")
- _validate_type(tmax, types=("numeric", None), item_name="tmax")
- _validate_type(row_events, types=(None, str, list, tuple), item_name="row_events")
- _validate_type(keep_first, types=(None, str, list, tuple), item_name="keep_first")
- _validate_type(keep_last, types=(None, str, list, tuple), item_name="keep_last")
+ _validate_type(tmin, types=("numeric", str, "array-like", None), item_name="tmin")
+ _validate_type(tmax, types=("numeric", str, "array-like", None), item_name="tmax")
+ _validate_type(row_events, types=(None, str, "array-like"), item_name="row_events")
+ _validate_type(keep_first, types=(None, str, "array-like"), item_name="keep_first")
+ _validate_type(keep_last, types=(None, str, "array-like"), item_name="keep_last")
if not event_id:
raise ValueError("event_id dictionary must contain at least one entry")
@@ -2932,6 +2981,19 @@ def _ensure_list(x):
keep_first = _ensure_list(keep_first)
keep_last = _ensure_list(keep_last)
+ # Turn tmin, tmax into a list if they're strings or arrays of strings
+ try:
+ _validate_type(tmin, types=(str, "array-like"), item_name="tmin")
+ tmin = _ensure_list(tmin)
+ except TypeError:
+ pass
+
+ try:
+ _validate_type(tmax, types=(str, "array-like"), item_name="tmax")
+ tmax = _ensure_list(tmax)
+ except TypeError:
+ pass
+
keep_first_and_last = set(keep_first) & set(keep_last)
if keep_first_and_last:
raise ValueError(
@@ -2951,18 +3013,40 @@ def _ensure_list(x):
f"{param_name}, cannot be found in event_id dictionary"
)
- event_name_diff = sorted(set(row_events) - set(event_id.keys()))
- if event_name_diff:
- raise ValueError(
- f"Present in row_events, but missing from event_id: "
- f'{", ".join(event_name_diff)}'
+ # If tmin, tmax are strings, ensure these event names are present in event_id
+ def _diff_input_strings_vs_event_id(input_strings, input_name, event_id):
+ event_name_diff = sorted(set(input_strings) - set(event_id.keys()))
+ if event_name_diff:
+ raise ValueError(
+ f"Present in {input_name}, but missing from event_id: "
+ f'{", ".join(event_name_diff)}'
+ )
+
+ _diff_input_strings_vs_event_id(
+ input_strings=row_events, input_name="row_events", event_id=event_id
+ )
+ if isinstance(tmin, list):
+ _diff_input_strings_vs_event_id(
+ input_strings=tmin, input_name="tmin", event_id=event_id
+ )
+ if isinstance(tmax, list):
+ _diff_input_strings_vs_event_id(
+ input_strings=tmax, input_name="tmax", event_id=event_id
)
- del event_name_diff
# First and last sample of each epoch, relative to the time-locked event
# This follows the approach taken in mne.Epochs
- start_sample = None if tmin is None else int(round(tmin * sfreq))
- stop_sample = None if tmax is None else int(round(tmax * sfreq)) + 1
+ # For strings and None, we don't know the start and stop samples in advance as the
+ # time window can vary.
+ if isinstance(tmin, (type(None), list)):
+ start_sample = None
+ else:
+ start_sample = int(round(tmin * sfreq))
+
+ if isinstance(tmax, (type(None), list)):
+ stop_sample = None
+ else:
+ stop_sample = int(round(tmax * sfreq)) + 1
# Make indexing easier
# We create the DataFrame before subsetting the events so we end up with
@@ -3016,14 +3100,47 @@ def _ensure_list(x):
metadata.loc[row_idx, "event_name"] = id_to_name_map[row_event.id]
# Determine which events fall into the current time window
- if start_sample is None:
+ if start_sample is None and isinstance(tmin, list):
+            # Lower bound is the current or the closest previous event with a
+            # name in "tmin"; if there is no such event (e.g., the beginning of
+            # the recording is being approached), the lower bound becomes the
+            # row event itself.
+ prev_matching_events = events_df.loc[
+ (events_df["sample"] <= row_event.sample)
+ & (events_df["id"].isin([event_id[name] for name in tmin])),
+ :,
+ ]
+ if prev_matching_events.size == 0:
+ # No earlier matching event. Use the current one as the beginning of the
+ # time window. This may occur at the beginning of a recording.
+ window_start_sample = row_event.sample
+ else:
+ # At least one earlier matching event. Use the closest one.
+ window_start_sample = prev_matching_events.iloc[-1]["sample"]
+ elif start_sample is None:
# Lower bound is the current event.
window_start_sample = row_event.sample
else:
# Lower bound is determined by tmin.
window_start_sample = row_event.sample + start_sample
- if stop_sample is None:
+ if stop_sample is None and isinstance(tmax, list):
+            # Upper bound is the current or the closest following event with a name
+ # in "tmax"; if there is no such event (e.g., end of the recording is being
+ # approached), the upper bound becomes the last event in the recording.
+ next_matching_events = events_df.loc[
+ (events_df["sample"] >= row_event.sample)
+ & (events_df["id"].isin([event_id[name] for name in tmax])),
+ :,
+ ]
+ if next_matching_events.size == 0:
+ # No matching event after the current one; use the end of the recording
+ # as upper bound. This may occur at the end of a recording.
+ window_stop_sample = events_df["sample"].iloc[-1]
+ else:
+                # At least one later matching event. Use the closest one.
+ window_stop_sample = next_matching_events.iloc[0]["sample"]
+ elif stop_sample is None:
# Upper bound: next event of the same type, or the last event (of
# any type) if no later event of the same type can be found.
next_events = events_df.loc[
@@ -3585,7 +3702,7 @@ def combine_event_ids(epochs, old_event_ids, new_event_id, copy=True):
def equalize_epoch_counts(epochs_list, method="mintime"):
- """Equalize the number of trials in multiple Epoch instances.
+ """Equalize the number of trials in multiple Epochs or EpochsTFR instances.
Parameters
----------
@@ -3612,33 +3729,32 @@ def equalize_epoch_counts(epochs_list, method="mintime"):
--------
>>> equalize_epoch_counts([epochs1, epochs2]) # doctest: +SKIP
"""
- if not all(isinstance(e, BaseEpochs) for e in epochs_list):
+ if not all(isinstance(epoch, (BaseEpochs, EpochsTFR)) for epoch in epochs_list):
-        raise ValueError("All inputs must be Epochs instances")
+        raise ValueError("All inputs must be Epochs or EpochsTFR instances")
# make sure bad epochs are dropped
- for e in epochs_list:
- if not e._bad_dropped:
- e.drop_bad()
- event_times = [e.events[:, 0] for e in epochs_list]
- indices = _get_drop_indices(event_times, method)
- for e, inds in zip(epochs_list, indices):
- e.drop(inds, reason="EQUALIZED_COUNT")
+ for epoch in epochs_list:
+ if not epoch._bad_dropped:
+ epoch.drop_bad()
+ sample_nums = [epoch.events[:, 0] for epoch in epochs_list]
+ indices = _get_drop_indices(sample_nums, method)
+ for epoch, inds in zip(epochs_list, indices):
+ epoch.drop(inds, reason="EQUALIZED_COUNT")
-def _get_drop_indices(event_times, method):
+def _get_drop_indices(sample_nums, method):
"""Get indices to drop from multiple event timing lists."""
- small_idx = np.argmin([e.shape[0] for e in event_times])
- small_e_times = event_times[small_idx]
+ small_idx = np.argmin([e.shape[0] for e in sample_nums])
+ small_epoch_indices = sample_nums[small_idx]
_check_option("method", method, ["mintime", "truncate"])
indices = list()
- for e in event_times:
+ for event in sample_nums:
if method == "mintime":
- mask = _minimize_time_diff(small_e_times, e)
+ mask = _minimize_time_diff(small_epoch_indices, event)
else:
- mask = np.ones(e.shape[0], dtype=bool)
- mask[small_e_times.shape[0] :] = False
+ mask = np.ones(event.shape[0], dtype=bool)
+ mask[small_epoch_indices.shape[0] :] = False
indices.append(np.where(np.logical_not(mask))[0])
-
return indices
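
A small sketch of the string bounds added to ``make_metadata`` above (toy events at 1000 Hz; the helper also returns the filtered events and ``event_id``):

```python
import numpy as np
import mne

# two stimulus/response pairs
events = np.array(
    [
        [1000, 0, 1],
        [1350, 0, 2],
        [3000, 0, 1],
        [3700, 0, 2],
    ]
)
event_id = {"stimulus": 1, "response": 2}

# each row is time-locked to a stimulus; instead of a fixed tmax, the window
# now extends to the following "response" event
metadata, events_out, event_id_out = mne.epochs.make_metadata(
    events=events,
    event_id=event_id,
    row_events="stimulus",
    tmin=0.0,
    tmax="response",
    sfreq=1000.0,
)
print(metadata[["event_name", "response"]])  # response latencies 0.35 s, 0.7 s
```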
diff --git a/mne/evoked.py b/mne/evoked.py
index 1f694f7c11b..36831db8ce0 100644
--- a/mne/evoked.py
+++ b/mne/evoked.py
@@ -9,6 +9,7 @@
# Copyright the MNE-Python contributors.
from copy import deepcopy
+from inspect import getfullargspec
from typing import Union
import numpy as np
@@ -258,7 +259,15 @@ def get_data(self, picks=None, units=None, tmin=None, tmax=None):
@verbose
def apply_function(
- self, fun, picks=None, dtype=None, n_jobs=None, verbose=None, **kwargs
+ self,
+ fun,
+ picks=None,
+ dtype=None,
+ n_jobs=None,
+ channel_wise=True,
+ *,
+ verbose=None,
+ **kwargs,
):
"""Apply a function to a subset of channels.
@@ -271,6 +280,9 @@ def apply_function(
%(dtype_applyfun)s
%(n_jobs)s Ignored if ``channel_wise=False`` as the workload
is split across channels.
+ %(channel_wise_applyfun)s
+
+ .. versionadded:: 1.6
%(verbose)s
%(kwargs_fun)s
@@ -289,21 +301,55 @@ def apply_function(
if dtype is not None and dtype != self._data.dtype:
self._data = self._data.astype(dtype)
+ args = getfullargspec(fun).args + getfullargspec(fun).kwonlyargs
+ if channel_wise is False:
+ if ("ch_idx" in args) or ("ch_name" in args):
+ raise ValueError(
+ "apply_function cannot access ch_idx or ch_name "
+ "when channel_wise=False"
+ )
+ if "ch_idx" in args:
+ logger.info("apply_function requested to access ch_idx")
+ if "ch_name" in args:
+ logger.info("apply_function requested to access ch_name")
+
# check the dimension of the incoming evoked data
_check_option("evoked.ndim", self._data.ndim, [2])
- parallel, p_fun, n_jobs = parallel_func(_check_fun, n_jobs)
- if n_jobs == 1:
- # modify data inplace to save memory
- for idx in picks:
- self._data[idx, :] = _check_fun(fun, data_in[idx, :], **kwargs)
+ if channel_wise:
+ parallel, p_fun, n_jobs = parallel_func(_check_fun, n_jobs)
+ if n_jobs == 1:
+ # modify data inplace to save memory
+ for ch_idx in picks:
+ if "ch_idx" in args:
+ kwargs.update(ch_idx=ch_idx)
+ if "ch_name" in args:
+ kwargs.update(ch_name=self.info["ch_names"][ch_idx])
+ self._data[ch_idx, :] = _check_fun(
+ fun, data_in[ch_idx, :], **kwargs
+ )
+ else:
+ # use parallel function
+ data_picks_new = parallel(
+ p_fun(
+ fun,
+ data_in[ch_idx, :],
+ **kwargs,
+ **{
+ k: v
+ for k, v in [
+ ("ch_name", self.info["ch_names"][ch_idx]),
+ ("ch_idx", ch_idx),
+ ]
+ if k in args
+ },
+ )
+ for ch_idx in picks
+ )
+ for run_idx, ch_idx in enumerate(picks):
+ self._data[ch_idx, :] = data_picks_new[run_idx]
else:
- # use parallel function
- data_picks_new = parallel(
- p_fun(fun, data_in[p, :], **kwargs) for p in picks
- )
- for pp, p in enumerate(picks):
- self._data[p, :] = data_picks_new[pp]
+ self._data[picks, :] = _check_fun(fun, data_in[picks, :], **kwargs)
return self
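
A minimal sketch of the channel-aware access added here, on toy data:

```python
import numpy as np
import mne


def flip_odd(x, ch_idx):
    # apply_function passes ch_idx because it appears in the signature
    return -x if ch_idx % 2 else x


info = mne.create_info(["MEG 001", "MEG 002"], sfreq=1000.0, ch_types="mag")
evoked = mne.EvokedArray(np.ones((2, 50)), info)
evoked.apply_function(flip_odd)
print(evoked.data[:, 0])  # [ 1. -1.]
```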
diff --git a/mne/export/_brainvision.py b/mne/export/_brainvision.py
index 0da7647ecb7..d705d8cef9d 100644
--- a/mne/export/_brainvision.py
+++ b/mne/export/_brainvision.py
@@ -4,11 +4,150 @@
# Copyright the MNE-Python contributors.
import os
+from pathlib import Path
-from ..utils import _check_pybv_installed
+import numpy as np
+
+from mne.channels.channels import _unit2human
+from mne.io.constants import FIFF
+from mne.utils import _check_pybv_installed, warn
_check_pybv_installed()
-from pybv._export import _export_mne_raw # noqa: E402
+from pybv import write_brainvision # noqa: E402
+
+
+def _export_mne_raw(*, raw, fname, events=None, overwrite=False):
+ """Export raw data from MNE-Python.
+
+ Parameters
+ ----------
+ raw : mne.io.Raw
+ The raw data to export.
+ fname : str | pathlib.Path
+ The name of the file where raw data will be exported to. Must end with
+ ``".vhdr"``, and accompanying *.vmrk* and *.eeg* files will be written inside
+ the same directory.
+ events : np.ndarray | None
+ Events to be written to the marker file (*.vmrk*). If array, must be in
+ `MNE-Python format `_. If
+ ``None`` (default), events will be written based on ``raw.annotations``.
+ overwrite : bool
+ Whether or not to overwrite existing data. Defaults to ``False``.
+
+ """
+ # prepare file location
+ if not str(fname).endswith(".vhdr"):
+ raise ValueError("`fname` must have the '.vhdr' extension for BrainVision.")
+ fname = Path(fname)
+ folder_out = fname.parents[0]
+ fname_base = fname.stem
+
+ # prepare data from raw
+ data = raw.get_data() # gets data starting from raw.first_samp
+ sfreq = raw.info["sfreq"] # in Hz
+ meas_date = raw.info["meas_date"] # datetime.datetime
+ ch_names = raw.ch_names
+
+ # write voltage units as micro-volts and all other units without scaling
+ # write units that we don't know as n/a
+ unit = []
+ for ch in raw.info["chs"]:
+ if ch["unit"] == FIFF.FIFF_UNIT_V:
+ unit.append("µV")
+ elif ch["unit"] == FIFF.FIFF_UNIT_CEL:
+ unit.append("°C")
+ else:
+ unit.append(_unit2human.get(ch["unit"], "n/a"))
+ unit = [u if u != "NA" else "n/a" for u in unit]
+
+ # enforce conversion to float32 format
+ # XXX: Could add a feature that checks data and optimizes `unit`, `resolution`, and
+ # `format` so that raw.orig_format could be retained if reasonable.
+ if raw.orig_format != "single":
+ warn(
+ f"Encountered data in '{raw.orig_format}' format. Converting to float32.",
+ RuntimeWarning,
+ )
+
+ fmt = "binary_float32"
+ resolution = 0.1
+
+ # handle events
+ # if we got an ndarray, this is in MNE-Python format
+ msg = "`events` must be None or array in MNE-Python format."
+ if events is not None:
+ # subtract raw.first_samp because brainvision marks events starting from the
+ # first available data point and ignores the raw.first_samp
+ assert isinstance(events, np.ndarray), msg
+ assert events.ndim == 2, msg
+ assert events.shape[-1] == 3, msg
+ events[:, 0] -= raw.first_samp
+ events = events[:, [0, 2]] # reorder for pybv required order
+ else: # else, prepare pybv style events from raw.annotations
+ events = _mne_annots2pybv_events(raw)
+
+ # no information about reference channels in mne currently
+ ref_ch_names = None
+
+ # write to BrainVision
+ write_brainvision(
+ data=data,
+ sfreq=sfreq,
+ ch_names=ch_names,
+ ref_ch_names=ref_ch_names,
+ fname_base=fname_base,
+ folder_out=folder_out,
+ overwrite=overwrite,
+ events=events,
+ resolution=resolution,
+ unit=unit,
+ fmt=fmt,
+ meas_date=meas_date,
+ )
+
+
+def _mne_annots2pybv_events(raw):
+ """Convert mne Annotations to pybv events."""
+ events = []
+ for annot in raw.annotations:
+ # handle onset and duration: seconds to sample, relative to
+ # raw.first_samp / raw.first_time
+ onset = annot["onset"] - raw.first_time
+ onset = raw.time_as_index(onset).astype(int)[0]
+ duration = int(annot["duration"] * raw.info["sfreq"])
+
+ # triage type and description
+ # defaults to type="Comment" and the full description
+ etype = "Comment"
+ description = annot["description"]
+ for start in ["Stimulus/S", "Response/R", "Comment/"]:
+ if description.startswith(start):
+ etype = start.split("/")[0]
+ description = description.replace(start, "")
+ break
+
+ if etype in ["Stimulus", "Response"] and description.strip().isdigit():
+ description = int(description.strip())
+ else:
+ # if cannot convert to int, we must use this as "Comment"
+ etype = "Comment"
+
+ event_dict = dict(
+ onset=onset, # in samples
+ duration=duration, # in samples
+ description=description,
+ type=etype,
+ )
+
+ if "ch_names" in annot:
+ # handle channels
+ channels = list(annot["ch_names"])
+ event_dict["channels"] = channels
+
+ # add a "pybv" event
+ events += [event_dict]
+
+ return events
def _export_raw(fname, raw, overwrite):
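
The vendored helper above is reached through ``raw.export``; a sketch with synthetic data (requires ``pybv``):

```python
import numpy as np
import mne

info = mne.create_info(["EEG 001", "EEG 002"], sfreq=250.0, ch_types="eeg")
data = np.random.RandomState(0).randn(2, 500) * 1e-6  # volts
raw = mne.io.RawArray(data, info)

# writes test.vhdr plus the accompanying .vmrk and .eeg files; expect the
# RuntimeWarning about float32 conversion since RawArray data is 'double'
raw.export("test.vhdr", overwrite=True)
```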
diff --git a/mne/export/tests/test_export.py b/mne/export/tests/test_export.py
index fc5e68c9225..808b020bfb4 100644
--- a/mne/export/tests/test_export.py
+++ b/mne/export/tests/test_export.py
@@ -78,8 +78,9 @@ def test_export_raw_pybv(tmp_path, meas_date, orig_time, ext):
raw.set_annotations(annots)
temp_fname = tmp_path / ("test" + ext)
- with _record_warnings(), pytest.warns(
- RuntimeWarning, match="'short' format. Converting"
+ with (
+ _record_warnings(),
+ pytest.warns(RuntimeWarning, match="'short' format. Converting"),
):
raw.export(temp_fname)
raw_read = read_raw_brainvision(str(temp_fname).replace(".eeg", ".vhdr"))
@@ -303,8 +304,9 @@ def test_export_edf_signal_clipping(tmp_path, physical_range, exceeded_bound):
raw = read_raw_fif(fname_raw)
raw.pick(picks=["eeg", "ecog", "seeg"]).load_data()
temp_fname = tmp_path / "test.edf"
- with _record_warnings(), pytest.warns(
- RuntimeWarning, match=f"The {exceeded_bound}"
+ with (
+ _record_warnings(),
+ pytest.warns(RuntimeWarning, match=f"The {exceeded_bound}"),
):
raw.export(temp_fname, physical_range=physical_range)
raw_read = read_raw_edf(temp_fname, preload=True)
diff --git a/mne/filter.py b/mne/filter.py
index 99fdf7f3b00..477434a7ca4 100644
--- a/mne/filter.py
+++ b/mne/filter.py
@@ -2476,7 +2476,7 @@ def savgol_filter(self, h_freq, verbose=None):
Returns
-------
- inst : instance of Epochs or Evoked
+ inst : instance of Epochs, Evoked or SourceEstimate
The object with the filtering applied.
See Also
@@ -2489,6 +2489,8 @@ def savgol_filter(self, h_freq, verbose=None):
https://gist.github.com/larsoner/bbac101d50176611136b
+    When working on SourceEstimates, the sample rate of the original data is inferred from ``tstep``.
+
.. versionadded:: 0.9.0
References
@@ -2504,13 +2506,19 @@ def savgol_filter(self, h_freq, verbose=None):
>>> evoked.savgol_filter(10.) # low-pass at around 10 Hz # doctest:+SKIP
>>> evoked.plot() # doctest:+SKIP
""" # noqa: E501
+ from .source_estimate import _BaseSourceEstimate
+
_check_preload(self, "inst.savgol_filter")
+ if not isinstance(self, _BaseSourceEstimate):
+ s_freq = self.info["sfreq"]
+ else:
+ s_freq = 1 / self.tstep
h_freq = float(h_freq)
- if h_freq >= self.info["sfreq"] / 2.0:
+ if h_freq >= s_freq / 2.0:
raise ValueError("h_freq must be less than half the sample rate")
# savitzky-golay filtering
- window_length = (int(np.round(self.info["sfreq"] / h_freq)) // 2) * 2 + 1
+ window_length = (int(np.round(s_freq / h_freq)) // 2) * 2 + 1
logger.info("Using savgol length %d" % window_length)
self._data[:] = signal.savgol_filter(
self._data, axis=-1, polyorder=5, window_length=window_length
@@ -2537,7 +2545,7 @@ def filter(
*,
verbose=None,
):
- """Filter a subset of channels.
+ """Filter a subset of channels/vertices.
Parameters
----------
@@ -2561,7 +2569,7 @@ def filter(
Returns
-------
- inst : instance of Epochs, Evoked, or Raw
+ inst : instance of Epochs, Evoked, SourceEstimate, or Raw
The filtered data.
See Also
@@ -2598,6 +2606,9 @@ def filter(
``len(picks) * n_times`` additional time points need to
be temporarily stored in memory.
+        When working on SourceEstimates, the sample rate of the original
+        data is inferred from ``tstep``.
+
For more information, see the tutorials
:ref:`disc-filtering` and :ref:`tut-filter-resample` and
:func:`mne.filter.create_filter`.
@@ -2606,11 +2617,16 @@ def filter(
"""
from .annotations import _annotations_starts_stops
from .io import BaseRaw
+ from .source_estimate import _BaseSourceEstimate
_check_preload(self, "inst.filter")
+ if not isinstance(self, _BaseSourceEstimate):
+ update_info, picks = _filt_check_picks(self.info, picks, l_freq, h_freq)
+ s_freq = self.info["sfreq"]
+ else:
+ s_freq = 1.0 / self.tstep
if pad is None and method != "iir":
pad = "edge"
- update_info, picks = _filt_check_picks(self.info, picks, l_freq, h_freq)
if isinstance(self, BaseRaw):
# Deal with annotations
onsets, ends = _annotations_starts_stops(
@@ -2629,7 +2645,7 @@ def filter(
use_verbose = verbose if si == max_idx else "error"
filter_data(
self._data[:, start:stop],
- self.info["sfreq"],
+ s_freq,
l_freq,
h_freq,
picks,
@@ -2646,9 +2662,10 @@ def filter(
pad=pad,
verbose=use_verbose,
)
- # update info if filter is applied to all data channels,
+ # update info if filter is applied to all data channels/vertices,
# and it's not a band-stop filter
- _filt_update_info(self.info, update_info, l_freq, h_freq)
+ if not isinstance(self, _BaseSourceEstimate):
+ _filt_update_info(self.info, update_info, l_freq, h_freq)
return self
@verbose
@@ -2703,7 +2720,7 @@ def resample(
from .evoked import Evoked
# Should be guaranteed by our inheritance, and the fact that
- # mne.io.BaseRaw overrides this method
+    # mne.io.BaseRaw and _BaseSourceEstimate override this method
assert isinstance(self, (BaseEpochs, Evoked))
sfreq = float(sfreq)
@@ -2740,13 +2757,13 @@ def resample(
def apply_hilbert(
self, picks=None, envelope=False, n_jobs=None, n_fft="auto", *, verbose=None
):
- """Compute analytic signal or envelope for a subset of channels.
+ """Compute analytic signal or envelope for a subset of channels/vertices.
Parameters
----------
%(picks_all_data_noref)s
envelope : bool
- Compute the envelope signal of each channel. Default False.
+ Compute the envelope signal of each channel/vertex. Default False.
See Notes.
%(n_jobs)s
n_fft : int | None | str
@@ -2758,19 +2775,19 @@ def apply_hilbert(
Returns
-------
- self : instance of Raw, Epochs, or Evoked
+    self : instance of Raw, Epochs, Evoked, or SourceEstimate
The raw object with transformed data.
Notes
-----
**Parameters**
- If ``envelope=False``, the analytic signal for the channels defined in
+ If ``envelope=False``, the analytic signal for the channels/vertices defined in
``picks`` is computed and the data of the Raw object is converted to
a complex representation (the analytic signal is complex valued).
If ``envelope=True``, the absolute value of the analytic signal for the
- channels defined in ``picks`` is computed, resulting in the envelope
+ channels/vertices defined in ``picks`` is computed, resulting in the envelope
signal.
.. warning: Do not use ``envelope=True`` if you intend to compute
@@ -2803,7 +2820,15 @@ def apply_hilbert(
by computing the analytic signal in sensor space, applying the MNE
inverse, and computing the envelope in source space.
"""
+ from .source_estimate import _BaseSourceEstimate
+
+ if not isinstance(self, _BaseSourceEstimate):
+ use_info = self.info
+ else:
+ use_info = len(self._data)
_check_preload(self, "inst.apply_hilbert")
+ picks = _picks_to_idx(use_info, picks, exclude=(), with_ref_meg=False)
+
if n_fft is None:
n_fft = len(self.times)
elif isinstance(n_fft, str):
@@ -2819,7 +2844,6 @@ def apply_hilbert(
f"{len(self.times)})"
)
dtype = None if envelope else np.complex128
- picks = _picks_to_idx(self.info, picks, exclude=(), with_ref_meg=False)
args, kwargs = (), dict(n_fft=n_fft, envelope=envelope)
data_in = self._data
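
With the sample rate now derived from ``1 / tstep``, the filtering mixin runs on source estimates directly. A sketch, assuming ``SourceEstimate`` exposes these mixin methods as this diff implies:

```python
import numpy as np
import mne

# toy stc: 3 vertices (2 left hemi, 1 right), tstep=1 ms -> sfreq 1000 Hz
vertices = [np.array([0, 1]), np.array([0])]
data = np.random.RandomState(0).randn(3, 1000)
stc = mne.SourceEstimate(data, vertices, tmin=0.0, tstep=0.001)

stc.filter(l_freq=None, h_freq=40.0)  # low-pass at 40 Hz
stc.savgol_filter(h_freq=20.0)  # Savitzky-Golay smoothing around 20 Hz
```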
diff --git a/mne/fixes.py b/mne/fixes.py
index 4759366f386..55e56261866 100644
--- a/mne/fixes.py
+++ b/mne/fixes.py
@@ -31,9 +31,8 @@
###############################################################################
# distutils
-# distutils has been deprecated since Python 3.10 and is scheduled for removal
-# from the standard library with the release of Python 3.12. For version
-# comparisons, we use setuptools's `parse_version` if available.
+# distutils has been deprecated since Python 3.10 and was removed
+# from the standard library with the release of Python 3.12.
def _compare_version(version_a, operator, version_b):
diff --git a/mne/forward/tests/test_forward.py b/mne/forward/tests/test_forward.py
index dd73d1099f1..7442c68959c 100644
--- a/mne/forward/tests/test_forward.py
+++ b/mne/forward/tests/test_forward.py
@@ -230,8 +230,9 @@ def test_apply_forward():
# Evoked
evoked = read_evokeds(fname_evoked, condition=0)
evoked.pick(picks="meg")
- with _record_warnings(), pytest.warns(
- RuntimeWarning, match="only .* positive values"
+ with (
+ _record_warnings(),
+ pytest.warns(RuntimeWarning, match="only .* positive values"),
):
evoked = apply_forward(fwd, stc, evoked.info, start=start, stop=stop)
data = evoked.data
diff --git a/mne/gui/tests/test_gui_api.py b/mne/gui/tests/test_gui_api.py
index 004c670a5ca..ae04124dd14 100644
--- a/mne/gui/tests/test_gui_api.py
+++ b/mne/gui/tests/test_gui_api.py
@@ -11,10 +11,9 @@
pytest.importorskip("nibabel")
-def test_gui_api(renderer_notebook, nbexec, *, n_warn=0, backend="qt"):
+def test_gui_api(renderer_notebook, nbexec, *, backend="qt"):
"""Test GUI API."""
import contextlib
- import sys
import warnings
import mne
@@ -25,7 +24,6 @@ def test_gui_api(renderer_notebook, nbexec, *, n_warn=0, backend="qt"):
except Exception:
# Notebook standalone mode
backend = "notebook"
- n_warn = 0
# nbexec does not expose renderer_notebook so I use a
# temporary variable to synchronize the tests
if backend == "notebook":
@@ -44,8 +42,7 @@ def test_gui_api(renderer_notebook, nbexec, *, n_warn=0, backend="qt"):
with mne.utils._record_warnings() as w:
renderer._window_set_theme("dark")
w = [ww for ww in w if "is not yet supported" in str(ww.message)]
- if sys.platform != "darwin": # sometimes this is fine
- assert len(w) == n_warn, [ww.message for ww in w]
+ assert len(w) == 0, [ww.message for ww in w]
# window without 3d plotter
if backend == "qt":
@@ -387,10 +384,9 @@ def _check_widget_trigger(
def test_gui_api_qt(renderer_interactive_pyvistaqt):
"""Test GUI API with the Qt backend."""
_, api = _check_qt_version(return_api=True)
- n_warn = int(api in ("PySide6", "PyQt6"))
# TODO: After merging https://github.com/mne-tools/mne-python/pull/11567
# The Qt CI run started failing about 50% of the time, so let's skip this
# for now.
if api == "PySide6":
pytest.skip("PySide6 causes segfaults on CIs sometimes")
- test_gui_api(None, None, n_warn=n_warn, backend="qt")
+ test_gui_api(None, None, backend="qt")
diff --git a/mne/io/_read_raw.py b/mne/io/_read_raw.py
index c226bf63285..6df23ee02f1 100644
--- a/mne/io/_read_raw.py
+++ b/mne/io/_read_raw.py
@@ -5,7 +5,6 @@
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.
-
from functools import partial
from pathlib import Path
diff --git a/mne/io/artemis123/tests/test_artemis123.py b/mne/io/artemis123/tests/test_artemis123.py
index 2dac9645c33..ec4d3d4017f 100644
--- a/mne/io/artemis123/tests/test_artemis123.py
+++ b/mne/io/artemis123/tests/test_artemis123.py
@@ -97,8 +97,9 @@ def test_dev_head_t():
assert_equal(raw.info["sfreq"], 5000.0)
# test with head loc and digitization
- with pytest.warns(RuntimeWarning, match="consistency"), pytest.warns(
- RuntimeWarning, match="Large difference"
+ with (
+ pytest.warns(RuntimeWarning, match="consistency"),
+ pytest.warns(RuntimeWarning, match="Large difference"),
):
raw = read_raw_artemis123(
short_HPI_dip_fname, add_head_trans=True, pos_fname=dig_fname
diff --git a/mne/io/base.py b/mne/io/base.py
index bb40075335c..99a8e658fc4 100644
--- a/mne/io/base.py
+++ b/mne/io/base.py
@@ -18,6 +18,7 @@
from copy import deepcopy
from dataclasses import dataclass, field
from datetime import timedelta
+from inspect import getfullargspec
import numpy as np
@@ -1087,19 +1088,50 @@ def apply_function(
if dtype is not None and dtype != self._data.dtype:
self._data = self._data.astype(dtype)
+ args = getfullargspec(fun).args + getfullargspec(fun).kwonlyargs
+ if channel_wise is False:
+ if ("ch_idx" in args) or ("ch_name" in args):
+ raise ValueError(
+ "apply_function cannot access ch_idx or ch_name "
+ "when channel_wise=False"
+ )
+ if "ch_idx" in args:
+ logger.info("apply_function requested to access ch_idx")
+ if "ch_name" in args:
+ logger.info("apply_function requested to access ch_name")
+
if channel_wise:
parallel, p_fun, n_jobs = parallel_func(_check_fun, n_jobs)
if n_jobs == 1:
# modify data inplace to save memory
- for idx in picks:
- self._data[idx, :] = _check_fun(fun, data_in[idx, :], **kwargs)
+ for ch_idx in picks:
+ if "ch_idx" in args:
+ kwargs.update(ch_idx=ch_idx)
+ if "ch_name" in args:
+ kwargs.update(ch_name=self.info["ch_names"][ch_idx])
+ self._data[ch_idx, :] = _check_fun(
+ fun, data_in[ch_idx, :], **kwargs
+ )
else:
# use parallel function
data_picks_new = parallel(
- p_fun(fun, data_in[p], **kwargs) for p in picks
+ p_fun(
+ fun,
+ data_in[ch_idx],
+ **kwargs,
+ **{
+ k: v
+ for k, v in [
+ ("ch_name", self.info["ch_names"][ch_idx]),
+ ("ch_idx", ch_idx),
+ ]
+ if k in args
+ },
+ )
+ for ch_idx in picks
)
- for pp, p in enumerate(picks):
- self._data[p, :] = data_picks_new[pp]
+ for run_idx, ch_idx in enumerate(picks):
+ self._data[ch_idx, :] = data_picks_new[run_idx]
else:
self._data[picks, :] = _check_fun(fun, data_in[picks, :], **kwargs)
@@ -1662,7 +1694,13 @@ def save(
endings_err = (".fif", ".fif.gz")
# convert to str, check for overwrite a few lines later
- fname = _check_fname(fname, overwrite=True, verbose="error")
+ fname = _check_fname(
+ fname,
+ overwrite=True,
+ verbose="error",
+ check_bids_split=True,
+ name="fname",
+ )
check_fname(fname, "raw", endings, endings_err=endings_err)
split_size = _get_split_size(split_size)
diff --git a/mne/io/besa/tests/test_besa.py b/mne/io/besa/tests/test_besa.py
index aeecf48cd63..2ee2843840b 100644
--- a/mne/io/besa/tests/test_besa.py
+++ b/mne/io/besa/tests/test_besa.py
@@ -1,6 +1,7 @@
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.
"""Test reading BESA fileformats."""
+
import inspect
from pathlib import Path
diff --git a/mne/io/brainvision/brainvision.py b/mne/io/brainvision/brainvision.py
index 5aabdbb626c..9a8531a22d1 100644
--- a/mne/io/brainvision/brainvision.py
+++ b/mne/io/brainvision/brainvision.py
@@ -447,7 +447,7 @@ def _aux_hdr_info(hdr_fname):
params, settings = settings.split("[Comment]")
else:
params, settings = settings, ""
- cfg = configparser.ConfigParser()
+ cfg = configparser.ConfigParser(interpolation=None)
with StringIO(params) as fid:
cfg.read_file(fid)
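
Context for the one-line change above: with the default interpolation, a stray ``%`` in a BrainVision header value raises an ``InterpolationSyntaxError``. A standalone sketch of the difference:

```python
import configparser
from io import StringIO

params = "[Common Infos]\nDataFile=sub-01_task-rest%20copy.eeg\n"

cfg = configparser.ConfigParser(interpolation=None)
cfg.read_file(StringIO(params))
print(cfg["Common Infos"]["DataFile"])  # '%' passes through verbatim

# with the default BasicInterpolation, accessing the same value raises
# InterpolationSyntaxError because '%2' is not a valid interpolation
```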
diff --git a/mne/io/brainvision/tests/test_brainvision.py b/mne/io/brainvision/tests/test_brainvision.py
index 166c3564fae..309e44e3cf8 100644
--- a/mne/io/brainvision/tests/test_brainvision.py
+++ b/mne/io/brainvision/tests/test_brainvision.py
@@ -613,8 +613,9 @@ def test_brainvision_vectorized_data():
def test_coodinates_extraction():
"""Test reading of [Coordinates] section if present."""
# vhdr 2 has a Coordinates section
- with _record_warnings(), pytest.warns(
- RuntimeWarning, match="coordinate information"
+ with (
+ _record_warnings(),
+ pytest.warns(RuntimeWarning, match="coordinate information"),
):
raw = read_raw_brainvision(vhdr_v2_path)
diff --git a/mne/io/cnt/cnt.py b/mne/io/cnt/cnt.py
index 03d5f2a8030..b91d648e52b 100644
--- a/mne/io/cnt/cnt.py
+++ b/mne/io/cnt/cnt.py
@@ -476,7 +476,8 @@ def _get_cnt_info(
# Header has a field for number of samples, but it does not seem to be
# too reliable. That's why we have option for setting n_bytes manually.
fid.seek(864)
-    n_samples = np.fromfile(fid, dtype="<i4", count=1).item()
+    n_samples_header = np.fromfile(fid, dtype="<i4", count=1).item()
 [...]
+    if ... > n_samples:
+ n_bytes = 4
+ n_samples = n_samples_header
+ warn(
+ "Annotations are outside data range. "
+ "Changing data format to 'int32'."
+ )
else:
n_bytes = data_size // (n_samples * n_channels)
else:
n_bytes = 2 if data_format == "int16" else 4
n_samples = data_size // (n_bytes * n_channels)
+ # See PR #12393
+ if n_samples_header != 0:
+ n_samples = n_samples_header
# Channel offset refers to the size of blocks per channel in the file.
     cnt_info["channel_offset"] = np.fromfile(fid, dtype="<i4", count=1).item()
     if cnt_info["channel_offset"] > 1:
@@ -730,6 +743,7 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
channel_offset = self._raw_extras[fi]["channel_offset"]
baselines = self._raw_extras[fi]["baselines"]
n_bytes = self._raw_extras[fi]["n_bytes"]
+ n_samples = self._raw_extras[fi]["n_samples"]
         dtype = "<i4" if n_bytes == 4 else "<i2"
diff --git a/mne/io/neuralynx/neuralynx.py b/mne/io/neuralynx/neuralynx.py
--- a/mne/io/neuralynx/neuralynx.py
+++ b/mne/io/neuralynx/neuralynx.py
@@ ... @@
+    Notes
+    -----
+    Neuralynx files are read from disk using the `Neo package
+    <https://neo.readthedocs.io/en/latest/rawio.html#neo.rawio.NeuralynxRawIO>`__.
+    Currently, only reading of the ``.ncs`` files is supported.
+
+ ``raw.info["meas_date"]`` is read from the ``recording_opened`` property
+ of the first ``.ncs`` file (i.e. channel) in the dataset (a warning is issued
+ if files have different dates of acquisition).
+
+ Channel-specific high and lowpass frequencies of online filters are determined
+ based on the ``DspLowCutFrequency`` and ``DspHighCutFrequency`` header fields,
+ respectively. If no filters were used for a channel, the default lowpass is set
+ to the Nyquist frequency and the default highpass is set to 0.
+ If channels have different high/low cutoffs, ``raw.info["highpass"]`` and
+    ``raw.info["lowpass"]`` are then set to the maximum highpass and minimum lowpass
+ values across channels, respectively.
+
+ Other header variables can be inspected using Neo directly. For example::
+
+ from neo.io import NeuralynxIO # doctest: +SKIP
+ fname = 'path/to/your/data' # doctest: +SKIP
+ nlx_reader = NeuralynxIO(dirname=fname) # doctest: +SKIP
+ print(nlx_reader.header) # doctest: +SKIP
+ print(nlx_reader.file_headers.items()) # doctest: +SKIP
"""
return RawNeuralynx(
fname,
@@ -148,6 +128,61 @@ def __init__(
sfreq=nlx_reader.get_signal_sampling_rate(),
)
+ ncs_fnames = nlx_reader.ncs_filenames.values()
+ ncs_hdrs = [
+ hdr
+ for hdr_key, hdr in nlx_reader.file_headers.items()
+ if hdr_key in ncs_fnames
+ ]
+
+    # write the recording_opened date of the first .ncs file to info, after
+    # checking that all files agree (to within a tolerance)
+    meas_dates = np.array([hdr["recording_opened"] for hdr in ncs_hdrs])
+    # compute each file's offset from the first date
+ meas_diff = []
+ for md in meas_dates:
+ meas_diff.append((md - meas_dates[0]).total_seconds())
+
+ # tolerate a +/-1 second meas_date difference (arbitrary threshold)
+ # else issue a warning
+ warn_meas = (np.abs(meas_diff) > 1.0).any()
+ if warn_meas:
+ logger.warning(
+ "Not all .ncs files have the same recording_opened date. "
+ + "Writing meas_date based on the first .ncs file."
+ )
+
+    # Neuralynx allows channel-specific low/highpass filters
+ # if not enabled, assume default lowpass = nyquist, highpass = 0
+ default_lowpass = info["sfreq"] / 2 # nyquist
+ default_highpass = 0
+
+ has_hp = [hdr["DSPLowCutFilterEnabled"] for hdr in ncs_hdrs]
+ has_lp = [hdr["DSPHighCutFilterEnabled"] for hdr in ncs_hdrs]
+ if not all(has_hp) or not all(has_lp):
+ logger.warning(
+ "Not all .ncs files have the same high/lowpass filter settings. "
+ + "Assuming default highpass = 0, lowpass = nyquist."
+ )
+
+ highpass_freqs = [
+ float(hdr["DspLowCutFrequency"])
+ if hdr["DSPLowCutFilterEnabled"]
+ else default_highpass
+ for hdr in ncs_hdrs
+ ]
+
+ lowpass_freqs = [
+ float(hdr["DspHighCutFrequency"])
+ if hdr["DSPHighCutFilterEnabled"]
+ else default_lowpass
+ for hdr in ncs_hdrs
+ ]
+
+ with info._unlock():
+ info["meas_date"] = meas_dates[0].astimezone(datetime.timezone.utc)
+ info["highpass"] = np.max(highpass_freqs)
+ info["lowpass"] = np.min(lowpass_freqs)
+
# Neo reads only valid contiguous .ncs samples grouped as segments
n_segments = nlx_reader.header["nb_segment"][0]
block_id = 0 # assumes there's only one block of recording
@@ -258,8 +293,9 @@ def __init__(
def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
"""Read a chunk of raw data."""
- from neo import Segment
+ from neo import AnalogSignal, Segment
from neo.io import NeuralynxIO
+ from neo.io.proxyobjects import AnalogSignalProxy
# quantities is a dependency of neo so we are guaranteed it exists
from quantities import Hz
@@ -311,9 +347,9 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
sel_samples_local[0:-1, 1] = (
sel_samples_global[0:-1, 1] - sel_samples_global[0:-1, 0]
)
- sel_samples_local[
- 1::, 0
- ] = 0 # now set the start sample for all segments after the first to 0
+ sel_samples_local[1::, 0] = (
+ 0 # now set the start sample for all segments after the first to 0
+ )
sel_samples_local[0, 0] = (
start - sel_samples_global[0, 0]
@@ -338,7 +374,7 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
)
for seg, n in zip(gap_segments, gap_samples):
- asig = AnalogSignalGap(
+ asig = AnalogSignal(
signal=np.zeros((n, n_chans)), units="uV", sampling_rate=sfreq * Hz
)
seg.analogsignals.append(asig)
@@ -351,13 +387,16 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
segments_arr[~isgap] = neo_block[0].segments
segments_arr[isgap] = gap_segments
- # now load data from selected segments/channels via
- # neo.Segment.AnalogSignal.load() or AnalogSignalGap.load()
+ # now load data for selected segments/channels via
+ # neo.Segment.AnalogSignalProxy.load() or
+ # pad directly as AnalogSignal.magnitude for any gap data
all_data = np.concatenate(
[
signal.load(channel_indexes=idx).magnitude[
samples[0] : samples[-1] + 1, :
]
+ if isinstance(signal, AnalogSignalProxy)
+ else signal.magnitude[samples[0] : samples[-1] + 1, :]
for seg, samples in zip(
segments_arr[first_seg : last_seg + 1], sel_samples_local
)
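
End to end, the header handling above surfaces like this (directory path hypothetical; requires ``neo``):

```python
import mne

# a directory of .ncs files; meas_date, highpass, and lowpass are now filled
# from the per-channel Neuralynx headers as described in the new docstring
raw = mne.io.read_raw_neuralynx("path/to/nlx_session")
print(raw.info["meas_date"], raw.info["highpass"], raw.info["lowpass"])
```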
diff --git a/mne/io/neuralynx/tests/test_neuralynx.py b/mne/io/neuralynx/tests/test_neuralynx.py
index 14e030df23c..ceebdd3c975 100644
--- a/mne/io/neuralynx/tests/test_neuralynx.py
+++ b/mne/io/neuralynx/tests/test_neuralynx.py
@@ -2,6 +2,7 @@
# Copyright the MNE-Python contributors.
import os
from ast import literal_eval
+from datetime import datetime, timezone
import numpy as np
import pytest
@@ -103,7 +104,12 @@ def _read_nlx_mat_chan_keep_gaps(matfile: str) -> np.ndarray:
return x
+# set known values for the Neuralynx data for testing
expected_chan_names = ["LAHC1", "LAHC2", "LAHC3", "xAIR1", "xEKG1"]
+expected_hp_freq = 0.1
+expected_lp_freq = 500.0
+expected_sfreq = 2000.0
+expected_meas_date = datetime.strptime("2023/11/02 13:39:27", "%Y/%m/%d %H:%M:%S")
@requires_testing_data
@@ -125,6 +131,14 @@ def test_neuralynx():
exclude_fname_patterns=fname_patterns,
)
+ # test that we picked the right info from headers
+ assert raw.info["highpass"] == expected_hp_freq, "highpass freq not set correctly"
+ assert raw.info["lowpass"] == expected_lp_freq, "lowpass freq not set correctly"
+ assert raw.info["sfreq"] == expected_sfreq, "sampling freq not set correctly"
+
+ meas_date_utc = expected_meas_date.astimezone(timezone.utc)
+ assert raw.info["meas_date"] == meas_date_utc, "meas_date not set correctly"
+
# test that channel selection worked
assert (
raw.ch_names == expected_chan_names
diff --git a/mne/io/snirf/_snirf.py b/mne/io/snirf/_snirf.py
index a7d081983af..0974394a700 100644
--- a/mne/io/snirf/_snirf.py
+++ b/mne/io/snirf/_snirf.py
@@ -168,7 +168,7 @@ def natural_keys(text):
for c in channels
]
)
- sources = [f"S{int(s)}" for s in sources]
+ sources = {int(s): f"S{int(s)}" for s in sources}
if "detectorLabels_disabled" in dat["nirs/probe"]:
# This is disabled as
@@ -185,7 +185,7 @@ def natural_keys(text):
for c in channels
]
)
- detectors = [f"D{int(d)}" for d in detectors]
+ detectors = {int(d): f"D{int(d)}" for d in detectors}
# Extract source and detector locations
# 3D positions are optional in SNIRF,
@@ -224,9 +224,6 @@ def natural_keys(text):
"location information"
)
- assert len(sources) == srcPos3D.shape[0]
- assert len(detectors) == detPos3D.shape[0]
-
chnames = []
ch_types = []
for chan in channels:
@@ -248,9 +245,9 @@ def natural_keys(text):
)[0]
)
ch_name = (
- sources[src_idx - 1]
+ sources[src_idx]
+ "_"
- + detectors[det_idx - 1]
+ + detectors[det_idx]
+ " "
+ str(fnirs_wavelengths[wve_idx - 1])
)
@@ -265,7 +262,7 @@ def natural_keys(text):
# Convert between SNIRF processed names and MNE type names
dt_id = dt_id.lower().replace("dod", "fnirs_od")
- ch_name = sources[src_idx - 1] + "_" + detectors[det_idx - 1]
+ ch_name = sources[src_idx] + "_" + detectors[det_idx]
if dt_id == "fnirs_od":
wve_idx = int(
diff --git a/mne/io/tests/test_apply_function.py b/mne/io/tests/test_apply_function.py
index b1869e1dae6..f250e9489b9 100644
--- a/mne/io/tests/test_apply_function.py
+++ b/mne/io/tests/test_apply_function.py
@@ -63,3 +63,32 @@ def test_apply_function_verbose():
assert out is raw
raw.apply_function(printer, verbose=True)
assert sio.getvalue().count("\n") == n_chan
+
+
+def test_apply_function_ch_access():
+ """Test apply_function is able to access channel idx."""
+
+ def _bad_ch_idx(x, ch_idx):
+ assert x[0] == ch_idx
+ return x
+
+ def _bad_ch_name(x, ch_name):
+ assert isinstance(ch_name, str)
+ assert x[0] == float(ch_name)
+ return x
+
+ data = np.full((2, 10), np.arange(2).reshape(-1, 1))
+ raw = RawArray(data, create_info(2, 1.0, "mag"))
+
+ # test ch_idx access in both code paths (parallel / 1 job)
+ raw.apply_function(_bad_ch_idx)
+ raw.apply_function(_bad_ch_idx, n_jobs=2)
+ raw.apply_function(_bad_ch_name)
+ raw.apply_function(_bad_ch_name, n_jobs=2)
+
+ # test input catches
+ with pytest.raises(
+ ValueError,
+ match="cannot access.*when channel_wise=False",
+ ):
+ raw.apply_function(_bad_ch_idx, channel_wise=False)
diff --git a/mne/minimum_norm/resolution_matrix.py b/mne/minimum_norm/resolution_matrix.py
index 3dd24ac6847..655ca991914 100644
--- a/mne/minimum_norm/resolution_matrix.py
+++ b/mne/minimum_norm/resolution_matrix.py
@@ -1,4 +1,5 @@
"""Compute resolution matrix for linear estimators."""
+
# Authors: olaf.hauk@mrc-cbu.cam.ac.uk
#
# License: BSD-3-Clause
diff --git a/mne/minimum_norm/spatial_resolution.py b/mne/minimum_norm/spatial_resolution.py
index d68be423494..c9d28aef4d8 100644
--- a/mne/minimum_norm/spatial_resolution.py
+++ b/mne/minimum_norm/spatial_resolution.py
@@ -7,6 +7,7 @@
Resolution metrics: localisation error, spatial extent, relative amplitude.
Metrics can be computed for point-spread and cross-talk functions (PSFs/CTFs).
"""
+
import numpy as np
from ..source_estimate import SourceEstimate
diff --git a/mne/preprocessing/__init__.pyi b/mne/preprocessing/__init__.pyi
index 0ea66345687..54f1c825c13 100644
--- a/mne/preprocessing/__init__.pyi
+++ b/mne/preprocessing/__init__.pyi
@@ -21,6 +21,7 @@ __all__ = [
"create_eog_epochs",
"equalize_bads",
"eyetracking",
+ "find_bad_channels_lof",
"find_bad_channels_maxwell",
"find_ecg_events",
"find_eog_events",
@@ -54,6 +55,7 @@ from ._fine_cal import (
read_fine_calibration,
write_fine_calibration,
)
+from ._lof import find_bad_channels_lof
from ._peak_finder import peak_finder
from ._regress import EOGRegression, read_eog_regression, regress_artifact
from .artifact_detection import (
diff --git a/mne/preprocessing/_lof.py b/mne/preprocessing/_lof.py
new file mode 100644
index 00000000000..6d777599a8a
--- /dev/null
+++ b/mne/preprocessing/_lof.py
@@ -0,0 +1,99 @@
+"""Bad channel detection using Local Outlier Factor (LOF)."""
+
+# Authors: Velu Prabhakar Kumaravel
+#
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
+
+import numpy as np
+
+from .._fiff.pick import _picks_to_idx
+from ..io.base import BaseRaw
+from ..utils import _soft_import, _validate_type, logger, verbose
+
+
+@verbose
+def find_bad_channels_lof(
+ raw,
+ n_neighbors=20,
+ *,
+ picks=None,
+ metric="euclidean",
+ threshold=1.5,
+ return_scores=False,
+ verbose=None,
+):
+ """Find bad channels using Local Outlier Factor (LOF) algorithm.
+
+ Parameters
+ ----------
+ raw : instance of Raw
+ Raw data to process.
+ n_neighbors : int
+ Number of neighbors defining the local neighborhood (default is 20).
+ Smaller values will lead to higher LOF scores.
+ %(picks_good_data)s
+ metric : str
+        Metric to use for distance computation. Default is ``"euclidean"``,
+ see :func:`sklearn.metrics.pairwise.distance_metrics` for details.
+ threshold : float
+        Threshold to define outliers. Theoretically, the threshold can be any
+        value of 1.0 or greater. Default is 1.5. It is recommended to treat
+        this as a hyperparameter to optimize.
+ return_scores : bool
+ If ``True``, return a dictionary with LOF scores for each
+ evaluated channel. Default is ``False``.
+ %(verbose)s
+
+ Returns
+ -------
+ noisy_chs : list
+ List of bad M/EEG channels that were automatically detected.
+ scores : ndarray, shape (n_picks,)
+ Only returned when ``return_scores`` is ``True``. It contains the
+ LOF outlier score for each channel in ``picks``.
+
+ See Also
+ --------
+ maxwell_filter
+ annotate_amplitude
+
+ Notes
+ -----
+ See :footcite:`KumaravelEtAl2022` and :footcite:`BreunigEtAl2000` for background on
+ choosing ``threshold``.
+
+ .. versionadded:: 1.7
+
+ References
+ ----------
+ .. footbibliography::
+ """ # noqa: E501
+ _soft_import("sklearn", "using LOF detection", strict=True)
+ from sklearn.neighbors import LocalOutlierFactor
+
+ _validate_type(raw, BaseRaw, "raw")
+ # Get the channel types
+ channel_types = raw.get_channel_types()
+ picks = _picks_to_idx(raw.info, picks=picks, none="data", exclude="bads")
+ picked_ch_types = set(channel_types[p] for p in picks)
+
+ # Check if there are different channel types
+ if len(picked_ch_types) != 1:
+ raise ValueError(
+ f"Need exactly one channel type in picks, got {sorted(picked_ch_types)}"
+ )
+ ch_names = [raw.ch_names[pick] for pick in picks]
+ data = raw.get_data(picks=picks)
+ clf = LocalOutlierFactor(n_neighbors=n_neighbors, metric=metric)
+ clf.fit_predict(data)
+ scores_lof = clf.negative_outlier_factor_
+ bad_channel_indices = [
+ i for i, v in enumerate(np.abs(scores_lof)) if v >= threshold
+ ]
+ bads = [ch_names[idx] for idx in bad_channel_indices]
+ logger.info(f"LOF: Detected bad channel(s): {bads}")
+ if return_scores:
+ return bads, scores_lof
+ else:
+ return bads
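
A minimal usage sketch for the new detector (the file path is a placeholder,
and scikit-learn must be installed)::

    import mne
    from mne.preprocessing import find_bad_channels_lof

    raw = mne.io.read_raw_fif("sample_raw.fif", preload=True)  # placeholder
    bads, scores = find_bad_channels_lof(
        raw, n_neighbors=20, picks="eeg", return_scores=True
    )
    raw.info["bads"].extend(bads)
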
diff --git a/mne/preprocessing/artifact_detection.py b/mne/preprocessing/artifact_detection.py
index d2bed58fd78..514eadb00a9 100644
--- a/mne/preprocessing/artifact_detection.py
+++ b/mne/preprocessing/artifact_detection.py
@@ -25,7 +25,15 @@
apply_trans,
quat_to_rot,
)
-from ..utils import _mask_to_onsets_offsets, _pl, _validate_type, logger, verbose
+from ..utils import (
+ _check_option,
+ _mask_to_onsets_offsets,
+ _pl,
+ _validate_type,
+ logger,
+ verbose,
+ warn,
+)
@verbose
@@ -94,16 +102,13 @@ def annotate_muscle_zscore(
ch_type = "eeg"
else:
raise ValueError(
- "No M/EEG channel types found, please specify a"
- " ch_type or provide M/EEG sensor data"
+ "No M/EEG channel types found, please specify a 'ch_type' or provide "
+ "M/EEG sensor data."
)
- logger.info("Using %s sensors for muscle artifact detection" % (ch_type))
-
- if ch_type in ("mag", "grad"):
- raw_copy.pick(ch_type)
+ logger.info("Using %s sensors for muscle artifact detection", ch_type)
else:
- ch_type = {"meg": False, ch_type: True}
- raw_copy.pick(**ch_type)
+ _check_option("ch_type", ch_type, ["mag", "grad", "eeg"])
+ raw_copy.pick(ch_type)
raw_copy.filter(
filter_freq[0],
@@ -289,7 +294,8 @@ def annotate_movement(
return annot, disp
-def compute_average_dev_head_t(raw, pos):
+@verbose
+def compute_average_dev_head_t(raw, pos, *, verbose=None):
"""Get new device to head transform based on good segments.
Segments starting with "BAD" annotations are not included for calculating
@@ -297,19 +303,59 @@ def compute_average_dev_head_t(raw, pos):
Parameters
----------
- raw : instance of Raw
- Data to compute head position.
- pos : array, shape (N, 10)
- The position and quaternion parameters from cHPI fitting.
+ raw : instance of Raw | list of Raw
+ Data to compute head position. Can be a list containing multiple raw
+ instances.
+ pos : array, shape (N, 10) | list of ndarray
+ The position and quaternion parameters from cHPI fitting. Can be
+ a list containing multiple position arrays, one per raw instance passed.
+ %(verbose)s
Returns
-------
dev_head_t : instance of Transform
New ``dev_head_t`` transformation using the averaged good head positions.
+
+ Notes
+ -----
+ .. versionchanged:: 1.7
+ Support for multiple raw instances and position arrays was added.
"""
+ # Get weighted head pos trans and rot
+ if not isinstance(raw, (list, tuple)):
+ raw = [raw]
+ if not isinstance(pos, (list, tuple)):
+ pos = [pos]
+ if len(pos) != len(raw):
+ raise ValueError(
+ f"Number of head positions ({len(pos)}) must match the number of raw "
+ f"instances ({len(raw)})"
+ )
+ hp = list()
+ dt = list()
+ for ri, (r, p) in enumerate(zip(raw, pos)):
+ _validate_type(r, BaseRaw, f"raw[{ri}]")
+ _validate_type(p, np.ndarray, f"pos[{ri}]")
+ hp_, dt_ = _raw_hp_weights(r, p)
+ hp.append(hp_)
+ dt.append(dt_)
+ hp = np.concatenate(hp, axis=0)
+ dt = np.concatenate(dt, axis=0)
+ dt /= dt.sum()
+ best_q = _average_quats(hp[:, 1:4], weights=dt)
+ trans = np.eye(4)
+ trans[:3, :3] = quat_to_rot(best_q)
+ trans[:3, 3] = dt @ hp[:, 4:7]
+ dist = np.linalg.norm(trans[:3, 3])
+ if dist > 1: # less than 1 meter is sane
+ warn(f"Implausible head position detected: {dist} meters from device origin")
+ dev_head_t = Transform("meg", "head", trans)
+ return dev_head_t
+
+
+def _raw_hp_weights(raw, pos):
sfreq = raw.info["sfreq"]
seg_good = np.ones(len(raw.times))
- trans_pos = np.zeros(3)
hp = pos.copy()
hp_ts = hp[:, 0] - raw._first_time
@@ -349,19 +395,7 @@ def compute_average_dev_head_t(raw, pos):
assert (dt >= 0).all()
dt = dt / sfreq
del seg_good, idx
-
- # Get weighted head pos trans and rot
- trans_pos += np.dot(dt, hp[:, 4:7])
-
- rot_qs = hp[:, 1:4]
- best_q = _average_quats(rot_qs, weights=dt)
-
- trans = np.eye(4)
- trans[:3, :3] = quat_to_rot(best_q)
- trans[:3, 3] = trans_pos / dt.sum()
- assert np.linalg.norm(trans[:3, 3]) < 1 # less than 1 meter is sane
- dev_head_t = Transform("meg", "head", trans)
- return dev_head_t
+ return hp, dt
def _annotations_from_mask(times, mask, annot_name, orig_time=None):
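
With the list support added above, one transform can be averaged across runs.
A usage sketch, assuming two runs with matching cHPI position fits (the file
paths are placeholders)::

    import mne

    raws = [mne.io.read_raw_fif(f) for f in ("run1_raw.fif", "run2_raw.fif")]
    poses = [mne.chpi.read_head_pos(f) for f in ("run1.pos", "run2.pos")]
    dev_head_t = mne.preprocessing.compute_average_dev_head_t(raws, poses)
    # e.g., pass destination=dev_head_t to maxwell_filter for each run
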
diff --git a/mne/preprocessing/eyetracking/__init__.py b/mne/preprocessing/eyetracking/__init__.py
index 01a30bf4436..efab0fb079d 100644
--- a/mne/preprocessing/eyetracking/__init__.py
+++ b/mne/preprocessing/eyetracking/__init__.py
@@ -5,6 +5,7 @@
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.
-from .eyetracking import set_channel_types_eyetrack
+from .eyetracking import set_channel_types_eyetrack, convert_units
from .calibration import Calibration, read_eyelink_calibration
from ._pupillometry import interpolate_blinks
+from .utils import get_screen_visual_angle
diff --git a/mne/preprocessing/eyetracking/_pupillometry.py b/mne/preprocessing/eyetracking/_pupillometry.py
index 956c37cb114..8da124b2e1f 100644
--- a/mne/preprocessing/eyetracking/_pupillometry.py
+++ b/mne/preprocessing/eyetracking/_pupillometry.py
@@ -77,6 +77,7 @@ def _interpolate_blinks(raw, buffer, blink_annots, interpolate_gaze):
logger.info("Interpolating missing data during blinks...")
pre_buffer, post_buffer = buffer
# iterate over each eyetrack channel and interpolate the blinks
+ interpolated_chs = []
for ci, ch_info in enumerate(raw.info["chs"]):
if interpolate_gaze: # interpolate over all eyetrack channels
if ch_info["kind"] != FIFF.FIFFV_EYETRACK_CH:
@@ -107,3 +108,10 @@ def _interpolate_blinks(raw, buffer, blink_annots, interpolate_gaze):
)
# Replace the samples at the blink_indices with the interpolated values
raw._data[ci, blink_indices] = interpolated_samples
+ interpolated_chs.append(ch_info["ch_name"])
+ if interpolated_chs:
+ logger.info(
+ f"Interpolated {len(interpolated_chs)} channels: {interpolated_chs}"
+ )
+ else:
+ warn("No channels were interpolated.")
diff --git a/mne/preprocessing/eyetracking/eyetracking.py b/mne/preprocessing/eyetracking/eyetracking.py
index f6b1b0fd0d4..883cf1934c6 100644
--- a/mne/preprocessing/eyetracking/eyetracking.py
+++ b/mne/preprocessing/eyetracking/eyetracking.py
@@ -8,6 +8,12 @@
import numpy as np
from ..._fiff.constants import FIFF
+from ...epochs import BaseEpochs
+from ...evoked import Evoked
+from ...io import BaseRaw
+from ...utils import _check_option, _validate_type, logger, warn
+from .calibration import Calibration
+from .utils import _check_calibration
# specific function to set eyetrack channels
@@ -164,3 +170,162 @@ def _convert_mm_to_m(array):
def _convert_deg_to_rad(array):
return array * np.pi / 180.0
+
+
+def convert_units(inst, calibration, to="radians"):
+ """Convert Eyegaze data from pixels to radians of visual angle or vice versa.
+
+ .. warning::
+ Currently, depending on the units (pixels or radians), eyegaze channels may not
+ be reported correctly in visualization functions like :meth:`mne.io.Raw.plot`.
+ They will be shown correctly in :func:`mne.viz.eyetracking.plot_gaze`.
+ See :gh:`11879` for more information.
+
+ .. Important::
+ There are important considerations to keep in mind when using this function,
+ see the Notes section below.
+
+ Parameters
+ ----------
+ inst : instance of Raw, Epochs, or Evoked
+ The Raw, Epochs, or Evoked instance with eyegaze channels.
+ calibration : Calibration
+ Instance of Calibration, containing information about the screen size
+ (in meters), viewing distance (in meters), and the screen resolution
+ (in pixels).
+ to : str
+ Must be either ``"radians"`` or ``"pixels"``, indicating the desired unit.
+
+ Returns
+ -------
+ inst : instance of Raw | Epochs | Evoked
+ The Raw, Epochs, or Evoked instance, modified in place.
+
+ Notes
+ -----
+ There are at least two important considerations to keep in mind when using this
+ function:
+
+ 1. Converting between on-screen pixels and visual angle is not a linear
+ transformation. If the visual angle subtends less than approximately ``.44``
+       radians (``25`` degrees), the conversion can be considered approximately
+ linear. However, as the visual angle increases, the conversion becomes
+ increasingly non-linear. This may lead to unexpected results after converting
+ between pixels and visual angle.
+
+    2. This function assumes that the head is fixed in place and aligned with the
+       center of the screen, such that gaze to the center of the screen results
+       in a visual angle of ``0`` radians.
+
+ .. versionadded:: 1.7
+ """
+ _validate_type(inst, (BaseRaw, BaseEpochs, Evoked), "inst")
+ _validate_type(calibration, Calibration, "calibration")
+ _check_option("to", to, ("radians", "pixels"))
+ _check_calibration(calibration)
+
+ # get screen parameters
+ screen_size = calibration["screen_size"]
+ screen_resolution = calibration["screen_resolution"]
+ dist = calibration["screen_distance"]
+
+ # loop through channels and convert units
+ converted_chs = []
+ for ch_dict in inst.info["chs"]:
+ if ch_dict["coil_type"] != FIFF.FIFFV_COIL_EYETRACK_POS:
+ continue
+ unit = ch_dict["unit"]
+ name = ch_dict["ch_name"]
+
+ if ch_dict["loc"][4] == -1: # x-coordinate
+ size = screen_size[0]
+ res = screen_resolution[0]
+ elif ch_dict["loc"][4] == 1: # y-coordinate
+ size = screen_size[1]
+ res = screen_resolution[1]
+ else:
+ raise ValueError(
+ f"loc array not set properly for channel '{name}'. Index 4 should"
+ f" be -1 or 1, but got {ch_dict['loc'][4]}"
+ )
+ # check unit, convert, and set new unit
+ if to == "radians":
+ if unit != FIFF.FIFF_UNIT_PX:
+ raise ValueError(
+                    "Data must be in pixels in order to convert to radians."
+ f" Got {unit} for {name}"
+ )
+ inst.apply_function(_pix_to_rad, picks=name, size=size, res=res, dist=dist)
+ ch_dict["unit"] = FIFF.FIFF_UNIT_RAD
+ elif to == "pixels":
+ if unit != FIFF.FIFF_UNIT_RAD:
+ raise ValueError(
+                    "Data must be in radians in order to convert to pixels."
+ f" Got {unit} for {name}"
+ )
+ inst.apply_function(_rad_to_pix, picks=name, size=size, res=res, dist=dist)
+ ch_dict["unit"] = FIFF.FIFF_UNIT_PX
+ converted_chs.append(name)
+ if converted_chs:
+ logger.info(f"Converted {converted_chs} to {to}.")
+ if to == "radians":
+            # check if any values subtend more than .52 radians
+            # (30 degrees) and warn the user
+ data = inst.get_data(picks=converted_chs)
+ if np.any(np.abs(data) > 0.52):
+ warn(
+ "Some visual angle values subtend greater than .52 radians "
+ "(30 degrees), meaning that the conversion between pixels "
+ "and visual angle may be very non-linear. Take caution when "
+ "interpreting these values. Max visual angle value in data:"
+ f" {np.nanmax(data):0.2f} radians.",
+ UserWarning,
+ )
+ else:
+ warn("Could not find any eyegaze channels. Doing nothing.", UserWarning)
+ return inst
+
+
+def _pix_to_rad(data, size, res, dist):
+ """Convert pixel coordinates to radians of visual angle.
+
+ Parameters
+ ----------
+ data : array-like, shape (n_samples,)
+ A vector of pixel coordinates.
+ size : float
+ The width or height of the screen, in meters.
+ res : int
+ The screen resolution in pixels, along the x or y axis.
+ dist : float
+ The viewing distance from the screen, in meters.
+
+ Returns
+ -------
+    rad : ndarray, shape (n_samples,)
+        The data in radians.
+ """
+ # Center the data so that 0 radians will be the center of the screen
+ data -= res / 2
+ # How many meters is the pixel width or height
+ px_size = size / res
+ # Convert to radians
+ return np.arctan((data * px_size) / dist)
+
+
+def _rad_to_pix(data, size, res, dist):
+ """Convert radians of visual angle to pixel coordinates.
+
+ See the parameters section of _pix_to_rad for more information.
+
+ Returns
+ -------
+    pix : ndarray, shape (n_samples,)
+        The data in pixels.
+ """
+ # How many meters is the pixel width or height
+ px_size = size / res
+ # 1. calculate length of opposite side of triangle (in meters)
+ # 2. convert meters to pixel coordinates
+ # 3. add half of screen resolution to uncenter the pixel data (0,0 is top left)
+ return np.tan(data) * dist / px_size + res / 2
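
The two helpers above are exact inverses. A quick numerical round-trip,
assuming a 0.6 m wide, 1920 px screen viewed from 0.7 m::

    import numpy as np

    size, res, dist = 0.6, 1920, 0.7
    px = np.array([960.0, 1920.0])  # screen center and right edge
    rad = np.arctan((px - res / 2) * (size / res) / dist)
    # center -> 0 rad; right edge -> arctan(0.3 / 0.7), about 0.405 rad (23.2 deg)
    back = np.tan(rad) * dist / (size / res) + res / 2
    np.testing.assert_allclose(back, px)
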
diff --git a/mne/preprocessing/eyetracking/tests/test_eyetracking.py b/mne/preprocessing/eyetracking/tests/test_eyetracking.py
new file mode 100644
index 00000000000..8bea006d9fd
--- /dev/null
+++ b/mne/preprocessing/eyetracking/tests/test_eyetracking.py
@@ -0,0 +1,78 @@
+import numpy as np
+import pytest
+from numpy.testing import assert_allclose
+
+import mne
+from mne._fiff.constants import FIFF
+from mne.utils import _record_warnings
+
+
+def test_set_channel_types_eyetrack(eyetrack_raw):
+ """Test that set_channel_types_eyetrack worked on the fixture."""
+ assert eyetrack_raw.info["chs"][0]["kind"] == FIFF.FIFFV_EYETRACK_CH
+ assert eyetrack_raw.info["chs"][1]["coil_type"] == FIFF.FIFFV_COIL_EYETRACK_POS
+ assert eyetrack_raw.info["chs"][0]["unit"] == FIFF.FIFF_UNIT_PX
+ assert eyetrack_raw.info["chs"][2]["unit"] == FIFF.FIFF_UNIT_NONE
+
+
+def test_convert_units(eyetrack_raw, eyetrack_cal):
+ """Test unit conversion."""
+ raw, cal = eyetrack_raw, eyetrack_cal # shorter names
+
+ # roundtrip conversion should be identical to original data
+ data_orig = raw.get_data(picks=[0]) # take the first x-coord channel
+ mne.preprocessing.eyetracking.convert_units(raw, cal, "radians")
+ assert raw.info["chs"][0]["unit"] == FIFF.FIFF_UNIT_RAD
+ # Gaze was to center of screen, so x-coord and y-coord should now be 0 radians
+ assert_allclose(raw.get_data(picks=[0, 1]), 0)
+
+ # Should raise an error if we try to convert to radians again
+ with pytest.raises(ValueError, match="Data must be in"):
+ mne.preprocessing.eyetracking.convert_units(raw, cal, "radians")
+
+ # Convert back to pixels
+ mne.preprocessing.eyetracking.convert_units(raw, cal, "pixels")
+ assert raw.info["chs"][1]["unit"] == FIFF.FIFF_UNIT_PX
+ data_new = raw.get_data(picks=[0])
+ assert_allclose(data_orig, data_new)
+
+ # Should raise an error if we try to convert to pixels again
+ with pytest.raises(ValueError, match="Data must be in"):
+ mne.preprocessing.eyetracking.convert_units(raw, cal, "pixels")
+
+ # Finally, check that we raise other errors or warnings when we should
+ # warn if no eyegaze channels found
+ raw_misc = raw.copy()
+ with _record_warnings(): # channel units change warning
+ raw_misc.set_channel_types({ch: "misc" for ch in raw_misc.ch_names})
+ with pytest.warns(UserWarning, match="Could not"):
+ mne.preprocessing.eyetracking.convert_units(raw_misc, cal, "radians")
+
+ # raise an error if the calibration is missing a key
+ bad_cal = cal.copy()
+ bad_cal.pop("screen_size")
+ bad_cal["screen_distance"] = None
+ with pytest.raises(KeyError, match="Calibration object must have the following"):
+ mne.preprocessing.eyetracking.convert_units(raw, bad_cal, "radians")
+
+ # warn if visual angle is too large
+ cal_tmp = cal.copy()
+ cal_tmp["screen_distance"] = 0.1
+ raw_tmp = raw.copy()
+ raw_tmp._data[0, :10] = 1900 # gaze to extremity of screen
+ with pytest.warns(UserWarning, match="Some visual angle values"):
+ mne.preprocessing.eyetracking.convert_units(raw_tmp, cal_tmp, "radians")
+
+ # raise an error if channel locations not set
+ raw_missing = raw.copy()
+ raw_missing.info["chs"][0]["loc"] = np.zeros(12)
+ with pytest.raises(ValueError, match="loc array not set"):
+ mne.preprocessing.eyetracking.convert_units(raw_missing, cal, "radians")
+
+
+def test_get_screen_visual_angle(eyetrack_cal):
+ """Test calculating the radians of visual angle for a screen."""
+    # Our toy calibration should subtend .56 x .32 radians, i.e. roughly 32 x 18 degrees
+ viz_angle = mne.preprocessing.eyetracking.get_screen_visual_angle(eyetrack_cal)
+ assert viz_angle.shape == (2,)
+ np.testing.assert_allclose(np.round(viz_angle, 2), (0.56, 0.32))
diff --git a/mne/preprocessing/eyetracking/utils.py b/mne/preprocessing/eyetracking/utils.py
new file mode 100644
index 00000000000..89c379c9760
--- /dev/null
+++ b/mne/preprocessing/eyetracking/utils.py
@@ -0,0 +1,41 @@
+import numpy as np
+
+from ...utils import _validate_type
+from .calibration import Calibration
+
+
+def _check_calibration(
+ calibration, want_keys=("screen_size", "screen_resolution", "screen_distance")
+):
+ missing_keys = []
+ for key in want_keys:
+ if calibration.get(key, None) is None:
+ missing_keys.append(key)
+
+ if missing_keys:
+ raise KeyError(
+ "Calibration object must have the following keys with valid values:"
+ f" {', '.join(missing_keys)}"
+ )
+ else:
+ return True
+
+
+def get_screen_visual_angle(calibration):
+ """Calculate the radians of visual angle that the participant screen subtends.
+
+ Parameters
+ ----------
+ calibration : Calibration
+ An instance of Calibration. Must have valid values for ``"screen_size"`` and
+ ``"screen_distance"`` keys.
+
+ Returns
+ -------
+ visual angle in radians : ndarray, shape (2,)
+ The visual angle of the monitor width and height, respectively.
+ """
+ _validate_type(calibration, Calibration, "calibration")
+ _check_calibration(calibration, want_keys=("screen_size", "screen_distance"))
+ size = np.array(calibration["screen_size"])
+ return 2 * np.arctan(size / (2 * calibration["screen_distance"]))
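
As a sanity check on the formula, a hypothetical 0.4 m x 0.225 m screen viewed
from 0.7 m subtends::

    import numpy as np

    size, dist = np.array([0.4, 0.225]), 0.7
    angles = 2 * np.arctan(size / (2 * dist))
    # about [0.56, 0.32] radians, i.e. roughly 31.9 x 18.3 degrees
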
diff --git a/mne/preprocessing/nirs/_beer_lambert_law.py b/mne/preprocessing/nirs/_beer_lambert_law.py
index f6f17a1ae04..9a39a342e50 100644
--- a/mne/preprocessing/nirs/_beer_lambert_law.py
+++ b/mne/preprocessing/nirs/_beer_lambert_law.py
@@ -25,8 +25,11 @@ def beer_lambert_law(raw, ppf=6.0):
----------
raw : instance of Raw
The optical density data.
- ppf : float
- The partial pathlength factor.
+ ppf : tuple | float
+ The partial pathlength factors for each wavelength.
+
+ .. versionchanged:: 1.7
+ Support for different factors for the two wavelengths.
Returns
-------
@@ -35,8 +38,15 @@ def beer_lambert_law(raw, ppf=6.0):
"""
raw = raw.copy().load_data()
_validate_type(raw, BaseRaw, "raw")
- _validate_type(ppf, "numeric", "ppf")
- ppf = float(ppf)
+ _validate_type(ppf, ("numeric", "array-like"), "ppf")
+ ppf = np.array(ppf, float)
+ if ppf.ndim == 0: # upcast single float to shape (2,)
+ ppf = np.array([ppf, ppf])
+ if ppf.shape != (2,):
+ raise ValueError(
+ f"ppf must be float or array-like of shape (2,), got shape {ppf.shape}"
+ )
+ ppf = ppf[:, np.newaxis] # shape (2, 1)
picks = _validate_nirs_info(raw.info, fnirs="od", which="Beer-lambert")
# This is the one place we *really* need the actual/accurate frequencies
freqs = np.array([raw.info["chs"][pick]["loc"][9] for pick in picks], float)
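
With the shape handling above, ``ppf`` may now be given per wavelength. A usage
sketch (the factors are illustrative, not recommended values, and
``raw_intensity`` is assumed to be an already-loaded fNIRS recording)::

    from mne.preprocessing.nirs import beer_lambert_law, optical_density

    raw_od = optical_density(raw_intensity)
    raw_hb = beer_lambert_law(raw_od, ppf=(6.0, 4.76))  # one PPF per wavelength
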
diff --git a/mne/preprocessing/nirs/tests/test_beer_lambert_law.py b/mne/preprocessing/nirs/tests/test_beer_lambert_law.py
index 29dd6b3bd4d..da5341b17d5 100644
--- a/mne/preprocessing/nirs/tests/test_beer_lambert_law.py
+++ b/mne/preprocessing/nirs/tests/test_beer_lambert_law.py
@@ -78,7 +78,7 @@ def test_beer_lambert_v_matlab():
pymatreader = pytest.importorskip("pymatreader")
raw = read_raw_nirx(fname_nirx_15_0)
raw = optical_density(raw)
- raw = beer_lambert_law(raw, ppf=0.121)
+ raw = beer_lambert_law(raw, ppf=(0.121, 0.121))
raw._data *= 1e6 # Scale to uM for comparison to MATLAB
matlab_fname = (
diff --git a/mne/preprocessing/tests/test_artifact_detection.py b/mne/preprocessing/tests/test_artifact_detection.py
index af01fa4416d..6aa386d0b05 100644
--- a/mne/preprocessing/tests/test_artifact_detection.py
+++ b/mne/preprocessing/tests/test_artifact_detection.py
@@ -18,6 +18,7 @@
compute_average_dev_head_t,
)
from mne.tests.test_annotations import _assert_annotations_equal
+from mne.transforms import _angle_dist_between_rigid, quat_to_rot, rot_to_quat
data_path = testing.data_path(download=False)
sss_path = data_path / "SSS"
@@ -35,6 +36,7 @@ def test_movement_annotation_head_correction(meas_date):
raw.set_meas_date(None)
else:
assert meas_date == "orig"
+ raw_unannot = raw.copy()
# Check 5 rotation segments are detected
annot_rot, [] = annotate_movement(raw, pos, rotation_velocity_limit=5)
@@ -67,7 +69,7 @@ def test_movement_annotation_head_correction(meas_date):
_assert_annotations_equal(annot_all_2, annot_all)
assert annot_all.orig_time == raw.info["meas_date"]
raw.set_annotations(annot_all)
- dev_head_t = compute_average_dev_head_t(raw, pos)
+ dev_head_t = compute_average_dev_head_t(raw, pos)["trans"]
dev_head_t_ori = np.array(
[
@@ -78,13 +80,83 @@ def test_movement_annotation_head_correction(meas_date):
]
)
- assert_allclose(dev_head_t_ori, dev_head_t["trans"], rtol=1e-5, atol=0)
+ assert_allclose(dev_head_t_ori, dev_head_t, rtol=1e-5, atol=0)
+
+ with pytest.raises(ValueError, match="Number of .* must match .*"):
+ compute_average_dev_head_t([raw], [pos] * 2)
+ # Using two identical ones should be identical ...
+ dev_head_t_double = compute_average_dev_head_t([raw] * 2, [pos] * 2)["trans"]
+ assert_allclose(dev_head_t, dev_head_t_double)
+ # ... unannotated and annotated versions differ ...
+ dev_head_t_unannot = compute_average_dev_head_t(raw_unannot, pos)["trans"]
+ rot_tol = 1.5e-3
+ mov_tol = 1e-3
+ assert not np.allclose(
+ dev_head_t_unannot[:3, :3],
+ dev_head_t[:3, :3],
+ atol=rot_tol,
+ rtol=0,
+ )
+ assert not np.allclose(
+ dev_head_t_unannot[:3, 3],
+ dev_head_t[:3, 3],
+ atol=mov_tol,
+ rtol=0,
+ )
+    # ... and averaging the two is close (but not identical!) to operating on the
+    # two files. Note they shouldn't be identical because there are more time
+    # points included in the unannotated version!
+ dev_head_t_naive = np.eye(4)
+ dev_head_t_naive[:3, :3] = quat_to_rot(
+ np.mean(
+ rot_to_quat(np.array([dev_head_t[:3, :3], dev_head_t_unannot[:3, :3]])),
+ axis=0,
+ )
+ )
+ dev_head_t_naive[:3, 3] = np.mean(
+ [dev_head_t[:3, 3], dev_head_t_unannot[:3, 3]], axis=0
+ )
+ dev_head_t_combo = compute_average_dev_head_t([raw, raw_unannot], [pos] * 2)[
+ "trans"
+ ]
+ unit_kw = dict(distance_units="mm", angle_units="deg")
+ deg_annot_combo, mm_annot_combo = _angle_dist_between_rigid(
+ dev_head_t,
+ dev_head_t_combo,
+ **unit_kw,
+ )
+ deg_unannot_combo, mm_unannot_combo = _angle_dist_between_rigid(
+ dev_head_t_unannot,
+ dev_head_t_combo,
+ **unit_kw,
+ )
+ deg_annot_unannot, mm_annot_unannot = _angle_dist_between_rigid(
+ dev_head_t,
+ dev_head_t_unannot,
+ **unit_kw,
+ )
+ deg_combo_naive, mm_combo_naive = _angle_dist_between_rigid(
+ dev_head_t_combo,
+ dev_head_t_naive,
+ **unit_kw,
+ )
+ # combo<->naive closer than combo<->annotated closer than annotated<->unannotated
+ assert 0.05 < deg_combo_naive < deg_annot_combo < deg_annot_unannot < 1.5
+ assert 0.1 < mm_combo_naive < mm_annot_combo < mm_annot_unannot < 2
+ # combo<->naive closer than combo<->unannotated closer than annotated<->unannotated
+ assert 0.05 < deg_combo_naive < deg_unannot_combo < deg_annot_unannot < 1.5
+ assert 0.12 < mm_combo_naive < mm_unannot_combo < mm_annot_unannot < 2.0
# Smoke test skipping time due to previous annotations.
raw.set_annotations(Annotations([raw.times[0]], 0.1, "bad"))
annot_dis, _ = annotate_movement(raw, pos, mean_distance_limit=0.02)
assert annot_dis.duration.size == 1
+ # really far should warn
+ pos[:, 4] += 5
+ with pytest.warns(RuntimeWarning, match="Implausible head position"):
+ compute_average_dev_head_t(raw, pos)
+
@testing.requires_testing_data
@pytest.mark.parametrize("meas_date", (None, "orig"))
diff --git a/mne/preprocessing/tests/test_fine_cal.py b/mne/preprocessing/tests/test_fine_cal.py
index 95c9e7d63ba..2b3d4df0e3f 100644
--- a/mne/preprocessing/tests/test_fine_cal.py
+++ b/mne/preprocessing/tests/test_fine_cal.py
@@ -18,7 +18,7 @@
write_fine_calibration,
)
from mne.preprocessing.tests.test_maxwell import _assert_shielding
-from mne.transforms import _angle_between_quats, rot_to_quat
+from mne.transforms import _angle_dist_between_rigid
from mne.utils import object_diff
# Define fine calibration filepaths
@@ -75,16 +75,17 @@ def test_compute_fine_cal():
orig_trans = _loc_to_coil_trans(orig_locs)
want_trans = _loc_to_coil_trans(want_locs)
got_trans = _loc_to_coil_trans(got_locs)
- dist = np.linalg.norm(got_trans[:, :3, 3] - want_trans[:, :3, 3], axis=1)
- assert_allclose(dist, 0.0, atol=1e-6)
- dist = np.linalg.norm(got_trans[:, :3, 3] - orig_trans[:, :3, 3], axis=1)
- assert_allclose(dist, 0.0, atol=1e-6)
- orig_quat = rot_to_quat(orig_trans[:, :3, :3])
- want_quat = rot_to_quat(want_trans[:, :3, :3])
- got_quat = rot_to_quat(got_trans[:, :3, :3])
- want_orig_angles = np.rad2deg(_angle_between_quats(want_quat, orig_quat))
- got_want_angles = np.rad2deg(_angle_between_quats(got_quat, want_quat))
- got_orig_angles = np.rad2deg(_angle_between_quats(got_quat, orig_quat))
+ want_orig_angles, want_orig_dist = _angle_dist_between_rigid(
+ want_trans, orig_trans, angle_units="deg"
+ )
+ got_want_angles, got_want_dist = _angle_dist_between_rigid(
+ got_trans, want_trans, angle_units="deg"
+ )
+ got_orig_angles, got_orig_dist = _angle_dist_between_rigid(
+ got_trans, orig_trans, angle_units="deg"
+ )
+ assert_allclose(got_want_dist, 0.0, atol=1e-6)
+ assert_allclose(got_orig_dist, 0.0, atol=1e-6)
for key in ("mag", "grad"):
# imb_cals value
p = pick_types(raw.info, meg=key, exclude=())
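
For reference, the helper consolidates the angle/distance comparison that the
removed lines did by hand; a conceptual sketch reconstructed from them (not the
private implementation itself)::

    import numpy as np

    from mne.transforms import _angle_between_quats, rot_to_quat

    def angle_dist(t1, t2):  # t1, t2: stacks of 4x4 rigid transforms
        ang = _angle_between_quats(
            rot_to_quat(t1[..., :3, :3]), rot_to_quat(t2[..., :3, :3])
        )
        dist = np.linalg.norm(t1[..., :3, 3] - t2[..., :3, 3], axis=-1)
        return np.rad2deg(ang), dist  # angles in degrees, distances in meters
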
diff --git a/mne/preprocessing/tests/test_ica.py b/mne/preprocessing/tests/test_ica.py
index 67aabf14b12..299b1e961b3 100644
--- a/mne/preprocessing/tests/test_ica.py
+++ b/mne/preprocessing/tests/test_ica.py
@@ -173,8 +173,9 @@ def test_ica_simple(method):
info = create_info(data.shape[-2], 1000.0, "eeg")
cov = make_ad_hoc_cov(info)
ica = ICA(n_components=n_components, method=method, random_state=0, noise_cov=cov)
- with pytest.warns(RuntimeWarning, match="high-pass filtered"), pytest.warns(
- RuntimeWarning, match="No average EEG.*"
+ with (
+ pytest.warns(RuntimeWarning, match="high-pass filtered"),
+ pytest.warns(RuntimeWarning, match="No average EEG.*"),
):
ica.fit(RawArray(data, info))
transform = ica.unmixing_matrix_ @ ica.pca_components_ @ A
@@ -1259,8 +1260,9 @@ def test_fit_params_epochs_vs_raw(param_name, param_val, tmp_path):
ica = ICA(n_components=n_components, max_iter=max_iter, method=method)
fit_params = {param_name: param_val}
- with _record_warnings(), pytest.warns(
- RuntimeWarning, match="parameters.*will be ignored"
+ with (
+ _record_warnings(),
+ pytest.warns(RuntimeWarning, match="parameters.*will be ignored"),
):
ica.fit(inst=epochs, **fit_params)
assert ica.reject_ == reject
diff --git a/mne/preprocessing/tests/test_lof.py b/mne/preprocessing/tests/test_lof.py
new file mode 100644
index 00000000000..858fa0e4432
--- /dev/null
+++ b/mne/preprocessing/tests/test_lof.py
@@ -0,0 +1,39 @@
+# Authors: Velu Prabhakar Kumaravel
+#
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
+
+from pathlib import Path
+
+import pytest
+
+from mne.io import read_raw_fif
+from mne.preprocessing import find_bad_channels_lof
+
+base_dir = Path(__file__).parent.parent.parent / "io" / "tests" / "data"
+raw_fname = base_dir / "test_raw.fif"
+
+
+@pytest.mark.parametrize(
+ "n_neighbors, ch_type, n_ch, n_bad",
+ [
+ (8, "eeg", 60, 8),
+ (10, "grad", 204, 2),
+ (20, "mag", 102, 0),
+ (30, "grad", 204, 2),
+ ],
+)
+def test_lof(n_neighbors, ch_type, n_ch, n_bad):
+ """Test LOF detection."""
+ pytest.importorskip("sklearn")
+ raw = read_raw_fif(raw_fname).load_data()
+ assert raw.info["bads"] == []
+ bads, scores = find_bad_channels_lof(
+ raw, n_neighbors, picks=ch_type, return_scores=True
+ )
+ bads_2 = find_bad_channels_lof(raw, n_neighbors, picks=ch_type)
+ assert len(scores) == n_ch
+ assert len(bads) == n_bad
+ assert bads == bads_2
+ with pytest.raises(ValueError, match="channel type"):
+ find_bad_channels_lof(raw)
diff --git a/mne/preprocessing/tests/test_ssp.py b/mne/preprocessing/tests/test_ssp.py
index 36bfa3505c1..a6ece5ea2e1 100644
--- a/mne/preprocessing/tests/test_ssp.py
+++ b/mne/preprocessing/tests/test_ssp.py
@@ -151,8 +151,9 @@ def test_compute_proj_eog(average, short_raw):
assert projs == []
raw._data[raw.ch_names.index("EOG 061"), :] = 1.0
- with _record_warnings(), pytest.warns(
- RuntimeWarning, match="filter.*longer than the signal"
+ with (
+ _record_warnings(),
+ pytest.warns(RuntimeWarning, match="filter.*longer than the signal"),
):
projs, events = compute_proj_eog(raw=raw, tmax=dur_use, ch_name="EOG 061")
diff --git a/mne/report/js_and_css/bootstrap-icons/gen_css_for_mne.py b/mne/report/js_and_css/bootstrap-icons/gen_css_for_mne.py
index 95b99c306f7..7eac8ecdaa0 100644
--- a/mne/report/js_and_css/bootstrap-icons/gen_css_for_mne.py
+++ b/mne/report/js_and_css/bootstrap-icons/gen_css_for_mne.py
@@ -15,7 +15,6 @@
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.
-
import base64
from pathlib import Path
diff --git a/mne/report/report.py b/mne/report/report.py
index 534377d62e3..43c3d7c7ac4 100644
--- a/mne/report/report.py
+++ b/mne/report/report.py
@@ -1092,6 +1092,7 @@ def add_epochs(
*,
psd=True,
projs=None,
+ image_kwargs=None,
topomap_kwargs=None,
drop_log_ignore=("IGNORED",),
tags=("epochs",),
@@ -1120,6 +1121,18 @@ def add_epochs(
If ``True``, add PSD plots based on all ``epochs``. If ``False``,
do not add PSD plots.
%(projs_report)s
+ image_kwargs : dict | None
+ Keyword arguments to pass to the "epochs image"-generating
+ function (:meth:`mne.Epochs.plot_image`).
+ Keys are channel types, values are dicts containing kwargs to pass.
+ For example, to use the rejection limits per channel type you could pass::
+
+ image_kwargs=dict(
+                    grad=dict(vmin=-reject['grad'], vmax=reject['grad']),
+ mag=dict(vmin=-reject['mag'], vmax=reject['mag']),
+ )
+
+ .. versionadded:: 1.7
%(topomap_kwargs)s
drop_log_ignore : array-like of str
The drop reasons to ignore when creating the drop log bar plot.
@@ -1130,7 +1143,7 @@ def add_epochs(
Notes
-----
- .. versionadded:: 0.24.0
+ .. versionadded:: 0.24
"""
tags = _check_tags(tags)
add_projs = self.projs if projs is None else projs
@@ -1138,6 +1151,7 @@ def add_epochs(
epochs=epochs,
psd=psd,
add_projs=add_projs,
+ image_kwargs=image_kwargs,
topomap_kwargs=topomap_kwargs,
drop_log_ignore=drop_log_ignore,
section=title,
@@ -3828,63 +3842,9 @@ def _add_epochs_metadata(self, *, epochs, section, tags, replace):
metadata.index.name = "Epoch #"
assert metadata.index.is_unique
- index_name = metadata.index.name # store for later use
+ data_id = metadata.index.name # store for later use
metadata = metadata.reset_index() # We want "proper" columns only
- html = metadata.to_html(
- border=0,
- index=False,
- show_dimensions=True,
- justify="unset",
- float_format=lambda x: f"{round(x, 3):.3f}",
- classes="table table-hover table-striped "
- "table-sm table-responsive small",
- )
- del metadata
-
- # Massage the table such that it woks nicely with bootstrap-table
- htmls = html.split("\n")
- header_pattern = "