diff --git a/.all-contributorsrc b/.all-contributorsrc index 4808d9c24..8ea53af9c 100644 --- a/.all-contributorsrc +++ b/.all-contributorsrc @@ -85,7 +85,10 @@ "ideas", "doc", "tutorial", - "question" + "question", + "bug", + "test", + "review" ] }, { @@ -111,6 +114,15 @@ "review", "talk" ] + }, + { + "login": "monicayao", + "name": "Monica Yao", + "avatar_url": "https://avatars1.githubusercontent.com/u/35382166?v=4", + "profile": "https://github.com/monicayao", + "contributions": [ + "doc" + ] } ], "contributorsPerLine": 5, diff --git a/.circleci/config.yml b/.circleci/config.yml index e7f94bb3a..17079ec1b 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -34,7 +34,7 @@ jobs: else mkdir -p /tmp/data curl -L --create-dirs -o \ - /tmp/data/three-echo/three_echo_Cornell_zcat.nii.gz https://osf.io/e3hsn/download + /tmp/data/three-echo/three_echo_Cornell_zcat.nii.gz https://osf.io/8fzse/download fi - run: name: Download test five-echo data diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 726411f9a..c97c505b8 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -18,6 +18,11 @@ Already know what you're looking for in this guide? Jump to the following sectio * [Structuring contributions](#style-guide) * [Recognizing contributors](#recognizing-contributions) +Don't know where to get started? +Read [Joining the conversation](#joining-the-conversation) and pop into +Gitter to introduce yourself! Let us know what your interests are and we +will help you find an issue to contribute to. Thanks so much! + ## Joining the conversation `tedana` is a young project maintained by a growing group of enthusiastic developers— and we're excited to have you join! @@ -163,6 +168,8 @@ Pull requests should be submitted early and often! If your pull request is not yet ready to be merged, please also include the **[WIP]** prefix. This tells the development team that your pull request is a "work-in-progress", and that you plan to continue working on it. +We request that you do not use the Draft PR feature at this time, +as it interferes with our Continuous Integration tool, Travis. You can also combine the tags above, for example if you are updating both a test and the documentation: **[TST, DOC]**. @@ -197,8 +204,13 @@ And, if you have any questions, please don't hesitate to ask! ## Recognizing contributors -We welcome and recognize all contributions from documentation to testing to code development. -You can see a list of current contributors in the [contributors tab][link_contributors]. +We welcome and recognize [all contributions][link_all-contributors-spec] +from documentation to testing to code development. +You can see a list of current contributors in the +README +(kept up to date by the [all contributors bot][link_all-contributors-bot]). +You can see [here][link_all-contributors-bot-usage] for instructions on +how to use the bot. ## Thank you! @@ -245,4 +257,7 @@ You're awesome. 
:wave::smiley: [link_rst_guide]: http://docs.sphinxdocs.com/en/latest/step-1.html [link_contributors]: https://github.com/ME-ICA/tedana/graphs/contributors +[link_all-contributors-spec]: https://allcontributors.org/docs/en/specification +[link_all-contributors-bot]: https://allcontributors.org/docs/en/bot/overview +[link_all-contributors-bot-usage]: https://allcontributors.org/docs/en/bot/usage [link_stemmrolemodels]: https://github.com/KirstieJane/STEMMRoleModels diff --git a/README.md b/README.md index a2f932386..267a20c6f 100644 --- a/README.md +++ b/README.md @@ -13,30 +13,29 @@ multi-echo functional magnetic resonance imaging (fMRI) data. [![Documentation Status](https://readthedocs.org/projects/tedana/badge/?version=latest)](http://tedana.readthedocs.io/en/latest/?badge=latest) [![Codecov](https://codecov.io/gh/me-ica/tedana/branch/master/graph/badge.svg)](https://codecov.io/gh/me-ica/tedana) [![Join the chat at https://gitter.im/ME-ICA/tedana](https://badges.gitter.im/ME-ICA/tedana.svg)](https://gitter.im/ME-ICA/tedana?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) -[![All Contributors](https://img.shields.io/badge/all_contributors-9-orange.svg?style=flat-square)](#contributors) +[![Join our tinyletter mailing list](https://img.shields.io/badge/receive-our%20newsletter%20❀%EF%B8%8F-blueviolet.svg)](https://tinyletter.com/tedana-devs) +[![All Contributors](https://img.shields.io/badge/all_contributors-10-orange.svg)](#contributors) ## About ``tedana`` originally came about as a part of the [ME-ICA](https://github.com/me-ica/me-ica) pipeline. -The ME-ICA pipeline originally performed both pre-processing and TE-dependent -analysis of multi-echo fMRI data; however, ``tedana`` now assumes that you're -working with data which has been previously preprocessed. +The ME-ICA pipeline originally performed both pre-processing and TE-dependent analysis of multi-echo fMRI data; however, ``tedana`` now assumes that you're working with data which has been previously preprocessed. ![http://tedana.readthedocs.io/](https://user-images.githubusercontent.com/7406227/40031156-57b7cbb8-57bc-11e8-8c51-5b29f2e86a48.png) -More information and documentation can be found at https://tedana.readthedocs.io/. +More information and documentation can be found at https://tedana.readthedocs.io. ## Installation You'll need to set up a working development environment to use `tedana`. To set up a local environment, you will need Python >=3.5 and the following packages will need to be installed: -[numpy](http://www.numpy.org/) -[scipy](https://www.scipy.org/) -[scikit-learn](http://scikit-learn.org/stable/) -[nilearn](https://nilearn.github.io/) -[nibabel>=2.1.0](http://nipy.org/nibabel/) +* [numpy](http://www.numpy.org/) +* [scipy](https://www.scipy.org/) +* [scikit-learn](http://scikit-learn.org/stable/) +* [nilearn](https://nilearn.github.io/) +* [nibabel>=2.1.0](http://nipy.org/nibabel/) You can then install `tedana` with @@ -45,20 +44,21 @@ pip install tedana ``` ### Creating a miniconda environment for use with `tedana` + In using `tedana`, you can optionally configure [a conda environment](https://conda.io/docs/user-guide/tasks/manage-environments.html). We recommend using [miniconda3](https://conda.io/miniconda.html). 
After installation, you can use the following commands to create an environment for `tedana`: ```bash -conda create -n ENVIRONMENT_NAME python=3 pip mdp numpy scikit-learn scipy +conda create -n ENVIRONMENT_NAME python=3 pip mdp numpy scikit-learn scipy conda activate ENVIRONMENT_NAME pip install nilearn nibabel pip install tedana ``` `tedana` will then be available in your path. -This will also allow any previously existing tedana installations to remain untouched. +This will also allow any previously existing `tedana` installations to remain untouched. To exit this conda environment, use @@ -66,10 +66,8 @@ To exit this conda environment, use conda deactivate ``` -NOTE: Conda < 4.6 users will need to use the soon-to-be-deprecated option -`source` rather than `conda` for the activation and deactivation steps. -You can read more about managing conda environments and this discrepancy here: -[here](https://docs.conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html) +NOTE: Conda < 4.6 users will need to use the soon-to-be-deprecated option `source` rather than `conda` for the activation and deactivation steps. +You can read more about managing conda environments and this discrepancy [here](https://docs.conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html). ## Getting involved @@ -80,7 +78,13 @@ Want to learn more about our plans for developing ``tedana``? Have a question, comment, or suggestion? Open or comment on one of [our issues](https://github.com/ME-ICA/tedana/issues)! -We ask that all contributions to ``tedana`` respect our [code of conduct](https://github.com/ME-ICA/tedana/blob/master/CODE_OF_CONDUCT.md). +If you're not sure where to begin, feel free to pop into [Gitter](https://gitter.im/ME-ICA/tedana) and introduce yourself! +We will be happy to help you find somewhere to get started. + +If you don't want to get lots of notifications, we send out newsletters approximately once per month though our TinyLetter mailing list. +You can view the [previous newsletters](https://tinyletter.com/tedana-devs/archive) and/or sign up to receive future ones at [https://tinyletter.com/tedana-devs](https://tinyletter.com/tedana-devs). + +We ask that all contributors to ``tedana`` across all project-related spaces (including but not limited to: GitHub, Gitter, and project emails), adhere to our [code of conduct](https://github.com/ME-ICA/tedana/blob/master/CODE_OF_CONDUCT.md). ## Contributors @@ -88,7 +92,8 @@ Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/d -
Logan Dowdle
Logan Dowdle

💻 💬 🎨 🐛
Elizabeth DuPre
Elizabeth DuPre

💻 📖 🤔 🚇 👀 💡 ⚠️ 💬
javiergcas
Javier Gonzalez-Castillo

🤔
Dan Handwerker
Dan Handwerker

🎨 📖 💡 👀
prantikk
Prantik Kundu

💻 🤔
Ross Markello
Ross Markello

💻 🚇 💬
Taylor Salo
Taylor Salo

💻 🤔 📖 ✅ 💬
Joshua Teves
Joshua Teves

📆 📖 👀 🚧
Kirstie Whitaker
Kirstie Whitaker

📖 📆 👀 📢
+
Logan Dowdle
Logan Dowdle

💻 💬 🎨 🐛
Elizabeth DuPre
Elizabeth DuPre

💻 📖 🤔 🚇
👀 💡 ⚠️ 💬
Javier Gonzalez-Castillo
Javier Gonzalez-Castillo

🤔
Dan Handwerker
Dan Handwerker

🎨 📖 💡 👀
Prantik Kundu
Prantik Kundu

💻 🤔
Ross Markello
Ross Markello

💻 🚇 💬
Taylor Salo
Taylor Salo

💻 🤔 📖 ✅
💬 🐛 ⚠️ 👀
Joshua Teves
Joshua Teves

📆 📖 👀 🚧
Kirstie Whitaker
Kirstie Whitaker

📖 📆 👀 📢
Monica Yao
Monica Yao

📖
+ diff --git a/docs/_static/12_pca_whitened_data.png b/docs/_static/12_pca_reduced_data.png similarity index 100% rename from docs/_static/12_pca_whitened_data.png rename to docs/_static/12_pca_reduced_data.png diff --git a/docs/_static/example_Component_Overview.png b/docs/_static/example_Component_Overview.png new file mode 100644 index 000000000..267701715 Binary files /dev/null and b/docs/_static/example_Component_Overview.png differ diff --git a/docs/_static/example_Kappa_vs_Rho_Scatter.png b/docs/_static/example_Kappa_vs_Rho_Scatter.png new file mode 100644 index 000000000..54dfd8715 Binary files /dev/null and b/docs/_static/example_Kappa_vs_Rho_Scatter.png differ diff --git a/docs/_static/example_bad_component.png b/docs/_static/example_bad_component.png new file mode 100644 index 000000000..d5c8eda3d Binary files /dev/null and b/docs/_static/example_bad_component.png differ diff --git a/docs/_static/example_good_component.png b/docs/_static/example_good_component.png new file mode 100644 index 000000000..4cff2e90b Binary files /dev/null and b/docs/_static/example_good_component.png differ diff --git a/docs/api.rst b/docs/api.rst index 3f475441f..c3d839eb3 100644 --- a/docs/api.rst +++ b/docs/api.rst @@ -80,23 +80,23 @@ API .. _calibration_ref: -:mod:`tedana.model`: Computing TE-dependence metrics +:mod:`tedana.metrics`: Computing TE-dependence metrics ---------------------------------------------------- -.. automodule:: tedana.model +.. automodule:: tedana.metrics :no-members: :no-inherited-members: -.. autosummary:: tedana.model +.. autosummary:: tedana.metrics :toctree: generated/ :template: function.rst - tedana.model.dependence_metrics - tedana.model.kundu_metrics + tedana.metrics.dependence_metrics + tedana.metrics.kundu_metrics :template: module.rst - tedana.model.fit + tedana.metrics.fit .. currentmodule:: tedana diff --git a/docs/approach.rst b/docs/approach.rst index 5af05ef08..8114632de 100644 --- a/docs/approach.rst +++ b/docs/approach.rst @@ -2,14 +2,15 @@ Processing pipeline details =========================== ``tedana`` works by decomposing multi-echo BOLD data via PCA and ICA. -These components are then analyzed to determine whether they are TE-dependent -or -independent. TE-dependent components are classified as BOLD, while -TE-independent components are classified as non-BOLD, and are discarded as part -of data cleaning. +The resulting components are then analyzed to determine whether they are +TE-dependent or -independent. +TE-dependent components are classified as BOLD, while TE-independent components +are classified as non-BOLD, and are discarded as part of data cleaning. In ``tedana``, we take the time series from all the collected TEs, combine them, and decompose the resulting data into components that can be classified as BOLD -or non-BOLD. This is performed in a series of steps, including: +or non-BOLD. +This is performed in a series of steps, including: * Principal components analysis * Independent components analysis @@ -38,12 +39,14 @@ Adaptive mask generation ```````````````````````` Longer echo times are more susceptible to signal dropout, which means that certain brain regions (e.g., orbitofrontal cortex, temporal poles) will only -have good signal for some echoes. In order to avoid using bad signal from -affected echoes in calculating :math:`T_{2}^*` and :math:`S_{0}` for a given voxel, -``tedana`` generates an adaptive mask, where the value for each voxel is the -number of echoes with "good" signal. 
When :math:`T_{2}^*` and :math:`S_{0}` are -calculated below, each voxel's values are only calculated from the first :math:`n` -echoes, where :math:`n` is the value for that voxel in the adaptive mask. +have good signal for some echoes. +In order to avoid using bad signal from affected echoes in calculating +:math:`T_{2}^*` and :math:`S_{0}` for a given voxel, ``tedana`` generates an +adaptive mask, where the value for each voxel is the number of echoes with +"good" signal. +When :math:`T_{2}^*` and :math:`S_{0}` are calculated below, each voxel's values +are only calculated from the first :math:`n` echoes, where :math:`n` is the +value for that voxel in the adaptive mask. .. note:: ``tedana`` allows users to provide their own mask. @@ -65,8 +68,9 @@ The next step is to fit a monoexponential decay model to the data in order to estimate voxel-wise :math:`T_{2}^*` and :math:`S_0`. In order to make it easier to fit the decay model to the data, ``tedana`` -transforms the data. The BOLD data are transformed as :math:`log(|S|+1)`, where -:math:`S` is the BOLD signal. The echo times are also multiplied by -1. +transforms the data. +The BOLD data are transformed as :math:`log(|S|+1)`, where :math:`S` is the BOLD signal. +The echo times are also multiplied by -1. .. image:: /_static/04_echo_log_value_distributions.png :width: 400 px @@ -80,10 +84,12 @@ this voxel), so the line is fit to all available data. .. note:: ``tedana`` actually performs and uses two sets of :math:`T_{2}^*`/:math:`S_0` model fits. In one case, ``tedana`` estimates :math:`T_{2}^*` and :math:`S_0` for voxels with good signal in at - least two echoes. The resulting "limited" :math:`T_{2}^*` and :math:`S_0` maps are used throughout - most of the pipeline. In the other case, ``tedana`` estimates :math:`T_{2}^*` and :math:`S_0` for voxels - with good data in only one echo as well, but uses the first two echoes for - those voxels. The resulting "full" :math:`T_{2}^*` and :math:`S_0` maps are used to generate the + least two echoes. + The resulting "limited" :math:`T_{2}^*` and :math:`S_0` maps are used throughout + most of the pipeline. + In the other case, ``tedana`` estimates :math:`T_{2}^*` and :math:`S_0` for voxels + with good data in only one echo as well, but uses the first two echoes for those voxels. + The resulting "full" :math:`T_{2}^*` and :math:`S_0` maps are used to generate the optimally combined data. .. image:: /_static/05_loglinear_regression.png @@ -116,12 +122,13 @@ We can also see where :math:`T_{2}^*` lands on this curve. Optimal combination ``````````````````` Using the :math:`T_{2}^*` estimates, ``tedana`` combines signal across echoes using a -weighted average. The echoes are weighted according to the formula +weighted average. +The echoes are weighted according to the formula .. math:: w_{TE} = TE * e^{\frac{-TE}{T_{2}^*}} -The weights are then normalized across echoes. For the example voxel, the -resulting weights are: +The weights are then normalized across echoes. +For the example voxel, the resulting weights are: .. image:: /_static/08_optimal_combination_echo_weights.png :width: 400 px @@ -144,25 +151,27 @@ of the other echoes (which it is). An alternative method for optimal combination that does not use :math:`T_{2}^*`, is the parallel-acquired inhomogeneity desensitized (PAID) ME-fMRI combination method (`Poser et al., 2006`_). - This method specifically assumes that noise in the acquired echoes is "isotopic and - homogeneous throughout the image," meaning it should be used on smoothed data. 
- As we do not recommend performing tedana denoising on smoothed data, + This method specifically assumes that noise in the acquired echoes is "isotopic and + homogeneous throughout the image," meaning it should be used on smoothed data. + As we do not recommend performing tedana denoising on smoothed data, we discourage using PAID within the tedana workflow. - We do, however, make it accessible as an alternative combination method + We do, however, make it accessible as an alternative combination method in the t2smap workflow. TEDPCA `````` -The next step is to identify and temporarily remove Gaussian (thermal) noise -with TE-dependent principal components analysis (PCA). TEDPCA applies PCA to -the optimally combined data in order to decompose it into component maps and -time series. Here we can see time series for some example components (we don't -really care about the maps): +The next step is to dimensionally reduce the data with TE-dependent principal +components analysis (PCA). +The goal of this step is to make it easier for the later ICA decomposition to converge. +Dimensionality reduction is a common step prior to ICA. +TEDPCA applies PCA to the optimally combined data in order to decompose it into component maps and +time series. +Here we can see time series for some example components (we don't really care about the maps): .. image:: /_static/11_pca_component_timeseries.png -These components are subjected to component selection, the -specifics of which vary according to algorithm. +These components are subjected to component selection, the specifics of which +vary according to algorithm. In the simplest approach, ``tedana`` uses Minka’s MLE to estimate the dimensionality of the data, which disregards low-variance components. @@ -172,11 +181,19 @@ discard PCA components which, in addition to not explaining much variance, are also not significantly TE-dependent (i.e., have low Kappa) or TE-independent (i.e., have low Rho). +.. note:: + This process (also performed in TEDICA) can be broadly separated into three + steps: decomposition, metric calculation, and component selection. + Decomposition identifies components in the data. + Metric calculation derives relevant summary statistics for each component. + Component selection uses the summary statistics to identify components that + should be kept or discarded. + After component selection is performed, the retained components and their associated betas are used to reconstruct the optimally combined data, resulting -in a dimensionally reduced (i.e., whitened) version of the dataset. +in a dimensionally reduced version of the dataset. -.. image:: /_static/12_pca_whitened_data.png +.. image:: /_static/12_pca_reduced_data.png TEDICA `````` @@ -188,14 +205,15 @@ order to fit a mixing matrix to the whitened data. .. image:: /_static/13_ica_component_timeseries.png Linear regression is used to fit the component time series to each voxel in each -echo from the original, echo-specific data. This way, the thermal noise is -retained in the data, but is ignored by the TEDICA process. This results in -echo- and voxel-specific betas for each of the components. +echo from the original, echo-specific data. +This way, low-variance information (originally discarded by TEDPCA) is retained +in the data, but is ignored by the TEDICA process. +This results in echo- and voxel-specific betas for each of the components. TE-dependence (:math:`R_2`) and TE-independence (:math:`S_0`) models can then -be fit to these betas. 
These models allow calculation of F-statistics for the -:math:`R_2` and :math:`S_0` models (referred to as :math:`\kappa` and -:math:`\rho`, respectively). +be fit to these betas. +These models allow calculation of F-statistics for the :math:`R_2` and :math:`S_0` +models (referred to as :math:`\kappa` and :math:`\rho`, respectively). .. image:: /_static/14_te_dependence_models_component_0.png :width: 400 px @@ -211,24 +229,27 @@ be fit to these betas. These models allow calculation of F-statistics for the A decision tree is applied to :math:`\kappa`, :math:`\rho`, and other metrics in order to classify ICA components as TE-dependent (BOLD signal), TE-independent -(non-BOLD noise), or neither (to be ignored). The actual decision tree is -dependent on the component selection algorithm employed. ``tedana`` includes -two options: `kundu_v2_5` (which uses hardcoded thresholds applied to each of -the metrics) and `kundu_v3_2` (which trains a classifier to select components). +(non-BOLD noise), or neither (to be ignored). +The actual decision tree is dependent on the component selection algorithm employed. +``tedana`` includes two options: `kundu_v2_5` (which uses hardcoded thresholds +applied to each of the metrics) and `kundu_v3_2` (which trains a classifier to +select components). .. image:: /_static/15_denoised_data_timeseries.png Removal of spatially diffuse noise (optional) ````````````````````````````````````````````` -Due to the constraints of ICA, MEICA is able to identify and remove spatially +Due to the constraints of ICA, TEDICA is able to identify and remove spatially localized noise components, but it cannot identify components that are spread out throughout the whole brain. See `Power et al. (2018)`_ for more information about this issue. One of several post-processing strategies may be applied to the ME-DN or ME-HK datasets in order to remove spatially diffuse (ostensibly respiration-related) -noise. Methods which have been employed in the past include global signal +noise. +Methods which have been employed in the past include global signal regression (GSR), T1c-GSR, anatomical CompCor, Go Decomposition (GODEC), and robust PCA. +Currently, ``tedana`` implements GSR and T1c-GSR. .. image:: /_static/16_t1c_denoised_data_timeseries.png diff --git a/docs/index.rst b/docs/index.rst index b2653d081..67734175d 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -40,6 +40,10 @@ multi-echo functional magnetic resonance imaging (fMRI) data. :target: https://gitter.im/ME-ICA/tedana :alt: Join the chat +.. image:: https://img.shields.io/badge/receive-our%20newsletter%20❀%EF%B8%8F-blueviolet.svg + :target: https://tinyletter.com/tedana-devs + :alt: Join our tinyletter mailing list + About ----- @@ -51,8 +55,15 @@ The ME-ICA pipeline originally performed both pre-processing and TE-dependent analysis of multi-echo fMRI data; however, ``tedana`` now assumes that you're working with data which has been previously preprocessed. -.. _ME-ICA: https://github.com/me-ica/me-ica +For a summary of multi-echo fMRI, which is the imaging technique ``tedana`` builds on, +visit `Multi-echo fMRI`_. +For a detailed procedure of how ``tedana`` analyzes the data from multi-echo fMRI, +visit `Processing pipeline details`_. + +.. _ME-ICA: https://github.com/me-ica/me-ica +.. _Multi-echo fMRI: https://tedana.readthedocs.io/en/latest/multi-echo.html +.. 
_Processing pipeline details: https://tedana.readthedocs.io/en/latest/approach.html# Citations --------- diff --git a/docs/multi-echo.rst b/docs/multi-echo.rst index 132ad8fef..36d48181d 100644 --- a/docs/multi-echo.rst +++ b/docs/multi-echo.rst @@ -1,7 +1,9 @@ Multi-echo fMRI =============== -In multi-echo (ME) fMRI, data are acquired for multiple echo times, resulting in -multiple time series for each voxel. +In most echo-planar image (EPI) fMRI sequences, only one brain image is acquired +at each repetition time, at the rate of radio frequency (RF). In contrast, in +multi-echo (ME) fMRI, data are acquired for multiple echo times, resulting in +multiple volumes with varying levels of contrast acquired per RF. The physics of multi-echo fMRI ------------------------------ @@ -35,7 +37,7 @@ This allows researchers to benchmark their results. For optimally combined: Rather than analyzing single-echo time series separately, we can combine them into a "optimally combined time series". -For more information on this combination, see :ref:`approach`. +For more information on this combination, see `processing pipeline details`_. Optimally combined data exhibits higher SNR and improves statistical power of analyses in regions traditionally affected by drop-out. @@ -49,7 +51,35 @@ Collecting multi-echo EPI allows us to leverage all of the information available as well as additional information only available when looking at signal decay across multiple TEs. We can use this information to denoise the optimally combined time series. -.. _Pruim et al., 2015: https://www.sciencedirect.com/science/article/pii/S1053811915001822 +.. _processing pipeline details: https://tedana.readthedocs.io/en/latest/approach.html#optimal-combination +.. _Pruim et al. (2015): https://www.sciencedirect.com/science/article/pii/S1053811915001822 + +Recommendations on multi-echo use for someone planning a new study +------------------------------------------------------------------ +Multi-echo fMRI acquisition sequences and analysis methods are rapidly maturing. Someone who has access +to a multi-echo fMRI sequence should seriously consider using it. Multiple studies have shown that a +weighted average of the echoes to optimize T2* weighting, sometimes called "optimally combined," +gives a reliable, modest boost in data quality. The optimal combination of echoes can currently be +calculated in several software packages including AFNI, fMRIPrep, and tedana. In tedana, the weighted +average can be calculated with `t2smap`_ If no other +acquisition compromises are necessary to acquire multi-echo data, this boost is worthwhile. If other +compromises are necessary, consider the life of the data set. If data is being acquired for a discrete +study that will be acquired, analyzed, and published in a year or two, it might not be worth making +compromises to acquire multi-echo data. If a data set is expected to be used for future analyses in later +years, it is likely that more powerful approaches to multi-echo denoising will sufficiently mature and add +even more value to a data set. + +Other multi-echo denoising methods, such as MEICA, the predecessor to tedana, have shown the potential for +much greater data quality improvements, as well as the ability to more accurately separate visually similar +signal vs noise, such as scanner based drifts vs slow changes in BOLD signal. These more powerful methods are +still being improved, and the algorithms are still changing. 
Users need to have the time and knowledge to look +at the denoising output from every run to make sure denoising worked as intended. If someone wants a push-button +way to use multi-echo data to improve data quality, that doesn't require as deep an inspection of every output, +stick with using the weighted average. The developers of tedana look forward to when tedana and other methods +have sufficiently stable algorithms, which have been validated on a wide range data data sets, so that we can +recommended the wide use of tedana. + +.. _t2smap: https://tedana.readthedocs.io/en/latest/usage.html#run-t2smap Acquisition Parameter Recommendations ------------------------------------- @@ -62,7 +92,7 @@ spatial coverage, sample rate, signal-to-noise ratio, signal drop-out, distortio The one difference with multi-echo is a slight time cost. For multi-echo fMRI, the shortest echo time (TE) is essentially free since it is collected in the -gap between the radio frequency (RF) pulse and the single-echo acquisition. +gap between the RF pulse and the single-echo acquisition. The second echo tends to roughly match the single-echo TE. Additional echoes require more time. For example, on a 3T MRI, if the T2* weighted TE is 30ms for single echo fMRI, @@ -115,6 +145,22 @@ Videos .. _educational session from OHBM 2017: https://www.pathlms.com/ohbm/courses/5158/sections/7788/video_presentations/75977 .. _series of lectures from the OHBM 2017 multi-echo session: https://www.pathlms.com/ohbm/courses/5158/sections/7822 +Available multi-echo fMRI sequences for multiple vendors +******************************************************** + +Information on multi-echo sequences from Siemens, GE, and Phillips will be added here. + +Multi-echo preprocessing software +********************************* + +tedana requires data that has already been preprocessed for head motion, alignment, etc. +More details on software packages that include preprocessing options specifically for multi-echo +fMRI data, such as AFNI and fMRIPrep will be added here. + +Other software that uses multi-echo fMRI +**************************************** + +Information and links to other approaches for denoising multi-echo fMRI data will be added here. Datasets ******** diff --git a/docs/outputs.rst b/docs/outputs.rst index b9e402311..5826a52a5 100644 --- a/docs/outputs.rst +++ b/docs/outputs.rst @@ -150,4 +150,78 @@ I012 ignored ign_add1 Visual reports -------------- -We're working on it. +Static visual reports can be generated by using the ``--png`` flag when calling +tedana from the command line. +Images are created and placed within the output directory, in a folder labeled +``figures``. + +These reports consist of three main types of images. + +Component Images +```````````` +.. image:: /_static/example_good_component.png + :align: center + +For each component identified by tedana, a single image will be created. +Above is an example of an accepted component. +These are designed for an up-close inspection of both the spatial and temporal +aspects of the component, as well as ancillary information. + +The title of the plot provides information about variance, kappa and rho values +as well as the reasons for rejection, if any (see above for codes). + +Below this is the component timeseries, color coded on the basis of its +classification. +Green for accepted, Red for rejected, Black for ignored or unclassified. + +Slices are then selected from sagittal, axial and coronal planes, to highlight +the component pattern. 
+By default these images used the red-blue colormap and are scaled to 10% of the +max beta value. + +.. note:: + You can select your own colormap to use by specifying its name when calling + tedana with ``--png-cmap``. + For example, to use the bone colormap, you would add ``--png-cmap bone``. + +Finally, the bottom of the image shows the Fast Fourier Transform of the +component timeseries. + +Tip: Look for your fundamental task frequencies here! + + +.. image:: /_static/example_bad_component.png + :align: center + +Above, you can review a component that was rejected. +In this case, the subject moved each time the task was performed - which +affected single slices of the fMRI volume. +This scan used multiband imaging (collecting multiple slices at once), so +the motion artifact occurs in more than once slice. + + +Kappa vs Rho Scatter Plot +```````````` +.. image:: /_static/example_Kappa_vs_Rho_Scatter.png + :align: center + +This diagnostic plot shows the relationship between kappa and rho values for +each component. + +This can be useful for getting a big picture view of your data or for comparing +denoising performance with various fMRI sequences. + +Double Pie Chart +```````````` +.. image:: /_static/example_Component_Overview.png + :align: center + +This diagnostic plot shows the relative variance explained by each +classification type in the outer ring, with individual components on the inner +ring. +If a low amount of variance is explained, this will be shown as a gap in the +ring. + +Tip: Sometimes large variance is due to singular components, which can be easily +seen here. + diff --git a/tedana/decomposition/pca.py b/tedana/decomposition/pca.py index 239ebc2d7..d8c36ced2 100644 --- a/tedana/decomposition/pca.py +++ b/tedana/decomposition/pca.py @@ -7,7 +7,7 @@ from scipy import stats from sklearn.decomposition import PCA -from tedana import model, utils, io +from tedana import metrics, utils, io from tedana.decomposition._utils import eimask from tedana.stats import computefeats2 from tedana.selection import kundu_tedpca @@ -201,8 +201,8 @@ def tedpca(data_cat, data_oc, combmode, mask, t2s, t2sG, # Normalize each component's time series vTmixN = stats.zscore(comp_ts, axis=0) - comptable, _, _, _ = model.dependence_metrics( - data_cat, data_oc, comp_ts, eimum, t2s, tes, ref_img, + comptable, _, _, _ = metrics.dependence_metrics( + data_cat, data_oc, comp_ts, t2s, tes, ref_img, reindex=False, mmixN=vTmixN, algorithm=None, label='mepca_', out_dir=out_dir, verbose=verbose) diff --git a/tedana/model/__init__.py b/tedana/metrics/__init__.py similarity index 91% rename from tedana/model/__init__.py rename to tedana/metrics/__init__.py index 610910281..95261dd14 100644 --- a/tedana/model/__init__.py +++ b/tedana/metrics/__init__.py @@ -1,7 +1,7 @@ # emacs: -*- mode: python-mode; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*- # ex: set sts=4 ts=4 sw=4 et: -from .fit import ( +from .kundu_fit import ( dependence_metrics, kundu_metrics, get_coeffs, computefeats2 ) diff --git a/tedana/model/fit.py b/tedana/metrics/kundu_fit.py similarity index 98% rename from tedana/model/fit.py rename to tedana/metrics/kundu_fit.py index eed3a82e7..dccc938f8 100644 --- a/tedana/model/fit.py +++ b/tedana/metrics/kundu_fit.py @@ -18,7 +18,7 @@ Z_MAX = 8 -def dependence_metrics(catd, tsoc, mmix, mask, t2s, tes, ref_img, +def dependence_metrics(catd, tsoc, mmix, t2s, tes, ref_img, reindex=False, mmixN=None, algorithm=None, label=None, out_dir='.', verbose=False): """ @@ -33,8 +33,6 @@ def 
dependence_metrics(catd, tsoc, mmix, mask, t2s, tes, ref_img, mmix : (T x C) array_like Mixing matrix for converting input data to component space, where `C` is components and `T` is the same as in `catd` - mask : (S [x E]) array_like - Boolean mask array t2s : (S [x T]) array_like Limited T2* map or timeseries. tes : list @@ -68,11 +66,13 @@ def dependence_metrics(catd, tsoc, mmix, mask, t2s, tes, ref_img, betas : :obj:`numpy.ndarray` mmix_new : :obj:`numpy.ndarray` """ + # Use t2s as mask + mask = t2s != 0 if not (catd.shape[0] == t2s.shape[0] == mask.shape[0] == tsoc.shape[0]): raise ValueError('First dimensions (number of samples) of catd ({0}), ' - 'tsoc ({1}), t2s ({2}), and mask ({3}) do not ' + 'tsoc ({1}), and t2s ({2}) do not ' 'match'.format(catd.shape[0], tsoc.shape[0], - t2s.shape[0], mask.shape[0])) + t2s.shape[0])) elif catd.shape[1] != len(tes): raise ValueError('Second dimension of catd ({0}) does not match ' 'number of echoes provided (tes; ' @@ -87,8 +87,6 @@ def dependence_metrics(catd, tsoc, mmix, mask, t2s, tes, ref_img, '({0}) does not match number of volumes in ' 't2s ({1})'.format(catd.shape[2], t2s.shape[1])) - mask = t2s != 0 # Override mask because problems - # demean optimal combination tsoc = tsoc[mask, :] tsoc_dm = tsoc - tsoc.mean(axis=-1, keepdims=True) diff --git a/tedana/tests/test_combine.py b/tedana/tests/test_combine.py index 1c2f348e2..9a9f12874 100644 --- a/tedana/tests/test_combine.py +++ b/tedana/tests/test_combine.py @@ -1,5 +1,5 @@ """ -Tests for tedana.model.combine +Tests for tedana.combine """ import numpy as np @@ -9,7 +9,7 @@ def test__combine_t2s(): """ - Test tedana.model.combine._combine_t2s + Test tedana.combine._combine_t2s """ np.random.seed(0) n_voxels, n_echos, n_trs = 20, 3, 10 @@ -29,7 +29,7 @@ def test__combine_t2s(): def test__combine_paid(): """ - Test tedana.model.combine._combine_paid + Test tedana.combine._combine_paid """ np.random.seed(0) n_voxels, n_echos, n_trs = 20, 3, 10 @@ -41,7 +41,7 @@ def test__combine_paid(): def test_make_optcom(): """ - Test tedana.model.combine.make_optcom + Test tedana.combine.make_optcom """ np.random.seed(0) n_voxels, n_echos, n_trs = 20, 3, 10 diff --git a/tedana/tests/test_decay.py b/tedana/tests/test_decay.py index b76d508df..09e61176e 100644 --- a/tedana/tests/test_decay.py +++ b/tedana/tests/test_decay.py @@ -1,5 +1,5 @@ """ -Tests for tedana.model.monoexponential +Tests for tedana.decay """ import os.path as op diff --git a/tedana/tests/test_model_fit_dependence_metrics.py b/tedana/tests/test_model_fit_dependence_metrics.py index 52795807c..ee716d349 100644 --- a/tedana/tests/test_model_fit_dependence_metrics.py +++ b/tedana/tests/test_model_fit_dependence_metrics.py @@ -1,11 +1,11 @@ """ -Tests for tedana.model.fit +Tests for tedana.metrics.fit """ import numpy as np import pytest -from tedana.model import fit +from tedana.metrics import kundu_fit def test_break_dependence_metrics(): @@ -17,7 +17,6 @@ def test_break_dependence_metrics(): catd = np.empty((n_samples, n_echos, n_vols)) tsoc = np.empty((n_samples, n_vols)) mmix = np.empty((n_vols, n_comps)) - mask = np.empty((n_samples)) t2s = np.empty((n_samples, n_vols)) t2s_full = np.empty((n_samples, n_vols)) tes = np.empty((n_echos)) @@ -27,46 +26,46 @@ def test_break_dependence_metrics(): # Shape of catd is wrong catd = np.empty((n_samples+1, n_echos, n_vols)) with pytest.raises(ValueError) as e_info: - fit.dependence_metrics(catd=catd, tsoc=tsoc, mmix=mmix, mask=mask, + kundu_fit.dependence_metrics(catd=catd, tsoc=tsoc, mmix=mmix, 
t2s=t2s, tes=tes, ref_img=ref_img, reindex=False, mmixN=None, algorithm='kundu_v3') assert str(e_info.value) == ('First dimensions (number of samples) of ' - 'catd ({0}), tsoc ({1}), t2s ({2}), and mask ' - '({3}) do not match'.format( - catd.shape[0], tsoc.shape[0], t2s.shape[0], - mask.shape[0])) + 'catd ({0}), tsoc ({1}), ' + 'and t2s ({2}) do not match'.format( + catd.shape[0], tsoc.shape[0], + t2s.shape[0])) # Shape of t2s is wrong catd = np.empty((n_samples, n_echos, n_vols)) t2s = np.empty((n_samples+1, n_vols)) with pytest.raises(ValueError) as e_info: - fit.dependence_metrics(catd=catd, tsoc=tsoc, mmix=mmix, mask=mask, + kundu_fit.dependence_metrics(catd=catd, tsoc=tsoc, mmix=mmix, t2s=t2s, tes=tes, ref_img=ref_img, reindex=False, mmixN=None, algorithm='kundu_v3') assert str(e_info.value) == ('First dimensions (number of samples) of ' - 'catd ({0}), tsoc ({1}), t2s ({2}), and mask ' - '({3}) do not match'.format( - catd.shape[0], tsoc.shape[0], t2s.shape[0], - mask.shape[0])) + 'catd ({0}), tsoc ({1}), ' + 'and t2s ({2}) do not match'.format( + catd.shape[0], tsoc.shape[0], + t2s.shape[0])) # Shape of tsoc is wrong t2s = np.empty((n_samples, n_vols)) tsoc = np.empty((n_samples+1, n_vols)) with pytest.raises(ValueError) as e_info: - fit.dependence_metrics(catd=catd, tsoc=tsoc, mmix=mmix, mask=mask, + kundu_fit.dependence_metrics(catd=catd, tsoc=tsoc, mmix=mmix, t2s=t2s, tes=tes, ref_img=ref_img, reindex=False, mmixN=None, algorithm='kundu_v3') assert str(e_info.value) == ('First dimensions (number of samples) of ' - 'catd ({0}), tsoc ({1}), t2s ({2}), and mask ' - '({3}) do not match'.format( - catd.shape[0], tsoc.shape[0], t2s.shape[0], - mask.shape[0])) + 'catd ({0}), tsoc ({1}), ' + 'and t2s ({2}) do not match'.format( + catd.shape[0], tsoc.shape[0], + t2s.shape[0])) # Shape of catd is wrong catd = np.empty((n_samples, n_echos+1, n_vols)) tsoc = np.empty((n_samples, n_vols)) with pytest.raises(ValueError) as e_info: - fit.dependence_metrics(catd=catd, tsoc=tsoc, mmix=mmix, mask=mask, + kundu_fit.dependence_metrics(catd=catd, tsoc=tsoc, mmix=mmix, t2s=t2s, tes=tes, ref_img=ref_img, reindex=False, mmixN=None, algorithm='kundu_v3') assert str(e_info.value) == ('Second dimension of catd ({0}) does not ' @@ -77,7 +76,7 @@ def test_break_dependence_metrics(): # Shape of catd is wrong catd = np.empty((n_samples, n_echos, n_vols+1)) with pytest.raises(ValueError) as e_info: - fit.dependence_metrics(catd=catd, tsoc=tsoc, mmix=mmix, mask=mask, + kundu_fit.dependence_metrics(catd=catd, tsoc=tsoc, mmix=mmix, t2s=t2s, tes=tes, ref_img=ref_img, reindex=False, mmixN=None, algorithm='kundu_v3') assert str(e_info.value) == ('Number of volumes in catd ' @@ -90,7 +89,7 @@ def test_break_dependence_metrics(): catd = np.empty((n_samples, n_echos, n_vols)) t2s = np.empty((n_samples, n_vols+1)) with pytest.raises(ValueError) as e_info: - fit.dependence_metrics(catd=catd, tsoc=tsoc, mmix=mmix, mask=mask, + kundu_fit.dependence_metrics(catd=catd, tsoc=tsoc, mmix=mmix, t2s=t2s, tes=tes, ref_img=ref_img, reindex=False, mmixN=None, algorithm='kundu_v3') assert str(e_info.value) == ('Number of volumes in catd ({0}) ' diff --git a/tedana/tests/test_model_kundu_metrics.py b/tedana/tests/test_model_kundu_metrics.py index b3a9deaaa..a9972ce3a 100644 --- a/tedana/tests/test_model_kundu_metrics.py +++ b/tedana/tests/test_model_kundu_metrics.py @@ -6,7 +6,7 @@ import numpy as np import pandas as pd -from tedana.model import fit +from tedana.metrics import kundu_fit def test_smoke_kundu_metrics(): @@ -34,5 +34,5 @@ 
def test_smoke_kundu_metrics(): metric_maps['Br_R2_clmaps'] = np.random.randint(low=0, high=2, size=(n_voxels, n_comps)) - comptable = fit.kundu_metrics(comptable, metric_maps) + comptable = kundu_fit.kundu_metrics(comptable, metric_maps) assert comptable is not None diff --git a/tedana/tests/test_selection.py b/tedana/tests/test_selection.py index 8a2345644..d65b7acd7 100644 --- a/tedana/tests/test_selection.py +++ b/tedana/tests/test_selection.py @@ -1,5 +1,5 @@ """ -Tests for tedana.model.monoexponential +Tests for tedana.selection """ import os.path as op diff --git a/tedana/viz.py b/tedana/viz.py index e91b1a768..806ba8354 100644 --- a/tedana/viz.py +++ b/tedana/viz.py @@ -9,7 +9,7 @@ matplotlib.use('AGG') import matplotlib.pyplot as plt -from tedana import model +from tedana import metrics from tedana.utils import get_spectrum LGR = logging.getLogger(__name__) @@ -77,7 +77,7 @@ def write_comp_figs(ts, mask, comptable, mmix, ref_img, out_dir, LGR.warning('Provided colormap is not recognized, proceeding with default') png_cmap = 'coolwarm' # regenerate the beta images - ts_B = model.get_coeffs(ts, mmix, mask) + ts_B = metrics.get_coeffs(ts, mmix, mask) ts_B = ts_B.reshape(ref_img.shape[:3] + ts_B.shape[1:]) # trim edges from ts_B array ts_B = trim_edge_zeros(ts_B) diff --git a/tedana/workflows/tedana.py b/tedana/workflows/tedana.py index 54dcdf960..ec0aa5cca 100644 --- a/tedana/workflows/tedana.py +++ b/tedana/workflows/tedana.py @@ -20,7 +20,7 @@ from scipy import stats from nilearn.masking import compute_epi_mask -from tedana import (decay, combine, decomposition, io, model, selection, utils, +from tedana import (decay, combine, decomposition, io, metrics, selection, utils, viz) import tedana.gscontrol as gsc from tedana.workflows.parser_utils import is_valid_file @@ -311,6 +311,17 @@ def tedana_workflow(data, tes, mask=None, mixm=None, ctab=None, manacc=None, n_samp, n_echos, n_vols = catd.shape LGR.debug('Resulting data shape: {}'.format(catd.shape)) + # check if TR is 0 + img_t_r = ref_img.header.get_zooms()[-1] + if img_t_r == 0 and png: + raise IOError('Dataset has a TR of 0. This indicates incorrect' + ' header information. 
To correct this, we recommend' + ' using this snippet:' + '\n' + 'https://gist.github.com/jbteves/032c87aeb080dd8de8861cb151bff5d6' + '\n' + 'to correct your TR to the value it should be.') + if mixm is not None and op.isfile(mixm): mixm = op.abspath(mixm) # Allow users to re-run on same folder @@ -403,23 +414,23 @@ def tedana_workflow(data, tes, mask=None, mixm=None, ctab=None, manacc=None, # Estimate betas and compute selection metrics for mixing matrix # generated from dimensionally reduced data using full data (i.e., data # with thermal noise) - comptable, metric_maps, betas, mmix = model.dependence_metrics( - catd, data_oc, mmix_orig, mask, t2s, tes, + comptable, metric_maps, betas, mmix = metrics.dependence_metrics( + catd, data_oc, mmix_orig, t2s, tes, ref_img, reindex=True, label='meica_', out_dir=out_dir, algorithm='kundu_v2', verbose=verbose) np.savetxt(op.join(out_dir, 'meica_mix.1D'), mmix) - comptable = model.kundu_metrics(comptable, metric_maps) + comptable = metrics.kundu_metrics(comptable, metric_maps) comptable = selection.kundu_selection_v2(comptable, n_echos, n_vols) else: LGR.info('Using supplied mixing matrix from ICA') mmix_orig = np.loadtxt(op.join(out_dir, 'meica_mix.1D')) - comptable, metric_maps, betas, mmix = model.dependence_metrics( - catd, data_oc, mmix_orig, mask, t2s, tes, + comptable, metric_maps, betas, mmix = metrics.dependence_metrics( + catd, data_oc, mmix_orig, t2s, tes, ref_img, label='meica_', out_dir=out_dir, algorithm='kundu_v2', verbose=verbose) if ctab is None: - comptable = model.kundu_metrics(comptable, metric_maps) + comptable = metrics.kundu_metrics(comptable, metric_maps) comptable = selection.kundu_selection_v2(comptable, n_echos, n_vols) else: comptable = pd.read_csv(ctab, sep='\t', index_col='component')
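A clarifying sketch of the signature change threaded through the hunks above: `dependence_metrics` no longer accepts a `mask` argument and instead derives a boolean mask from the limited T2* map (`mask = t2s != 0`), so callers such as `tedana_workflow` and `tedpca` simply drop that argument. The snippet below illustrates only that masking rule with made-up values; it is a minimal sketch, not code from the tedana test suite.

```python
import numpy as np

# Illustrative limited T2* values for five voxels; zeros mark voxels with
# no usable signal (values are invented for this example).
t2s = np.array([0.0, 32.5, 41.0, 0.0, 28.7])

# Mirrors the new behavior inside dependence_metrics: the mask is simply
# "voxels where T2* is nonzero", so it no longer needs to be passed in.
mask = t2s != 0

print(mask)        # [False  True  True False  True]
print(t2s[mask])   # [32.5 41.  28.7]
```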