From 4b8e55500430826fe9692abc35ec7b0388dc749c Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Mon, 8 Aug 2022 16:21:17 -0400 Subject: [PATCH 001/177] Decision tree refactor with minimal and kundu --- docs/building_decision_trees.rst | 283 +++ tedana/io.py | 80 +- tedana/metrics/collect.py | 20 +- tedana/reporting/static_figures.py | 31 +- tedana/resources/config/outputs.json | 16 + .../decision_trees/invalid_kundu_bkup.json | 267 +++ tedana/resources/decision_trees/kundu.json | 427 +++++ tedana/resources/decision_trees/minimal.json | 210 +++ tedana/selection/ComponentSelector.py | 590 ++++++ tedana/selection/__init__.py | 2 +- tedana/selection/_utils.py | 120 -- tedana/selection/selection_nodes.py | 1602 +++++++++++++++++ tedana/selection/selection_utils.py | 755 ++++++++ tedana/selection/tedica.py | 372 +--- tedana/selection/tedpca.py | 2 +- .../tests/data/cornell_three_echo_outputs.txt | 4 + tedana/tests/data/fiu_four_echo_outputs.txt | 4 + .../data/nih_five_echo_outputs_t2smap.txt | 1 + .../data/nih_five_echo_outputs_verbose.txt | 4 + tedana/tests/data/sample_comptable.tsv | 22 + tedana/tests/test_ComponentSelector.py | 250 +++ tedana/tests/test_integration.py | 59 +- tedana/tests/test_selection_nodes.py | 923 ++++++++++ tedana/tests/test_selection_utils.py | 349 +++- tedana/workflows/t2smap.py | 1 + tedana/workflows/tedana.py | 183 +- tedana/workflows/tedana_reclassify.py | 439 +++++ 27 files changed, 6368 insertions(+), 648 deletions(-) create mode 100644 docs/building_decision_trees.rst create mode 100644 tedana/resources/decision_trees/invalid_kundu_bkup.json create mode 100644 tedana/resources/decision_trees/kundu.json create mode 100644 tedana/resources/decision_trees/minimal.json create mode 100644 tedana/selection/ComponentSelector.py delete mode 100644 tedana/selection/_utils.py create mode 100644 tedana/selection/selection_nodes.py create mode 100644 tedana/selection/selection_utils.py create mode 100644 tedana/tests/data/sample_comptable.tsv create mode 100644 tedana/tests/test_ComponentSelector.py create mode 100644 tedana/tests/test_selection_nodes.py create mode 100644 tedana/workflows/tedana_reclassify.py diff --git a/docs/building_decision_trees.rst b/docs/building_decision_trees.rst new file mode 100644 index 000000000..ce71d3ad0 --- /dev/null +++ b/docs/building_decision_trees.rst @@ -0,0 +1,283 @@ +######################################################## +Understanding and building a component selection process +######################################################## + +``tedana`` involves transforming data into components via ICA, and then calculating metrics for each component. +Each metric has one value per component that is stored in a comptable or component_table dataframe. This structure +is then passed to a "decision tree" through which a series of binary choices categorize each component as **accepted** or +**rejected**. The time series for the rejected components are regressed from the data in the final denoising step. + +There are several decision trees that are included by default in ``tedana`` but users can also build their own. +This might be useful if one of the default decision trees needs to be slightly altered due to the nature +of a specific data set, if one has an idea for a new approach to multi-echo denoising, or if one wants to integrate +non-multi-echo metrics into a single decision tree. + +Note: We use two terminologies interchangeably. 
The whole process is called "component selection"
and much of the code uses variants of that phrase (i.e. the ComponentSelector class, selection_nodes for the functions used in selection).
The instructions for how to classify components are called a "decision tree" since each step in the selection
process branches components into different intermediate or final classifications.

.. contents:: :local:


******************************************
Expected outputs after component selection
******************************************

**All of these are stored in the ComponentSelector object and saved in multiple files**


New columns in the ``component_table`` (sometimes a stand-alone variable ``comptable`` in other parts of the code):
    The default file name for the component table is: ``desc-tedana_metrics.tsv``

    classification:
        In the final table, the only values should be 'accepted' or 'rejected'.
        While the decision tree is running, there may also be intermediate
        classification labels. Note: Nothing in the current code requires a tree to
        assign one of these two labels to every component. There will be a warning
        if other labels remain.

    classification_tags:
        Human readable tags that explain why a classification was reached. These can
        be things like 'Likely BOLD', 'Unlikely BOLD', or 'low variance' (i.e. accepted
        because the variance is too low to justify losing a degree of freedom by
        regressing it out as noise).
        Each component can have no tags (an empty string), one tag, or a comma separated
        list of tags. These tags may be useful parameters for visualizing and reviewing results.

``cross_component_metrics``:
    Default file name: ``desc-ICA_cross_component_metrics.json``
    A dictionary of metrics that are each a single value calculated across components,
    for example the kappa and rho elbows.

``component_status_table``:
    Default file name: ``desc-ICA_status_table.tsv``
    A table where each column lists the classification status of
    each component after each node was run. Columns are only added
    for nodes where component statuses can change.
    This is useful for understanding the classification
    path of each component through the decision tree.

``tree``:
    Default file name: ``desc-ICA_decision_tree.json``
    A copy of the inputted decision tree specification with an added "outputs" field
    for each node. The outputs field (see next section) contains information about what happened during
    execution. Of particular note, each output includes a list of the metrics
    used within the node, "node_label", which is a (hopefully) human readable brief
    description of the node's function, and, for nodes where component classifications
    can change, "numTrue" & "numFalse", which list what changed. The inputted parameters include
    "ifTrue" and "ifFalse", which say what changes for each component. These fields can be used
    to construct a visual flow chart or text-based summary of how classifications changed
    for each run.

``used_metrics``:
    Saved as a field in the ``tree`` json file.
    A list of the metrics that were used in the decision tree. This should
    match ``necessary_metrics``, which is a predefined list of the metrics that
    a tree uses. If these don't match, a warning should appear. These might
    be useful for future work so that a user could input a tree and the metrics
    would be calculated based on what's needed to execute the tree.

``classification_tags``:
    Saved as a field in the ``tree`` json file.
    A list of the pre-specified classification tags that could be used in a decision tree.
    Any reporting interface should use this field so that all possible tags are listed,
    even if a given tag is not used by any component by the end of the selection process.
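For example, the status table can be used to trace how each component's classification changed across the tree. A minimal sketch, assuming ``tedana`` was run without an output prefix so the default file names above apply:

.. code-block:: python

    import pandas as pd

    # Per-node classification history (default tedana output name)
    status = pd.read_csv("desc-ICA_status_table.tsv", sep="\t")

    # Each row is a component; each column is that component's classification
    # after a status-changing node was run. Print component 0's path:
    print(status.iloc[0])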
**Outputs of each decision tree step**

This includes all the information from the specified decision tree under each "node" or function
call. For each node, there is also an "outputs" subfield with information from when the tree
was executed. The tree with the output fields is stored in the Selector object and saved
with the default file name ``desc-ICA_decision_tree.json``.

decision_node_idx:
    The decision tree functions are run as part of an ordered list.
    This is the positional index for when this function was run
    as part of this list, starting with index 0.

used_metrics:
    A list of the metrics used in a node of the decision tree.

used_cross_component_metrics:
    A list of cross component metrics used in the node of a decision tree.

node_label:
    A brief label for what happens in this node that can be used in a decision
    tree summary table or flow chart.

numTrue, numFalse:
    For decision tree (dec) functions, the number of components that were classified
    as true or false, respectively, in this decision tree step.

calc_cross_comp_metrics:
    For calculation (calc) functions, the cross component metrics that were
    calculated in this function. When this is included, each of those
    metrics and the calculated values are also distinct keys in 'outputs'.
    While the cross component metrics table does not include where each metric
    was calculated, that information is stored here.

added_component_table_metrics:
    It is possible to add a new metric to the component table during the selection process.
    This is useful if a metric is to be calculated on a subset of components based on what
    happened during previous steps in the selection process. This is **not** recommended, but,
    since it was done as part of the original decision tree process defined in meica,
    it is possible.
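As an illustration, the "outputs" subfield recorded for a simple decision node might look like the following. The field names are as described above; the specific values shown are hypothetical:

.. code-block:: json

    {
        "outputs": {
            "decision_node_idx": 1,
            "node_label": "rho>kappa",
            "used_metrics": ["kappa", "rho"],
            "numTrue": 5,
            "numFalse": 15
        }
    }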
*******************************
Defining a custom decision tree
*******************************

Decision trees are stored in json files. The default trees are stored as part of the tedana code repository in ./resources/decision_trees.
The minimal tree, minimal.json, is a good example highlighting the structure and steps in a tree. It may be helpful
to look at that tree while reading this section. kundu.json should replicate the decision tree used in meica version 2.7,
the predecessor to tedana. It is more complex, but it also highlights additional possible functionality in decision trees.

A user can specify another decision tree and link to the tree location when tedana is executed with the ``--tree`` option. The format is
flexible to allow for future innovations, but be advised that this flexibility also makes it possible
to create something with non-ideal results for the current code. Some criteria will result in an error
if violated, but others will only produce a warning. If you are designing or editing a new tree, look carefully at the warnings.

A decision tree can include two types of nodes or functions. All functions are currently in selection_nodes.py.

- A decision function will use existing metrics and potentially change the classification of the components based on those metrics. By convention, all these functions should begin with "dec".
- A calculation function will take existing metrics and calculate a value across components to be used for classification, for example the kappa and rho elbows. By convention, all these functions should begin with "calc".
- Nothing prevents a function from both calculating new cross component values and applying those values in a decision step, but following this convention should hopefully make decision tree specifications easier to follow and interpret.

**Key expectations**

- All trees should start with a "manual_classify" node that sets all component classifications to "unclassified" and
  has "clear_classification_tags" set to true. There might be special cases where someone might want to violate these rules,
  but, depending on what else happens in preceding code, other functions will expect both of these columns to exist.
  This manual_classify step will make sure those columns are created and initialized.
- Every possible path through the tree should result in each component being classified as 'accepted' or 'rejected' by the time the tree is completed.
- Three initialization variables will help prevent mistakes:

  necessary_metrics:
      A list of the necessary metrics in the component table that will be used by the tree. If a metric doesn't exist then this
      will raise an error instead of executing a tree. (This can eventually be used to call the metric calculation code based on
      the decision tree specification.) If a necessary metric isn't used, there will be a warning. This is just a warning because,
      if the decision tree specification is eventually used to execute the code that calculates metrics, one may want to calculate
      a metric even if it's not being used.

  intermediate_classifications:
      A list of intermediate classifications (i.e. "provisionalaccept", "provisionalreject"). It is very important to pre-specify these
      because the code will make sure only the default classifications ("accepted", "rejected", "unclassified") and intermediate classifications
      are used in a tree. This prevents someone from accidentally losing a component due to a spelling error or other minor variation in a
      classification label.

  classification_tags:
      A list of acceptable classification tags (i.e. "Likely BOLD", "Unlikely BOLD", "Low variance"). This will both be used to make sure only
      these tags are used in the tree and give programs that interact with the results a single place to see all potential tags.

**Decision node json structure**

In addition to necessary_metrics, intermediate_classifications, and classification_tags, which are described in the section above,
there are four initial fields that describe the tree as a whole:

- "tree_id": A descriptive name for the tree that will be logged.
- "info": A brief description of the tree for info logging.
- "report": A narrative description of the tree that could be used in report logging.
- "refs": Publications that should be referenced when this tree is used.

The "nodes" field is a list of elements where each element defines a node in the decision tree. There are several key fields for each of these nodes:

- "functionname": The exact function name in selection_nodes.py that will be called.
- "parameters": Specifications of all required parameters for the function in functionname.
- "kwargs": Specifications of optional parameters for the function in functionname.

The only parameter that is used in all functions is "decide_comps", which is used to identify, based on their classifications,
the components a function should be applied to. It can be a single classification, or a comma separated string of classifications.
In addition to the intermediate and default ("accepted", "rejected", "unclassified") component classifications, this can be "all"
for functions that should be applied to all components regardless of their classifications.

Most decision functions also include "ifTrue" and "ifFalse", which specify how to change the classification of each component
based on whether the decision criterion is true or false. In addition to the default and intermediate classification options,
this can also be "nochange" (i.e. for components where a>b is true, "reject"; for components where a>b is false, "nochange").
The optional parameters "tag_ifTrue" and "tag_ifFalse" define the classification tags to be assigned to components.
Currently, the only exception is "manual_classify", which uses "new_classification" to designate the new component classification
and "tag" (optional) to designate which classification tag to apply.

There are several optional parameters (to include within "kwargs") in every decision tree function:

- custom_node_label: A brief label for what happens in this node that can be used in a decision tree summary table or flow chart. If custom_node_label is not defined, then each function has default descriptive text.
- log_extra_report, log_extra_info: Text for each function call is automatically placed in the logger output. In addition to that text, the text in these strings will also be included in the logger with the report or info codes respectively. These might be useful to give a narrative explanation of why a step was parameterized a certain way.
- only_used_metrics: If true, this function will only return the names of the component table metrics that will be used when this function is fully run. This can be used to identify all used metrics before running the decision tree.

"_comment" can be used to add a longer explanation about what a node is doing. This will not be logged anywhere
except in the tree, but may be useful to make sure the purpose of a given node is clear.
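Putting this structure together, here is one complete node from the kundu tree included in this change, which rejects any component where rho is greater than kappa:

.. code-block:: json

    {
        "functionname": "dec_left_op_right",
        "parameters": {
            "ifTrue": "rejected",
            "ifFalse": "nochange",
            "decide_comps": "all",
            "op": ">",
            "left": "rho",
            "right": "kappa"
        },
        "kwargs": {
            "log_extra_info": "Reject if Kappa>Rho",
            "log_extra_report": "",
            "tag_ifTrue": "Unlikely BOLD"
        }
    }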
********************************
Key parts of selection functions
********************************

There are several expectations for selection functions that are necessary for them to properly execute.
In selection_nodes.py, manual_classify, dec_left_op_right, and calc_kappa_rho_elbows_kundu are good
examples of how to meet these expectations.

Create a dictionary called "outputs" that includes the key fields that should be recorded, and make sure
the following line is at the end of each function: ``selector.nodes[selector.current_node_idx]["outputs"] = outputs``.
Additional fields can be used to log function-specific information, but the following fields are common and may be used by other parts of the code:

- "decision_node_idx" (required): The ordered index for the current function in the decision tree.
- "node_label" (required): A descriptive label for what happens in the node.
- "numTrue" & "numFalse" (required for decision functions): The number of components labeled true or false within the function call.
- "used_metrics" (required if a function uses metrics): The list of metrics used in the function. This can be hard coded, defined by input parameters, or empty.
- "used_cross_component_metrics" (required if a function uses cross component metrics): A list of cross component metrics used in the function. This can be hard coded, defined by input parameters, or empty.
- "calc_cross_comp_metrics" (required for calculation functions): A list of cross component metrics calculated within the function. The key-value pair for each calculated metric is also included in "outputs".

Before any data are touched in the function, there should be an ``if only_used_metrics:`` clause that returns ``used_metrics`` for the function call.
This will be useful to gather all metrics a tree will use without requiring a specific dataset.
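For example, the metrics used by a single node can be requested without touching any data. This call is a sketch that assumes the tree-defined parameters are passed positionally after ``selector``; check selection_nodes.py for the exact signature:

.. code-block:: python

    from tedana.selection import selection_nodes

    # "selector" is an existing ComponentSelector; the positional arguments
    # mirror the "parameters" of the dec_left_op_right node shown earlier.
    used_metrics = selection_nodes.dec_left_op_right(
        selector, "rejected", "nochange", "all", ">", "rho", "kappa",
        only_used_metrics=True,
    )
    # Returns only the metric names this node would use, e.g. {"kappa", "rho"}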
Existing functions define ``function_name_idx = f"Step {selector.current_node_idx}: [text of function_name]"``. This is used in logging and is cleaner to initialize near the top of each function.

Each function has code that creates a default node label in ``outputs["node_label"]``. The default node label
may be used in decision tree visualization, so it should be relatively short. Within this section, if there is
a user-provided custom_node_label, that should be used instead.

Calculation nodes should check whether the value they are calculating was already calculated, and output a warning if the function overwrites an existing value.

Each function also contains code that adds the text in log_extra_info and log_extra_report to the appropriate logs (if they are provided by the user).

After the above information is included, all functions will call ``selectcomps2use``, which returns the components with classifications included in ``decide_comps``,
and then run ``confirm_metrics_exist``, which is an added check to make sure the metrics used by this function exist in the component table.

Nearly every function has a clause like:

.. code-block:: python

    if comps2use is None:
        log_decision_tree_step(function_name_idx, comps2use, decide_comps=decide_comps)
        outputs["numTrue"] = 0
        outputs["numFalse"] = 0
    else:

If there are no components with the classifications in ``decide_comps``, this logs that there's nothing for the function to be run on; otherwise, the function continues.

For decision functions, the key variable is ``decision_boolean``, which should be a dataframe column that is True or False for the components in ``decide_comps``
based on the function's criteria. That column is an input to ``change_comptable_classifications``, which will update the component_table classifications,
update the classification history in component_status_table, and update the component classification_tags. Components not in ``decide_comps`` retain their
existing classifications and tags.
``change_comptable_classifications`` also returns, and should assign values to, ``outputs["numTrue"]`` and ``outputs["numFalse"]``.
These log how many components were identified as true or false within each function.

For calculation functions, the calculated values should be added as a key/value pair to both ``selector.cross_component_metrics`` and ``outputs``.

``log_decision_tree_step`` puts the relevant info from the function call into the program's output log.

Every function should end with:

.. code-block:: python

    selector.nodes[selector.current_node_idx]["outputs"] = outputs
    return selector

    functionname.__doc__ = (functionname.__doc__.format(**decision_docs))

This makes sure the outputs from the function are saved in the class structure and the class structure is returned.
The ``functionname.__doc__`` line, which follows the function definition, should include the function's name and is used to make sure repeated variable descriptions are compiled correctly for the API documentation.

If you have made it this far, congratulations.
If you follow these steps you'll be able to impress your colleagues, friends, and family by designing your very own decision tree functions.
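To make the above expectations concrete, here is a schematic decision function. It is a sketch, not working code: the function name ``dec_example`` is hypothetical, and the helper calls assume simplified signatures and that the helpers live in selection_utils.py. See manual_classify and dec_left_op_right in selection_nodes.py for real, complete implementations.

.. code-block:: python

    from tedana.selection.selection_utils import (  # assumed location of helpers
        change_comptable_classifications,
        confirm_metrics_exist,
        log_decision_tree_step,
        selectcomps2use,
    )


    def dec_example(selector, ifTrue, ifFalse, decide_comps, only_used_metrics=False):
        """Sketch of a decision node that rejects components where rho > kappa."""
        outputs = {
            "decision_node_idx": selector.current_node_idx,
            "node_label": "rho>kappa (example)",
            "used_metrics": {"kappa", "rho"},
            "numTrue": None,
            "numFalse": None,
        }
        # Return the metrics before touching any data, as described above
        if only_used_metrics:
            return outputs["used_metrics"]

        function_name_idx = f"Step {selector.current_node_idx}: dec_example"

        # Simplified helper calls; check selection_utils.py for the real signatures
        comps2use = selectcomps2use(selector, decide_comps)
        confirm_metrics_exist(selector.component_table, outputs["used_metrics"])

        if comps2use is None:
            log_decision_tree_step(function_name_idx, comps2use, decide_comps=decide_comps)
            outputs["numTrue"] = 0
            outputs["numFalse"] = 0
        else:
            rows = selector.component_table.loc[comps2use]
            decision_boolean = rows["rho"] > rows["kappa"]
            # Updates classifications, the status table, and classification tags
            selector, outputs["numTrue"], outputs["numFalse"] = change_comptable_classifications(
                selector, ifTrue, ifFalse, decision_boolean
            )

        selector.nodes[selector.current_node_idx]["outputs"] = outputs
        return selector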
diff --git a/tedana/io.py b/tedana/io.py
index e3b7506af..7c9b10919 100644
--- a/tedana/io.py
+++ b/tedana/io.py
@@ -25,7 +25,27 @@
 RefLGR = logging.getLogger("REFERENCES")
 
 
-class OutputGenerator:
+class CustomEncoder(json.JSONEncoder):
+    """Convert numpy types into JSON-serializable equivalents, working
+    around numpy/JSON incompatibilities.
+
+    See here: https://stackoverflow.com/questions/50916422/python-typeerror-object-of-type-int64-is-not-json-serializable/50916741
+    """
+    def default(self, obj):
+        # int64 is not serializable, but is a common numpy output
+        if isinstance(obj, np.integer):
+            return int(obj)
+
+        # containers that are not serializable
+        if isinstance(obj, np.ndarray):
+            return obj.tolist()
+        if isinstance(obj, set):
+            return list(obj)
+
+        return super(CustomEncoder, self).default(obj)
+
+
+class OutputGenerator:
     """A class for managing tedana outputs.
 
     Parameters
@@ -43,6 +63,8 @@ class OutputGenerator:
         descriptions. Default is "auto", which uses tedana's default configuration file.
     make_figures : bool, optional
         Whether or not to actually make a figures directory
+    force : bool, optional
+        Whether to force overwrites of data. Default False.
 
     Attributes
     ----------
@@ -59,8 +81,12 @@ class OutputGenerator:
         This will correspond to a "figures" subfolder of ``out_dir``.
     prefix : str
         Prefix to prepend to output filenames.
+    force : bool
+        Whether to force file overwrites.
     verbose : bool
-        Whether or not to generate verbose output
+        Whether or not to generate verbose output.
+    registry : dict
+        A registry of all files saved.
     """
 
     def __init__(
@@ -71,6 +97,7 @@ def __init__(
         prefix="",
         config="auto",
         make_figures=True,
+        force=False,
         verbose=False,
     ):
 
@@ -97,7 +124,9 @@ def __init__(
         self.out_dir = op.abspath(out_dir)
         self.figures_dir = op.join(out_dir, "figures")
         self.prefix = prefix + "_" if prefix != "" else ""
+        self.force = force
         self.verbose = verbose
+        self.registry = {}
 
         if not op.isdir(self.out_dir):
             LGR.info(f"Generating output directory: {self.out_dir}")
@@ -196,6 +225,12 @@ def save_file(self, data, description, **kwargs):
             The full file path of the saved file.
         """
         name = self.get_name(description, **kwargs)
+        if op.exists(name) and not self.force:
+            raise RuntimeError(
+                f"File {name} already exists. In order to allow overwrite "
+                "please use the --force option in the command line or the "
+                "force parameter in the Python API."
+            )
         if description.endswith("img"):
             self.save_img(data, name)
         elif description.endswith("json"):
@@ -204,6 +239,8 @@ def save_file(self, data, description, **kwargs):
         elif description.endswith("tsv"):
             self.save_tsv(data, name)
 
+        self.registry[description] = op.basename(name)
+
         return name
 
     def save_img(self, data, name):
@@ -221,7 +258,10 @@ def save_img(self, data, name):
         Will coerce 64-bit float and int arrays into 32-bit arrays.
""" data_type = type(data) - if not isinstance(data, np.ndarray): + if isinstance(data, nib.nifti1.Nifti1Image): + data.to_filename(name) + return + elif not isinstance(data, np.ndarray): raise TypeError(f"Data supplied must of type np.ndarray, not {data_type}.") if data.ndim not in (1, 2): raise TypeError(f"Data must have number of dimensions in (1, 2), not {data.ndim}") @@ -252,7 +292,7 @@ def save_json(self, data, name): if not isinstance(data, dict): raise TypeError(f"data must be a dict, not type {data_type}.") with open(name, "w") as fo: - json.dump(data, fo, indent=4, sort_keys=True) + json.dump(data, fo, indent=4, sort_keys=True, cls=CustomEncoder) def save_tsv(self, data, name): """Save DataFrame to a tsv file. @@ -269,6 +309,38 @@ def save_tsv(self, data, name): raise TypeError(f"data must be pd.Data, not type {data_type}.") data.to_csv(name, sep="\t", line_terminator="\n", na_rep="n/a", index=False) + def save_self(self): + fname = self.save_file(self.registry, "registry json") + return fname + + +class InputHarvester: + """Turns a registry file into a lookup table to get previous data.""" + loaders = { + "json": lambda f: load_json(f), + "tsv": lambda f: pd.read_csv(f, delimiter="\t"), + "img": lambda f: nib.load(f), + } + + def __init__(self, path): + self._full_path = path + self._base_dir = op.dirname(path) + self._registry = load_json(path) + + def get_file_path(self, description): + if description in self._registry.keys(): + return op.join(self._base_dir, self._registry[description]) + else: + return None + + def get_file_contents(self, description): + for ftype, loader in InputHarvester.loaders.items(): + if ftype in description: + return loader(self.get_file_path(description)) + # Since we restrict to just these three types, this function should + # always return. If more types are added, the loaders dict will + # need to be updated with an appopriate loader + def get_fields(name): """Identify all fields in an unformatted string. diff --git a/tedana/metrics/collect.py b/tedana/metrics/collect.py index 0e1b56bbb..bd2dc6b49 100644 --- a/tedana/metrics/collect.py +++ b/tedana/metrics/collect.py @@ -523,15 +523,7 @@ def get_metadata(comptable): ), }, } - if "original_rationale" in comptable: - metric_metadata["original_rationale"] = { - "LongName": "Original rationale", - "Description": ( - "The reason for the original classification. " - "Please see tedana's documentation for information about " - "possible rationales." - ), - } + if "classification" in comptable: metric_metadata["classification"] = { "LongName": "Component classification", @@ -545,13 +537,19 @@ def get_metadata(comptable): ), }, } + if "classification_tags" in comptable: + metric_metadata["classification_tags"] = { + "LongName": "Component classification tags", + "Description": ( + "A single tag or a comma separated list of tags to describe why a component received its classification" + ), + } if "rationale" in comptable: metric_metadata["rationale"] = { "LongName": "Rationale for component classification", "Description": ( "The reason for the original classification. " - "Please see tedana's documentation for information about " - "possible rationales." 
+ "This column label was replaced with classification_tags in late 2021" ), } if "kappa ratio" in comptable: diff --git a/tedana/reporting/static_figures.py b/tedana/reporting/static_figures.py index 41615636a..185f8cf6f 100644 --- a/tedana/reporting/static_figures.py +++ b/tedana/reporting/static_figures.py @@ -42,7 +42,9 @@ def _trim_edge_zeros(arr): return arr[bounding_box] -def carpet_plot(optcom_ts, denoised_ts, hikts, lowkts, mask, io_generator, gscontrol=None): +def carpet_plot( + optcom_ts, denoised_ts, hikts, lowkts, mask, io_generator, gscontrol=None +): """Generate a set of carpet plots for the combined and denoised data. Parameters @@ -121,7 +123,9 @@ def carpet_plot(optcom_ts, denoised_ts, hikts, lowkts, mask, io_generator, gscon title="Optimally Combined Data (Pre-GSR)", ) fig.tight_layout() - fig.savefig(os.path.join(io_generator.out_dir, "figures", "carpet_optcom_nogsr.svg")) + fig.savefig( + os.path.join(io_generator.out_dir, "figures", "carpet_optcom_nogsr.svg") + ) if (gscontrol is not None) and ("mir" in gscontrol): mir_denoised_img = io_generator.get_name("mir denoised img") @@ -134,7 +138,9 @@ def carpet_plot(optcom_ts, denoised_ts, hikts, lowkts, mask, io_generator, gscon title="Denoised Data (Post-MIR)", ) fig.tight_layout() - fig.savefig(os.path.join(io_generator.out_dir, "figures", "carpet_denoised_mir.svg")) + fig.savefig( + os.path.join(io_generator.out_dir, "figures", "carpet_denoised_mir.svg") + ) mir_denoised_img = io_generator.get_name("ICA accepted mir denoised img") fig, ax = plt.subplots(figsize=(14, 7)) @@ -146,7 +152,9 @@ def carpet_plot(optcom_ts, denoised_ts, hikts, lowkts, mask, io_generator, gscon title="High-Kappa Data (Post-MIR)", ) fig.tight_layout() - fig.savefig(os.path.join(io_generator.out_dir, "figures", "carpet_accepted_mir.svg")) + fig.savefig( + os.path.join(io_generator.out_dir, "figures", "carpet_accepted_mir.svg") + ) def comp_figures(ts, mask, comptable, mmix, io_generator, png_cmap): @@ -193,17 +201,24 @@ def comp_figures(ts, mask, comptable, mmix, io_generator, png_cmap): expl_text = "" # Remove trailing ';' from rationale column - comptable["rationale"] = comptable["rationale"].str.rstrip(";") + #comptable["rationale"] = comptable["rationale"].str.rstrip(";") for compnum in comptable.index.values: if comptable.loc[compnum, "classification"] == "accepted": line_color = "g" - expl_text = "accepted" + expl_text = ( + "accepted reason(s): " + comptable.loc[compnum, "classification_tags"] + ) elif comptable.loc[compnum, "classification"] == "rejected": line_color = "r" - expl_text = "rejection reason(s): " + comptable.loc[compnum, "rationale"] + expl_text = ( + "rejected reason(s): " + comptable.loc[compnum, "classification_tags"] + ) + elif comptable.loc[compnum, "classification"] == "ignored": line_color = "k" - expl_text = "ignored reason(s): " + comptable.loc[compnum, "rationale"] + expl_text = ( + "ignored reason(s): " + comptable.loc[compnum, "classification_tags"] + ) else: # Classification not added # If new, this will keep code running diff --git a/tedana/resources/config/outputs.json b/tedana/resources/config/outputs.json index e8acea001..0574115f8 100644 --- a/tedana/resources/config/outputs.json +++ b/tedana/resources/config/outputs.json @@ -175,6 +175,18 @@ "orig": "ica_metrics", "bidsv1.5.0": "desc-tedana_metrics" }, + "ICA cross component metrics json": { + "orig": "ica_cross_component_metrics", + "bidsv1.5.0": "desc-ICA_cross_component_metrics" + }, + "ICA status table tsv": { + "orig": "ica_status_table", + 
"bidsv1.5.0": "desc-ICA_status_table" + }, + "ICA decision tree json": { + "orig": "ica_decision_tree", + "bidsv1.5.0": "desc-ICA_decision_tree" + }, "global signal time series tsv": { "orig": "global_signal_ts", "bidsv1.5.0": "desc-globalSignal_timeseries" @@ -186,5 +198,9 @@ "ICA orthogonalized mixing tsv": { "orig": "ica_orth_mixing", "bidsv1.5.0": "desc-ICAOrth_mixing" + }, + "registry json": { + "orig": "registry", + "bidsv1.5.0": "desc-tedana_registry" } } diff --git a/tedana/resources/decision_trees/invalid_kundu_bkup.json b/tedana/resources/decision_trees/invalid_kundu_bkup.json new file mode 100644 index 000000000..19d6c650b --- /dev/null +++ b/tedana/resources/decision_trees/invalid_kundu_bkup.json @@ -0,0 +1,267 @@ +{ + "tree_id": "kundu_MEICA27_decision_tree", + "info": "Following the full decision tree designed by Prantik Kundu", + "report": "This is based on the minimal criteria of the original MEICA decision tree without the more agressive noise removal steps", + "refs": "Kundu 2013", + "necessary_metrics": [ + "kappa", + "rho", + "countsigFS0", + "countsigFT2", + "dice_FS0", + "dice_FT2", + "signal-noise_t", + "variance explained", + "d_table_score" + ], + "nodes": [ + { + "functionname": "manual_classify", + "parameters": { + "new_classification": "unclassified", + "decide_comps": "all" + }, + "kwargs": { + "log_extra_info": "Initializing all classifications as unclassified and all rationales as blank", + "log_extra_report": "", + "clear_rationale": true + } + }, + { + "functionname": "metric1_greaterthan_metric2", + "parameters": { + "ifTrue": "rejected", + "ifFalse": "nochange", + "decide_comps": "all", + "metric1": "rho", + "metric2": "kappa" + }, + "kwargs": { + "log_extra_info": "Reject if Kappa>Rho", + "log_extra_report": "", + "metric2_scale": 1 + } + }, + { + "functionname": "metric1_greaterthan_metric2", + "parameters": { + "ifTrue": "rejected", + "ifFalse": "nochange", + "decide_comps": "all", + "metric1": "countsigFS0", + "metric2": "countsigFT2" + }, + "kwargs": { + "log_extra_info": "Reject if countsig_in S0clusters > T2clusters", + "log_extra_report": "", + "metric2_scale": 1 + } + }, + { + "functionname": "metric1_greaterthan_metric2", + "parameters": { + "ifTrue": "rejected", + "ifFalse": "nochange", + "decide_comps": "all", + "metric1": "dice_FS0", + "metric2": "dice_FT2" + }, + "kwargs": { + "log_extra_info": "Reject if DICE S0>T2", + "log_extra_report": "", + "metric2_scale": 1 + } + }, + { + "functionname": "metric1_greaterthan_metric2", + "parameters": { + "ifTrue": "rejected", + "ifFalse": "nochange", + "decide_comps": "all", + "metric1": 0, + "metric2": "signal-noise_t" + }, + "kwargs": { + "log_extra_info": "Reject if T2fitdiff_invsout_ICAmap_Tstat<0", + "log_extra_report": "", + "metric2_scale": 1 + } + }, + { + "functionname": "kappa_rho_elbow_cutoffs_kundu", + "parameters": { + "ifTrue": "provisionalaccept", + "ifFalse": "provisionalreject", + "decide_comps": "unclassified", + "n_echos": null + }, + "kwargs": { + "log_extra_info": "", + "log_extra_report": "" + } + }, + { + "functionname": "classification_exists", + "parameters": { + "ifTrue": "nochange", + "ifFalse": "ignored", + "decide_comps": [ + "provisionalaccept", + "provisionalreject" + ], + "class_comp_exists": "provisionalaccept" + }, + "kwargs": { + "log_extra_info": "", + "log_extra_report": "" + } + }, + { + "functionname": "meanmetricrank_and_variance_greaterthan_thresh", + "parameters": { + "ifTrue": "rejected", + "ifFalse": "nochange", + "decide_comps": [ + "provisionalaccept", + 
"provisionalreject" + ], + "n_vols": null + }, + "kwargs": { + "high_perc": 90, + "log_extra_info": "", + "log_extra_report": "" + } + }, + { + "functionname": "lowvariance_highmeanmetricrank_lowkappa", + "parameters": { + "ifTrue": "ignored", + "ifFalse": "nochange", + "decide_comps": [ + "provisionalaccept", + "provisionalreject" + ], + "n_echos": null, + "n_vols": null + }, + "kwargs": { + "low_perc": 25, + "log_extra_info": "", + "log_extra_report": "" + } + }, + { + "functionname": "classification_exists", + "parameters": { + "ifTrue": "nochange", + "ifFalse": "accepted", + "decide_comps": [ + "provisionalaccept", + "provisionalreject" + ], + "class_comp_exists": "provisionalreject" + }, + "kwargs": { + "log_extra_info": "", + "log_extra_report": "" + } + }, + { + "functionname": "highvariance_highmeanmetricrank_highkapparatio", + "parameters": { + "ifTrue": "rejected", + "ifFalse": "nochange", + "decide_comps": [ + "provisionalaccept", + "provisionalreject" + ], + "n_echos": null, + "n_vols": null + }, + "kwargs": { + "prev_X_steps": 3, + "log_extra_info": "", + "log_extra_report": "" + } + }, + { + "functionname": "highvariance_highmeanmetricrank", + "parameters": { + "ifTrue": "rejected", + "ifFalse": "nochange", + "decide_comps": [ + "provisionalaccept", + "provisionalreject" + ], + "n_echos": null, + "n_vols": null + }, + "kwargs": { + "prev_X_steps": 1, + "log_extra_info": "", + "log_extra_report": "" + } + }, + { + "functionname": "highvariance_highmeanmetricrank", + "parameters": { + "ifTrue": "ignored", + "ifFalse": "nochange", + "decide_comps": [ + "provisionalaccept", + "provisionalreject" + ], + "n_echos": null + }, + "kwargs": { + "prev_X_steps": 2, + "high_perc": 100, + "extend_factor": 1, + "recalc_varex_lower_thresh": true, + "log_extra_info": "", + "log_extra_report": "" + } + }, + { + "functionname": "highvariance_lowkappa", + "parameters": { + "ifTrue": "ignored", + "ifFalse": "nochange", + "decide_comps": [ + "provisionalaccept", + "provisionalreject" + ], + "n_echos": null + }, + "kwargs": { + "log_extra_info": "", + "log_extra_report": "" + } + }, + { + "functionname": "manual_classify", + "parameters": { + "new_classification": "accepted", + "decide_comps": "provisionalaccept" + }, + "kwargs": { + "log_extra_info": "", + "log_extra_report": "", + "clear_rationale": false + } + }, + { + "functionname": "manual_classify", + "parameters": { + "new_classification": "rejected", + "decide_comps": "provisionalreject" + }, + "kwargs": { + "log_extra_info": "", + "log_extra_report": "", + "clear_rationale": false + } + } + ] +} diff --git a/tedana/resources/decision_trees/kundu.json b/tedana/resources/decision_trees/kundu.json new file mode 100644 index 000000000..f40fba90f --- /dev/null +++ b/tedana/resources/decision_trees/kundu.json @@ -0,0 +1,427 @@ +{ + "tree_id": "kundu_MEICA27_decision_tree", + "info": "Following the full decision tree designed by Prantik Kundu", + "report": "This is based on the criteria of the original MEICA decision tree", + "refs": "Kundu 2013", + "necessary_metrics": [ + "kappa", + "rho", + "countsigFS0", + "countsigFT2", + "dice_FS0", + "dice_FT2", + "signal-noise_t", + "variance explained", + "d_table_score" + ], + "intermediate_classifications": [ + "provisionalaccept", + "provisionalreject" + ], + "classification_tags": [ + "Likely BOLD", + "Unlikely BOLD", + "Less likely BOLD", + "Low variance", + "Accept borderline", + "No provisional accept" + ], + "nodes": [ + { + "functionname": "manual_classify", + "parameters": { + 
"new_classification": "unclassified", + "decide_comps": "all" + }, + "kwargs": { + "log_extra_info": "Initializing all classifications as unclassified and all classification tags as blank", + "log_extra_report": "", + "clear_classification_tags": true, + "dont_warn_reclassify": true + } + }, + { + "functionname": "dec_left_op_right", + "parameters": { + "ifTrue": "rejected", + "ifFalse": "nochange", + "decide_comps": "all", + "op": ">", + "left": "rho", + "right": "kappa" + }, + "kwargs": { + "log_extra_info": "Reject if Kappa>Rho", + "log_extra_report": "", + "tag_ifTrue": "Unlikely BOLD" + } + }, + { + "functionname": "dec_left_op_right", + "parameters": { + "ifTrue": "rejected", + "ifFalse": "nochange", + "decide_comps": "all", + "op": ">", + "left": "countsigFS0", + "right": "countsigFT2" + }, + "kwargs": { + "log_extra_info": "Reject if countsig_in S0clusters > T2clusters", + "log_extra_report": "", + "tag_ifTrue": "Unlikely BOLD" + } + }, + { + "functionname": "dec_left_op_right", + "parameters": { + "ifTrue": "rejected", + "ifFalse": "nochange", + "decide_comps": "all", + "op": ">", + "left": "dice_FS0", + "right": "dice_FT2" + }, + "kwargs": { + "log_extra_info": "Reject if DICE S0>T2", + "log_extra_report": "", + "tag_ifTrue": "Unlikely BOLD" + } + }, + { + "functionname": "dec_left_op_right", + "parameters": { + "ifTrue": "rejected", + "ifFalse": "nochange", + "decide_comps": "all", + "op": ">", + "left": 0, + "right": "signal-noise_t" + }, + "kwargs": { + "log_extra_info": "Reject if T2fitdiff_invsout_ICAmap_Tstat<0", + "log_extra_report": "", + "tag_ifTrue": "Unlikely BOLD" + } + }, + { + "functionname": "calc_kappa_rho_elbows_kundu", + "parameters": { + "decide_comps": "unclassified" + }, + "kwargs": { + "log_extra_info": "", + "log_extra_report": "" + } + }, + { + "functionname": "dec_left_op_right", + "parameters": { + "ifTrue": "provisionalaccept", + "ifFalse": "provisionalreject", + "decide_comps": "unclassified", + "op": ">", + "left": "kappa", + "right": "kappa_elbow_kundu" + }, + "kwargs": { + "log_extra_info": "Provisionally accept if kappa>elbow and provisionally reject if kappa", + "left": "rho", + "right": "rho_elbow_kundu" + }, + "kwargs": { + "log_extra_info": "Provisionally reject if rho>elbow", + "log_extra_report": "" + } + }, + { + "functionname": "dec_classification_doesnt_exist", + "parameters": { + "new_classification": "accepted", + "decide_comps": [ + "provisionalaccept", + "provisionalreject", + "unclassified" + ], + "class_comp_exists": "provisionalaccept" + }, + "kwargs": { + "tag_ifTrue": "No provisional accept", + "log_extra_info": "If nothing is provisionally accepted by this point, be conservative and accept everything", + "log_extra_report": "" + } + }, + { + "functionname": "calc_varex_thresh", + "parameters": { + "decide_comps": "provisionalaccept", + "thresh_label": "upper", + "percentile_thresh": 90 + }, + "kwargs": { + "log_extra_info": "Calculuate a high variance threshold based on the 90th percentile variance component" + } + }, + { + "functionname": "calc_varex_thresh", + "parameters": { + "decide_comps": "provisionalaccept", + "thresh_label": "lower", + "percentile_thresh": 25 + }, + "kwargs": { + "log_extra_info": "Calculuate a low variance threshold based on the 25th percentile variance component" + } + }, + { + "functionname": "calc_extend_factor", + "parameters": {}, + "kwargs": { + "log_extra_info": "2 if fewer than 90 fMRI volumes, 3 if more than 110 and linear in-between" + }, + "_comment": "This is a scaling number that is used 
for a few thresholds" + }, + { + "functionname": "calc_max_good_meanmetricrank", + "parameters": { + "decide_comps": "provisionalaccept" + }, + "kwargs": { + "log_extra_info": "Number of provisionalaccept components * extend_factor" + } + }, + { + "functionname": "calc_varex_kappa_ratio", + "parameters": { + "decide_comps": "provisionalaccept" + }, + "kwargs": { + "log_extra_info": "Scaled ratio of variance/kappa" + }, + "_comment": "This is used to calculate the new 'varex kappa ratio' column in the component_table" + }, + { + "functionname": "dec_left_op_right", + "parameters": { + "ifTrue": "rejected", + "ifFalse": "nochange", + "decide_comps": [ + "provisionalaccept", + "provisionalreject" + ], + "op": ">", + "left": "d_table_score", + "right": "max_good_meanmetricrank" + }, + "kwargs": { + "op2": ">", + "left2": "variance explained", + "right2": "varex_upper_thresh", + "log_extra_info": "If variance and d_table_scores are high, then reject" + }, + "_comment": "One of several steps that makes it more likely to reject high variance components" + }, + { + "functionname": "dec_left_op_right", + "parameters": { + "ifTrue": "accepted", + "ifFalse": "nochange", + "decide_comps": [ + "provisionalaccept", + "provisionalreject" + ], + "op": "<", + "left": "d_table_score", + "right": "max_good_meanmetricrank" + }, + "kwargs": { + "tag_ifTrue": "Low variance", + "op2": "<", + "left2": "variance explained", + "right2": "varex_lower_thresh", + "op3": ">", + "left3": "kappa", + "right3": "kappa_elbow_kundu", + "log_extra_info": "If low variance and good kappa & d_table_scores accept even if rho or other metrics are bad" + } + }, + { + "functionname": "dec_classification_doesnt_exist", + "parameters": { + "new_classification": "accepted", + "decide_comps": [ + "provisionalaccept", + "provisionalreject" + ], + "class_comp_exists": "provisionalreject" + }, + "kwargs": { + "tag_ifTrue": "Likely BOLD", + "log_extra_info": "If nothing left is provisionalreject, then accept all", + "log_extra_report": "" + } + }, + { + "functionname": "calc_revised_meanmetricrank_guesses", + "parameters": { + "decide_comps": [ + "provisionalaccept", + "provisionalreject" + ] + }, + "kwargs": {}, + "_comment": "Add more here" + }, + { + "functionname": "dec_left_op_right", + "parameters": { + "ifTrue": "rejected", + "ifFalse": "nochange", + "decide_comps": [ + "provisionalaccept", + "provisionalreject" + ], + "op": ">", + "left": "d_table_score_node17", + "right": "conservative_guess" + }, + "kwargs": { + "tag_ifTrue": "Less likely BOLD", + "op2": ">", + "left2": "varex kappa ratio", + "right2": "extend_factor", + "right2_scale": 2, + "op3": ">", + "left3": "variance explained", + "right3": "varex_upper_thresh", + "right3_scale": "extend_factor", + "log_extra_info": "Reject if a combination of kappa, variance, and other factors are ranked worse than others" + }, + "_comment": "Quirky combination 1 of a bunch of metrics that deal with rejecting some edge cases" + }, + { + "functionname": "dec_left_op_right", + "parameters": { + "ifTrue": "rejected", + "ifFalse": "nochange", + "decide_comps": [ + "provisionalaccept", + "provisionalreject" + ], + "op": ">", + "left": "d_table_score_node17", + "right": "num_acc_guess" + }, + "kwargs": { + "tag_ifTrue": "Less likely BOLD", + "right_scale": 0.9, + "op2": ">", + "left2": "variance explained", + "right2": "varex_lower_thresh", + "right2_scale": "extend_factor", + "log_extra_info": "Reject if a combination of variance and ranks of other metrics are worse than others" + }, + 
"_comment": "Quirky combination 2 of a bunch of metrics that deal with rejecting some edge cases" + }, + { + "functionname": "calc_varex_thresh", + "parameters": { + "decide_comps": [ + "provisionalaccept", + "provisionalreject" + ], + "thresh_label": "new_lower", + "percentile_thresh": 25 + }, + "kwargs": { + "log_extra_info": "Calculuate a low variance threshold based on the 25th percentile variance component" + }, + "_comment": "In the original kundu code, this is run only on the first num_acc_guess remaining component... probably sorted for the lowest variance. Adding that functionality here would be messy and unlikely to significantly alter results" + }, + { + "functionname": "dec_left_op_right", + "parameters": { + "ifTrue": "rejected", + "ifFalse": "nochange", + "decide_comps": [ + "provisionalaccept", + "provisionalreject" + ], + "op": ">", + "left": "d_table_score_node17", + "right": "num_acc_guess" + }, + "kwargs": { + "tag_ifTrue": "Accept borderline", + "op2": ">", + "left2": "variance explained", + "right2": "varex_new_lower_thresh", + "log_extra_info": "Accept components with a bad d_table_score, but are at the higher end of the remaining variance so more cautious to not remove" + }, + "_comment": "Yet another quirky criterion, but this one to keep components. In the original tree, varex_new_lower_thresh would be lower than it is here. If there are differences in results, might be worth adding a scaling factor" + }, + { + "functionname": "dec_left_op_right", + "parameters": { + "ifTrue": "rejected", + "ifFalse": "nochange", + "decide_comps": [ + "provisionalaccept", + "provisionalreject" + ], + "op": "<=", + "left": "kappa", + "right": "kappa_elbow_kundu" + }, + "kwargs": { + "tag_ifTrue": "Accept borderline", + "op2": ">", + "left2": "variance explained", + "right2": "varex_new_lower_thresh", + "log_extra_info": "Accept components above the kappa elbow, but are at the higher end of the remaining variance so more cautious to not remove" + }, + "_comment": "Yet another quirky criterion, but this one to keep components. In the original tree, varex_new_lower_thresh would be lower than it is here. If there are differences in results, might be worth adding a scaling factor" + }, + { + "functionname": "manual_classify", + "parameters": { + "new_classification": "accepted", + "decide_comps": "provisionalaccept" + }, + "kwargs": { + "log_extra_info": "Anything that is still provisionalaccept should be accepted", + "log_extra_report": "", + "tag": "Likely BOLD" + } + }, + { + "functionname": "manual_classify", + "parameters": { + "new_classification": "rejected", + "decide_comps": [ + "provisionalreject", + "unclassified" + ] + }, + "kwargs": { + "log_extra_info": "Anything that is still provisionalreject should be rejected", + "log_extra_report": "", + "tag": "Unlikely BOLD" + }, + "_comment": "According to a comment in the meica 2.7 code, nothing should be provisionalreject by this point." 
+ } + ] +} \ No newline at end of file diff --git a/tedana/resources/decision_trees/minimal.json b/tedana/resources/decision_trees/minimal.json new file mode 100644 index 000000000..3f5827732 --- /dev/null +++ b/tedana/resources/decision_trees/minimal.json @@ -0,0 +1,210 @@ +{ + "tree_id": "minimal_decision_tree_test1", + "info": "Proposed minimal decision tree", + "report": "This is based on the minimal criteria of the original MEICA decision tree without the more agressive noise removal steps", + "refs": "Kundu 2013; DuPre, Salo, 2021", + "necessary_metrics": [ + "kappa", + "rho", + "countsigFS0", + "countsigFT2", + "dice_FS0", + "dice_FT2", + "signal-noise_t", + "variance explained" + ], + "intermediate_classifications": [ + "provisionalaccept", + "provisionalreject" + ], + "classification_tags": [ + "Likely BOLD", + "Unlikely BOLD", + "Low variance" + ], + "nodes": [ + { + "functionname": "manual_classify", + "parameters": { + "new_classification": "unclassified", + "decide_comps": "all" + }, + "kwargs": { + "log_extra_info": "Initializing all classifications as unclassified and all classification tags as blank", + "log_extra_report": "", + "clear_classification_tags": true, + "dont_warn_reclassify": true + } + }, + { + "functionname": "dec_left_op_right", + "parameters": { + "ifTrue": "rejected", + "ifFalse": "nochange", + "decide_comps": "all", + "op": ">", + "left": "rho", + "right": "kappa" + }, + "kwargs": { + "log_extra_info": "Reject if Kappa", + "left": "countsigFS0", + "right": "countsigFT2" + }, + "kwargs": { + "log_extra_info": "Reject if countsig_in S0clusters > T2clusters", + "log_extra_report": "", + "tag_ifTrue": "Unlikely BOLD" + } + }, + { + "functionname": "dec_left_op_right", + "parameters": { + "ifTrue": "rejected", + "ifFalse": "nochange", + "decide_comps": "all", + "op": ">", + "left": "dice_FS0", + "right": "dice_FT2" + }, + "kwargs": { + "log_extra_info": "Reject if dice S0>T2", + "log_extra_report": "", + "tag_ifTrue": "Unlikely BOLD" + } + }, + { + "functionname": "dec_left_op_right", + "parameters": { + "ifTrue": "rejected", + "ifFalse": "nochange", + "decide_comps": "all", + "op": ">", + "left": 0, + "right": "signal-noise_t" + }, + "kwargs": { + "log_extra_info": "Reject if T2fitdiff_invsout_ICAmap_Tstat<0", + "log_extra_report": "", + "tag_ifTrue": "Unlikely BOLD" + } + }, + { + "functionname": "calc_kappa_rho_elbows_kundu", + "parameters": { + "decide_comps": "unclassified" + }, + "kwargs": { + "log_extra_info": "", + "log_extra_report": "" + } + }, + { + "functionname": "dec_left_op_right", + "parameters": { + "ifTrue": "provisionalaccept", + "ifFalse": "nochange", + "decide_comps": "unclassified", + "op": ">", + "left": "kappa", + "right": "kappa_elbow_kundu" + }, + "kwargs": { + "log_extra_info": "kappa>elbow", + "log_extra_report": "" + } + }, + { + "functionname": "dec_left_op_right", + "parameters": { + "ifTrue": "accepted", + "ifFalse": "nochange", + "decide_comps": "provisionalaccept", + "op": ">", + "left": "kappa", + "right": "rho" + }, + "kwargs": { + "log_extra_info": "If kappa>elbow and kappa>3*rho accept even if rho>elbow", + "log_extra_report": "", + "right_scale": 3, + "tag_ifTrue": "Likely BOLD" + } + }, + { + "functionname": "dec_left_op_right", + "parameters": { + "ifTrue": "nochange", + "ifFalse": "provisionalreject", + "decide_comps": [ + "unclassified", + "provisionalaccept" + ], + "op": ">", + "left": "rho", + "right": "rho_elbow_kundu" + }, + "kwargs": { + "log_extra_info": "rho>elbow", + "log_extra_report": "" + } + }, + { + 
"functionname": "dec_variance_lessthan_thresholds", + "parameters": { + "ifTrue": "accepted", + "ifFalse": "nochange", + "decide_comps": [ + "provisionalreject", + "unclassified" + ] + }, + "kwargs": { + "var_metric": "variance explained", + "log_extra_info": "", + "log_extra_report": "", + "single_comp_threshold": 0.1, + "all_comp_threshold": 1.0, + "tag_ifTrue": "Low variance" + } + }, + { + "functionname": "manual_classify", + "parameters": { + "new_classification": "accepted", + "decide_comps": "provisionalaccept" + }, + "kwargs": { + "log_extra_info": "", + "log_extra_report": "", + "tag": "Likely BOLD" + } + }, + { + "functionname": "manual_classify", + "parameters": { + "new_classification": "rejected", + "decide_comps": [ + "provisionalreject", + "unclassified" + ] + }, + "kwargs": { + "log_extra_info": "", + "log_extra_report": "", + "tag": "Unlikely BOLD" + } + } + ] +} \ No newline at end of file diff --git a/tedana/selection/ComponentSelector.py b/tedana/selection/ComponentSelector.py new file mode 100644 index 000000000..9e31a591e --- /dev/null +++ b/tedana/selection/ComponentSelector.py @@ -0,0 +1,590 @@ +""" +Functions that include workflows to identify and label +TE-dependent and TE-independent components. +""" +import os.path as op +import inspect +import logging +from pkg_resources import resource_filename +from numpy import asarray +import pandas as pd + +from tedana.selection.selection_utils import ( + clean_dataframe, + confirm_metrics_exist, + log_classification_counts, +) +from tedana.selection import selection_nodes +from tedana.io import load_json +from tedana.utils import get_resource_path + +LGR = logging.getLogger("GENERAL") +RepLGR = logging.getLogger("REPORT") +RefLGR = logging.getLogger("REFERENCES") + +# These are the names of the json files containing decision +# trees that are stored in the ./resouces/decision_trees/ directory +# A user can run the desision tree either using one of these +# names or by giving the full path to a tree in a different +# location +DEFAULT_TREES = ["minimal", "kundu"] + + +class TreeError(Exception): + pass + + +def load_config(tree): + """ + Loads the json file with the decision tree and validates that the + fields in the decision tree are appropriate. + + Parameters + ---------- + tree : :obj:`str` + The named tree or path to a JSON file that defines one + + Returns + ------- + tree : :obj:`dict` + A validated decision tree for the component selection process. + The `dict` has several required fields to describe the entire tree + `tree_id`: :obj:`str` The name of the tree + `info`: :obj:`str` A brief description of the tree for info logging + `report`: :obj:`str` A narrative description of the tree that could be used in report logging + `refs`: :obj:`str` Publications that should be referenced, when this tree is used + `necessary_metrics`: :obj:`list[str]` The metrics in `component_table` that will be used by this tree + `intermediate_classifications`: :obj:`list[str]` User specified component classification labels. 'accepted', 'rejected', and 'unclassified' are defaults that don't need to be included here + `classification_tags`: :obj:`list[str]` Descriptive labels that can be used to explain why a component was accepted or rejected. For example, ["Likely BOLD","Low variance"] + `nodes`: :obj:`list[dict]` Each dictionary includes the information + to run one node in the decision tree. 
Each node should either be able + to change component classifications (function names starting with dec_) + or calculate values using information from multiple components + (function names starting with calc_) + nodes includes: + `functionname`: :obj:`str` The name of the function to be called + `parameters`: :obj:`dict` Required parameters for the function + The only parameter that is used in all functions is `decidecomps`, + which are the component classifications the function should run on. + Most dec_ functions also include `ifTrue` and `ifFalse` which + define how to to change the classification of a component if the + criteria in the function is true or false. + `kwargs`: :obj:`dict` Optional parameters for the function + """ + + if tree in DEFAULT_TREES: + fname = op.join(get_resource_path(), "decision_trees", tree + ".json") + else: + fname = tree + + try: + dectree = load_json(fname) + except FileNotFoundError: + raise ValueError( + f"Cannot find tree {tree}. Please check your path or use a " + f"default tree ({DEFAULT_TREES})." + ) + except IsADirectoryError: + raise ValueError( + f"Tree {tree} is a directory. Please supply a JSON file or " + f"default tree ({DEFAULT_TREES})." + ) + + return validate_tree(dectree) + + +def validate_tree(tree): + """ + Confirms that provided `tree` is a valid decision tree + + Parameters + ---------- + tree : :obj:`dict` + Ostensible decision tree for the component selection process + + Returns + ------- + tree : :obj:`dict` + Validated decision tree dictionary + + Raises + ------ + TreeError + """ + + # Set the fields that should always be present + err_msg = "" + tree_expected_keys = [ + "tree_id", + "info", + "report", + "refs", + "necessary_metrics", + "intermediate_classifications", + "classification_tags", + "nodes", + ] + defaults = {"selector", "decision_node_idx"} + default_classifications = {"nochange", "accepted", "rejected", "unclassified"} + default_decide_comps = {"all", "accepted", "rejected", "unclassified"} + + # Confirm that the required fields exist + missing_keys = set(tree_expected_keys) - set(tree.keys()) + if missing_keys: + # If there are missing keys, this function may crash before the end. 
+ # End function here with a clear error message rather than adding + # `if assert tree.get()` statements before every section + raise TreeError("\n" + f"Decision tree missing required fields: {missing_keys}") + + # Warn if unused fields exist + unused_keys = set(tree.keys()) - set(tree_expected_keys) - set(["used_metrics"]) + # Make sure reconstruct_from doesn't trigger a warning; hacky, sorry + if "reconstruct_from" in unused_keys: + unused_keys.remove("reconstruct_from") + + if unused_keys: + LGR.warning(f"Decision tree includes fields that are not used or logged {unused_keys}") + + # Combine the default classifications with the user inputted classifications + all_classifications = set(tree.get("intermediate_classifications")) | set( + default_classifications + ) + all_decide_comps = set(tree.get("intermediate_classifications")) | set(default_decide_comps) + for i, node in enumerate(tree["nodes"]): + # Make sure each function defined in a node exists + try: + fcn = getattr(selection_nodes, node.get("functionname")) + sig = inspect.signature(fcn) + except (AttributeError, TypeError): + err_msg += "Node {} has invalid functionname parameter: {}\n".format( + i, node.get("functionname") + ) + continue + + # Get a functions parameters and compare to parameters defined in the tree + pos = set([p for p, i in sig.parameters.items() if i.default is inspect.Parameter.empty]) + kwargs = set(sig.parameters.keys()) - pos + + missing_pos = pos - set(node.get("parameters").keys()) - defaults + if len(missing_pos) > 0: + err_msg += "Node {} is missing required parameter(s): {}\n".format(i, missing_pos) + + invalid_params = set(node.get("parameters").keys()) - pos + if len(invalid_params) > 0: + err_msg += "Node {} has additional, undefined required parameters: {}\n".format( + i, invalid_params + ) + + invalid_kwargs = set(node.get("kwargs").keys()) - kwargs + if len(invalid_kwargs) > 0: + err_msg += ( + "Node {} has additional, undefined optional parameters (kwargs): {}\n".format( + i, invalid_kwargs + ) + ) + + # Gather all the classification labels used in each tree both for + # changing classifications and for decide_comps which defines which + # component classifications to use in each node then make sure these + # classifications are in the predefined list. + # It's important to require a predefined list of classifications + # beccuse spelling inconsistencies cause problems and are hard to + # catch. 
For example if a node is applied to "provisionalaccept" + # nodes, but a previous node classified components as + # "provisionalaccepted" they won't be included and there might not + # be any other warnings + compclass = set() + if "ifTrue" in node.get("parameters").keys(): + tmp_comp = node["parameters"]["ifTrue"] + if isinstance(tmp_comp, str): + tmp_comp = [tmp_comp] + compclass = compclass | set(tmp_comp) + if "ifFalse" in node.get("parameters").keys(): + tmp_comp = node["parameters"]["ifFalse"] + if isinstance(tmp_comp, str): + tmp_comp = [tmp_comp] + compclass = compclass | set(tmp_comp) + nonstandard_labels = compclass.difference(all_classifications) + if nonstandard_labels: + LGR.warning( + "{} in node {} of the decision tree includes a classification label that was not predefined".format( + compclass, i + ) + ) + if "decide_comps" in node.get("parameters").keys(): + tmp_comp = node["parameters"]["decide_comps"] + if isinstance(tmp_comp, str): + tmp_comp = [tmp_comp] + compclass = set(tmp_comp) + nonstandard_labels = compclass.difference(all_decide_comps) + if nonstandard_labels: + LGR.warning( + f"{compclass} in node {i} of the decision tree includes a classification label that was not predefined" + ) + + tagset = set() + if "tag_ifTrue" in node.get("kwargs").keys(): + tagset.update(set([node["kwargs"]["tag_ifTrue"]])) + if "tag_ifFalse" in node.get("kwargs").keys(): + tagset.update(set([node["kwargs"]["tag_ifFalse"]])) + if "tag" in node.get("kwargs").keys(): + tagset.update(set([node["kwargs"]["tag"]])) + undefined_classification_tags = tagset.difference(set(tree.get("classification_tags"))) + if undefined_classification_tags: + LGR.warning( + f"{tagset} in node {i} of the decision tree includes a classification tag that was not predefined" + ) + + if err_msg: + raise TreeError("\n" + err_msg) + + return tree + + +class ComponentSelector: + """ + Classifies components based on specified `tree` when the class is initialized + and then the `select` function is called. + The expected output of running a decision tree is that every component + will be classified as 'accepted', or 'rejected'. + + The selection process uses previously calculated parameters listed in + `component_table` for each ICA component such as Kappa (a T2* weighting metric), + Rho (an S0 weighting metric), and variance explained. See tedana.metrics + for more detail on the calculated metrics + + Parameters + ---------- + tree : :obj:`str` + A json file name without the '.json' extension that contains the decision tree to use + component_table : (C x M) :obj:`pandas.DataFrame` + Component metric table. One row for each component, with a column for + each metric; the index should be the component number! + user_notes : :obj:`str, optional` + Additional user notes about decision tree + path : :obj:`str, optional` + The directory path where `tree` is located. + If None, then look for `tree` within ./selection/data + in the tedana code directory. default=None + + Additional Parameters + --------------------- + Any parameter that is used by a decision tree node function can be passed + as a parameter of ComponentSelector class initialization function or can be + included in the json file that defines the decision tree. If a parameter + is set in the json file, that will take precedence. As a style rule, a + parameter that is the same regardless of the inputted data should be + defined in the decision tree json file. A parameter that is dataset specific + should be passed through the initialization function. 
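+    A rough usage sketch (``component_table`` comes from the metric
+    calculation step; ``n_echos`` and ``n_vols`` are dataset-specific values
+    that the caller supplies, here via ``cross_component_metrics``)::
+
+        selector = ComponentSelector(
+            "kundu",
+            component_table,
+            cross_component_metrics={"n_echos": n_echos, "n_vols": n_vols},
+        )
+        selector.select()
+        # classifications are now in selector.component_table["classification"]
+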
Parameters that may need
+    to be passed through the class include:
+
+    n_echos : :obj:`int`, optional
+        Number of echos in multi-echo fMRI data
+    n_vols : :obj:`int`
+        Number of volumes (time points) in the fMRI data
+
+    Returns
+    -------
+    component_table : :obj:`pandas.DataFrame`
+        Updated component table with two extra columns:
+        classifications : :obj:`str` (i.e., accepted, rejected) for each component
+        classification_tags : :obj:`list[str]` descriptions explaining reasons for classification
+    cross_component_metrics : :obj:`Dict`
+        Metrics that are each a single value calculated across
+        components. For example, kappa and rho.
+    component_status_table : :obj:`pandas.DataFrame`
+        A dataframe where each column lists the classification status of
+        each component after each node was run.
+    Information from the tree json file is also stored. This includes:
+    tree, classification_tags, intermediate_classifications, and necessary_metrics.
+    nodes : :obj:`list[dict]`
+        Nodes used in decision tree. This includes the decision tree dict
+        from the json file in the `tree` input. For every element in the list
+        there is an added dict key `outputs` which includes key information from
+        when the function was run. Some of this information is function-specific,
+        but there are common elements across most or all:
+        decision_node_idx : :obj:`int`
+            The decision tree functions are run as part of an ordered list.
+            This is the positional index for when this function was run
+            as part of this list.
+        used_metrics : :obj:`list[str]`
+            A list of the metrics used in a node of the decision tree
+        used_cross_component_metrics : :obj:`list[str]`
+            A list of cross component metrics used in the node of a decision tree
+        node_label : :obj:`str`
+            A brief label for what happens in this node that can be used in a decision
+            tree summary table or flow chart.
+        numTrue, numFalse : :obj:`int`
+            For decision (dec_) functions, the number of components that were classified
+            as true or false respectively in this decision tree step.
+        calc_cross_comp_metrics : :obj:`list[str]`
+            For calculation (calc_) functions, cross component metrics that were
+            calculated in this function. When this is included, each of those
+            metrics and the calculated values are also distinct keys in 'outputs'.
+            While the cross_component_metrics dict does not record in which node
+            each metric was calculated, that information is stored here.
+    current_node_idx : :obj:`int`
+        The index for the current node, which should be the last node in the decision tree
+
+    Notes
+    -----
+    """
+
+    def __init__(self, tree, component_table, cross_component_metrics={}, status_table=None):
+        """
+        Initialize the class using the info specified in the json file `tree`
+
+        Any optional variables defined in the function call will be added to
+        the class structure. Several trees expect n_echos to be defined.
+        The full kundu tree also requires n_vols (number of volumes) to be
+        defined.
An example initialization with these options would look like + selector = ComponentSelector(tree, comptable, n_echos=n_echos, + n_vols=n_vols) + + Returns + ------- + The class structure with the following fields loaded from tree: + nodes, necessary_metrics, intermediate_classificaitons, + classification_tags, + Adds to the class structure: + component_status_table: empty dataframe + cross_component_metrics: empty dict + used_metrics: empty set + """ + self.tree_name = tree + + self.__dict__.update(cross_component_metrics) + self.cross_component_metrics = cross_component_metrics + + """Construct an un-executed selector""" + self.component_table = component_table.copy() + + # To run a decision tree, each component needs to have an initial classification + # If the classification column doesn't exist, create it and label all components + # as unclassified + if "classification" not in self.component_table: + self.component_table["classification"] = "unclassified" + + self.tree = load_config(self.tree_name) + tree_config = self.tree + + LGR.info("Performing component selection with " + tree_config["tree_id"]) + LGR.info(tree_config.get("info", "")) + RepLGR.info(tree_config.get("report", "")) + RefLGR.info(tree_config.get("refs", "")) + + self.tree["nodes"] = tree_config["nodes"] + self.necessary_metrics = set(tree_config["necessary_metrics"]) + self.intermediate_classifications = tree_config["intermediate_classifications"] + self.classification_tags = set(tree_config["classification_tags"]) + if "used_metrics" not in self.tree.keys(): + self.tree["used_metrics"] = set() + else: + self.tree["used_metrics"] = set(self.tree["used_metrics"]) + + if status_table is None: + self.component_status_table = self.component_table[ + ["Component", "classification"] + ].copy() + self.component_status_table = self.component_status_table.rename( + columns={"classification": "initialized classification"} + ) + self.start_idx = 0 + else: + # Since a status table exists, we need to skip nodes up to the + # point where the last tree finished + self.start_idx = len(tree_config["nodes"]) + self.component_status_table = status_table + + def select(self): + """ + Parse the parameters used to call each function in the component + selection decision tree and run the functions to classify components + + Parameters all defined in class initialization + + Returns + ------- + The following attributes are altered in this function are descibed in + the ComponentSelector class description: + component_table, cross_component_metrics, component_status_table, + cross_component_metrics, used_metrics, nodes (outputs field), + current_node_idx + """ + # TODO: force-add classification tags + if not "classification_tags" in self.component_table.columns: + self.component_table["classification_tags"] = "" + # this will crash the program with an error message if not all + # necessary_metrics are in the comptable + confirm_metrics_exist( + self.component_table, self.necessary_metrics, function_name=self.tree_name + ) + + # for each node in the decision tree + for self.current_node_idx, node in enumerate(self.tree["nodes"][self.start_idx :]): + # parse the variables to use with the function + fcn = getattr(selection_nodes, node["functionname"]) + + params, kwargs = node["parameters"], node["kwargs"] + params = self.check_null(params, node["functionname"]) + kwargs = self.check_null(kwargs, node["functionname"]) + # log the function name and parameters used + LGR.info( + "Step {}: Running function {} with parameters: {}".format( + 
self.current_node_idx, node["functionname"], {**params, **kwargs} + ) + ) + # run the decision node function + self = fcn(self, **params, **kwargs) + self.tree["used_metrics"].update( + self.tree["nodes"][self.current_node_idx]["outputs"]["used_metrics"] + ) + + # log the current counts for all classification labels + log_classification_counts(self.current_node_idx, self.component_table) + + # move decision columns to end + self.component_table = clean_dataframe(self.component_table) + # warning anything called a necessary metric wasn't used and if + # anything not called a necessary metric was used + self.are_only_necessary_metrics_used() + + self.are_all_components_accepted_or_rejected() + + def add_manual(self, indices, classification): + """Add nodes that will manually classify components + + Parameters + ---------- + indices: list[int] + The indices to manually classify + classification: str + The classification to set the nodes to + """ + self.tree["nodes"].append( + { + "functionname": "manual_classify", + "parameters": { + "new_classification": classification, + "decide_comps": indices, + }, + "kwargs": { + "dont_warn_reclassify": "true", + }, + } + ) + + def check_null(self, params, fcn): + """ + Checks that all required parameters for selection node functions are + attributes in the class. Error if any are undefined + + Returns + ------- + params + The values for the inputted parameters + """ + + for key, val in params.items(): + if val is None: + try: + params[key] = getattr(self, key) + except AttributeError: + raise ValueError( + "Parameter {} is required in node {}, but not defined. ".format(key, fcn) + + "If {} is dataset specific, it should be " + "defined in the ".format(key) + " initialization of " + "ComponentSelector. If it is fixed regardless of dataset, it " + "should be defined in the json file that defines the " + "decision tree." + ) + + return params + + def are_only_necessary_metrics_used(self): + """ + Check if all metrics that are declared as necessary are actually + used and if any used_metrics weren't explicitly declared necessary + If either of these happen, a warning is added to the logger + """ + not_declared = self.tree["used_metrics"] - self.necessary_metrics + not_used = self.necessary_metrics - self.tree["used_metrics"] + if len(not_declared) > 0: + LGR.warning( + f"Decision tree {self.tree_name} used the following metrics that were not declared as necessary: {not_declared}" + ) + if len(not_used) > 0: + LGR.warning( + f"Decision tree {self.tree_name} did not use the following metrics that were declared as necessary: {not_used}" + ) + + def are_all_components_accepted_or_rejected(self): + """ + After the tree has finished executing, check if all component + classifications are either "accepted" or "rejected" + If any other component classifications remain, log a warning + """ + component_classifications = set(self.component_table["classification"].to_list()) + nonfinal_classifications = component_classifications.difference({"accepted", "rejected"}) + if nonfinal_classifications: + for nonfinal_class in nonfinal_classifications: + numcomp = asarray(self.component_table["classification"] == nonfinal_class).sum() + LGR.warning( + f"{numcomp} components have a final classification of {nonfinal_class}. 
At the end of the selection process, all components are expected to be 'accepted' or 'rejected'"
+            )
+
+    @property
+    def n_comps(self):
+        return len(self.component_table)
+
+    @property
+    def n_bold_comps(self):
+        ct = self.component_table
+        return len(ct[ct.classification == "accepted"])
+
+    @property
+    def accepted_comps(self):
+        return self.component_table["classification"] == "accepted"
+
+    @property
+    def rejected_comps(self):
+        return self.component_table["classification"] == "rejected"
+
+    @property
+    def is_final(self):
+        """Whether the classifications are all accepted/rejected"""
+        return (self.accepted_comps.sum() + self.rejected_comps.sum()) == self.n_comps
+
+    @property
+    def mixing(self):
+        return self.mixing_matrix
+
+    @property
+    def oc_data(self):
+        # Returning ``self.oc_data`` here would recurse into this property;
+        # the optimally combined data is assumed to be stored by the caller
+        # under a distinct attribute name, mirroring ``mixing_matrix`` above.
+        return self.oc_data_matrix
+
+    def to_files(self, io_generator):
+        """Convert this selector into component files
+
+        Parameters
+        ----------
+        io_generator: tedana.io.OutputGenerator
+            The output generator to use for filename generation and saving.
+        """
+        comptable_fname = io_generator.save_file(
+            self.component_table,
+            "ICA metrics tsv",
+        )
+        xcomp_fname = io_generator.save_file(
+            self.cross_component_metrics,
+            "ICA cross component metrics json",
+        )
+        status_fname = io_generator.save_file(
+            self.component_status_table,
+            "ICA status table tsv",
+        )
+        tree_fname = io_generator.save_file(
+            self.tree,
+            "ICA decision tree json",
+        )
diff --git a/tedana/selection/__init__.py b/tedana/selection/__init__.py
index b689de6b0..6fb7795af 100644
--- a/tedana/selection/__init__.py
+++ b/tedana/selection/__init__.py
@@ -1,7 +1,7 @@
 # emacs: -*- mode: python-mode; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
 # ex: set sts=4 ts=4 sw=4 et:
 
-from .tedica import kundu_selection_v2, manual_selection
+from .tedica import automatic_selection, manual_selection
 from .tedpca import kundu_tedpca
 
-__all__ = ["kundu_tedpca", "kundu_selection_v2", "manual_selection"]
+__all__ = ["kundu_tedpca", "automatic_selection", "manual_selection"]
diff --git a/tedana/selection/_utils.py b/tedana/selection/_utils.py
deleted file mode 100644
index 906567cac..000000000
--- a/tedana/selection/_utils.py
+++ /dev/null
@@ -1,120 +0,0 @@
-"""
-Utility functions for tedana.selection
-"""
-import logging
-
-import numpy as np
-
-LGR = logging.getLogger("GENERAL")
-RepLGR = logging.getLogger("REPORT")
-RefLGR = logging.getLogger("REFERENCES")
-
-
-def clean_dataframe(comptable):
-    """
-    Reorder columns in component table so "rationale" and "classification" are
-    last and remove trailing semicolons from rationale column.
-    """
-    cols_at_end = ["classification", "rationale"]
-    comptable = comptable[
-        [c for c in comptable if c not in cols_at_end] + [c for c in cols_at_end if c in comptable]
-    ]
-    comptable["rationale"] = comptable["rationale"].str.rstrip(";")
-    return comptable
-
-
-def getelbow_cons(arr, return_val=False):
-    """
-    Elbow using mean/variance method - conservative
-
-    Parameters
-    ----------
-    arr : (C,) array_like
-        Metric (e.g., Kappa or Rho) values.
-    return_val : :obj:`bool`, optional
-        Return the value of the elbow instead of the index. Default: False
-
-    Returns
-    -------
-    :obj:`int` or :obj:`float`
-        Either the elbow index (if return_val is True) or the values at the
-        elbow index (if return_val is False)
-    """
-    if arr.ndim != 1:
-        raise ValueError("Parameter arr should be 1d, not {0}d".format(arr.ndim))
-
-    if not arr.size:
-        raise ValueError(
-            "Empty array detected during elbow calculation. "
-            "This error happens when getelbow_cons is incorrectly called on no components.
" - "If you see this message, please open an issue at " - "https://github.com/ME-ICA/tedana/issues with the full traceback and any data " - "necessary to reproduce this error, so that we create additional data checks to " - "prevent this from happening." - ) - - arr = np.sort(arr)[::-1] - nk = len(arr) - temp1 = [ - (arr[nk - 5 - ii - 1] > arr[nk - 5 - ii : nk].mean() + 2 * arr[nk - 5 - ii : nk].std()) - for ii in range(nk - 5) - ] - ds = np.array(temp1[::-1], dtype=np.int) - dsum = [] - c_ = 0 - for d_ in ds: - c_ = (c_ + d_) * d_ - dsum.append(c_) - e2 = np.argmax(np.array(dsum)) - elind = np.max([getelbow(arr), e2]) - - if return_val: - return arr[elind] - else: - return elind - - -def getelbow(arr, return_val=False): - """ - Elbow using linear projection method - moderate - - Parameters - ---------- - arr : (C,) array_like - Metric (e.g., Kappa or Rho) values. - return_val : :obj:`bool`, optional - Return the value of the elbow instead of the index. Default: False - - Returns - ------- - :obj:`int` or :obj:`float` - Either the elbow index (if return_val is True) or the values at the - elbow index (if return_val is False) - """ - if arr.ndim != 1: - raise ValueError("Parameter arr should be 1d, not {0}d".format(arr.ndim)) - - if not arr.size: - raise ValueError( - "Empty array detected during elbow calculation. " - "This error happens when getelbow is incorrectly called on no components. " - "If you see this message, please open an issue at " - "https://github.com/ME-ICA/tedana/issues with the full traceback and any data " - "necessary to reproduce this error, so that we create additional data checks to " - "prevent this from happening." - ) - - arr = np.sort(arr)[::-1] - n_components = arr.shape[0] - coords = np.array([np.arange(n_components), arr]) - p = coords - coords[:, 0].reshape(2, 1) - b = p[:, -1] - b_hat = np.reshape(b / np.sqrt((b**2).sum()), (2, 1)) - proj_p_b = p - np.dot(b_hat.T, p) * np.tile(b_hat, (1, n_components)) - d = np.sqrt((proj_p_b**2).sum(axis=0)) - k_min_ind = d.argmax() - - if return_val: - return arr[k_min_ind] - else: - return k_min_ind diff --git a/tedana/selection/selection_nodes.py b/tedana/selection/selection_nodes.py new file mode 100644 index 000000000..932210215 --- /dev/null +++ b/tedana/selection/selection_nodes.py @@ -0,0 +1,1602 @@ +""" +Functions that will be used as steps in a decision tree +""" +import logging +import numpy as np +import pandas as pd + +# from scipy import stats + +from scipy.stats import scoreatpercentile +from tedana.stats import getfbounds +from tedana.selection.selection_utils import ( + confirm_metrics_exist, + selectcomps2use, + log_decision_tree_step, + change_comptable_classifications, + getelbow, + get_extend_factor, + kappa_elbow_kundu, + # get_new_meanmetricrank, +) +from tedana.metrics.dependence import generate_decision_table_score + +LGR = logging.getLogger("GENERAL") +RepLGR = logging.getLogger("REPORT") +RefLGR = logging.getLogger("REFERENCES") + +decision_docs = { + "selector": """\ +selector: :obj:`tedana.selection.ComponentSelector` + This structure contains most of the information needed to execute each + decision node function and to store the ouput of the function. The class + description has full details. 
Key elements include: component_table: + The metrics for each component, and the classification + labels and tags; cross_component_metrics: Values like the kappa and rho + elbows that are used to create decision criteria; nodes: Information on + the function calls for each step in the decision tree; and + current_node_idx: which is the ordered index for when a function is + called in the decision tree\ +""", + "ifTrueFalse": """\ +ifTrue, ifFalse: :obj:`str` + If the condition in this step is true or false, give the component + the label in this string. Options are 'accepted', 'rejected', + 'nochange', or intermediate_classification labels predefined in the + decision tree. If 'nochange' then don't change the current component + classification\ +""", + "decide_comps": """\ +decide_comps: :obj:`str` or :obj:`list[str]` + This is string or a list of strings describing what classifications + of components to operate on, using default or intermediate_classification + labels. For example: decide_comps='unclassified' means to operate only on + unclassified components. The label 'all' will operate on all components + regardess of classification.\ +""", + "log_extra": """\ +log_extra_report, log_extra_info: :obj:`str` + Text for each function call is automatically placed in the logger output + In addition to that text, the text in these these strings will also be + included in the logger with the report or info codes respectively. + These might be useful to give a narrative explanation of why a step was + parameterized a certain way. default="" (no extra logging)\ +""", + "only_used_metrics": """\ +only_used_metrics: :obj:`bool` + If true, this function will only return the names of the comptable metrics + that will be used when this function is fully run. default=False\ +""", + "custom_node_label": """\ +custom_node_label: :obj:`str` + A brief label for what happens in this node that can be used in a decision +tree summary table or flow chart. If custom_node_label is not empty, then the +text in this parameter is used instead of the text would be automatically +assigned within the function call default=""\ +""", + "tag_ifTrueFalse": """\ +tag_ifTrue, tag_ifFalse: :obj:`str` + A string containing a label in classification_tags that will be added to + the classification_tags column in component_table if a component is + classified as true or false. default=None +""", + "basicreturns": """\ +selector: :obj:`tedana.selection.ComponentSelector` + The key fields that will be changed in selector are the component + classifications and tags in component_table or a new metric that is + added to cross_component_metrics. 
The output field for the current + node will also be updated to include relevant information including + the use_metrics of the node, and the numTrue and numFalse components + the call to the node's function.\ +""", + "extend_factor": """\ +extend_factor: :obj:`float` + A scaler used to set the threshold for the mean rank metric + \ + """, + "restrict_factor": """\ +restrict_factor: :obj:`float` + A scaler used to set the threshold for the mean rank metric + \ + """, + "prev_X_steps": """\ +prev_X_steps: :obj:`int` + Search for components with a classification label in the current or the previous X steps in + the decision tree + \ + """, +} + + +def manual_classify( + selector, + decide_comps, + new_classification, + clear_classification_tags=False, + log_extra_report="", + log_extra_info="", + custom_node_label="", + only_used_metrics=False, + tag=None, + dont_warn_reclassify=False, +): + """ + Explicitly assign a classifictation, defined in new_classification, + to all the components in decide_comps. + + Parameters + ---------- + {selector} + {decide_comps} + new_classification: :obj: `str` + Assign all components identified in decide_comps the classification + in new_classification. Options are 'unclassified', 'accepted', + 'rejected', or intermediate_classification labels predefined in the + decision tree + clear_classification_tags: :obj: `bool` + If True, reset all values in the 'classification_tags' column to empty + strings. This also can create the classification_tags column if it + does not already exist + If False, do nothing. + tag: :obj: `str` + A classification tag to assign to all components being reclassified. + This should be one of the tags defined by classification_tags in + the decision tree specification + dont_warn_reclassify: :obj:`bool` + By default, if this function changes a component classification from accepted or + rejected to something else, it gives a warning, since those should be terminal + classifications. If this is True, that warning is suppressed. + (Useful if manual_classify is used to reset all labels to unclassified). + default=False + {log_extra} + {custom_node_label} + {only_used_metrics} + + + Returns + ------- + {basicreturns} + + Note + ---- + This was designed with three use + cases in mind: + 1. Set the classifications of all components to unclassified for the first + node of a decision tree. clear_classification_tags=True is recommended for + this use case + 2. Shift all components between classifications, such as provisionalaccept + to accepted for the penultimate node in the decision tree. + 3. Manually re-classify components by number based on user observations. 
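+    For use case 3, decide_comps can be a list of component indices rather
+    than a classification label; the add_manual method of ComponentSelector
+    appends exactly such a node. A sketch with illustrative indices::
+
+        {
+            "functionname": "manual_classify",
+            "parameters": {"new_classification": "rejected", "decide_comps": [2, 5]},
+            "kwargs": {"dont_warn_reclassify": "true", "tag": "Unlikely BOLD"},
+        }
+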
+ + Unlike other decision node functions, ifTrue and ifFalse are not inputs + since the same classification is assigned to all components listed in + decide_comps + """ + + # predefine all outputs that should be logged + outputs = { + "decision_node_idx": selector.current_node_idx, + "used_metrics": set(), + "node_label": None, + "numTrue": None, + "numFalse": None, + } + + if only_used_metrics: + return outputs["used_metrics"] + + ifTrue = new_classification + ifFalse = "nochange" + + function_name_idx = "Step {}: manual_classify".format((selector.current_node_idx)) + if custom_node_label: + outputs["node_label"] = custom_node_label + else: + outputs["node_label"] = "Set " + str(decide_comps) + " to " + new_classification + + if log_extra_info: + LGR.info(log_extra_info) + if log_extra_report: + RepLGR.info(log_extra_report) + + comps2use = selectcomps2use(selector, decide_comps) + + if not comps2use: + log_decision_tree_step(function_name_idx, comps2use, decide_comps=decide_comps) + outputs["numTrue"] = 0 + outputs["numFalse"] = 0 + else: + decision_boolean = pd.Series(True, index=comps2use) + selector, outputs["numTrue"], outputs["numFalse"] = change_comptable_classifications( + selector, + ifTrue, + ifFalse, + decision_boolean, + tag_ifTrue=tag, + dont_warn_reclassify=dont_warn_reclassify, + ) + # outputs["numTrue"] = decision_boolean.sum() + # outputs["numFalse"] = np.logical_not(decision_boolean).sum() + + log_decision_tree_step( + function_name_idx, + comps2use, + numTrue=outputs["numTrue"], + numFalse=outputs["numFalse"], + ifTrue=ifTrue, + ifFalse=ifFalse, + ) + + if clear_classification_tags: + selector.component_table["classification_tags"] = "" + LGR.info(function_name_idx + " component classification tags are cleared") + + selector.tree["nodes"][selector.current_node_idx]["outputs"] = outputs + + return selector + + +manual_classify.__doc__ = manual_classify.__doc__.format(**decision_docs) + + +def dec_left_op_right( + selector, + ifTrue, + ifFalse, + decide_comps, + op, + left, + right, + left_scale=1, + right_scale=1, + op2=None, + left2=None, + right2=None, + left2_scale=1, + right2_scale=1, + op3=None, + left3=None, + right3=None, + left3_scale=1, + right3_scale=1, + log_extra_report="", + log_extra_info="", + custom_node_label="", + only_used_metrics=False, + tag_ifTrue=None, + tag_ifFalse=None, +): + """ + Tests a relationship between (left_scale*)left and (right_scale*right) + using an operator, like >, defined with op + This can be used to directly compare any 2 metrics and use that info + to change component classification. If either metric is a number, + this can also compare a metric against a fixed threshold. + + Parameters + ---------- + {selector} + {ifTrueFalse} + {decide_comps} + op: :ojb:`str` + Must be one of: ">", ">=", "==", "<=", "<" + Applied the user defined operator to left op right + left, right: :obj:`str` or :obj:`float` + The labels for the two metrics to be used for comparision. + for example: left='kappa', right='rho' and op='>' means this + function will test kappa>rho. One of the two can also be a number. + In that case a metric would be compared against a fixed threshold. + For example left='T2fitdiff_invsout_ICAmap_Tstat', right=0, and op='>' + means this function will test T2fitdiff_invsout_ICAmap_Tstat>0 + left_scale, right_scale: :obj:`float`, optional + Multiply the left or right metrics value by a constant. For example + if left='kappa', right='rho', right_scale=2, and op='>' this tests + kappa>(2*rho). 
These also be a string that labels a value in + cross_component_metrics, since those will resolve to a single value. + This cannot be a label for a component_table column since that would + output a different value for each component. default=1 + op2: :ojb:`str`, optional + left2, right2, left3, right3: :obj:`str` or :obj:`float`, optional + left2_scale, right2_scale, left3_scale, right3_scale: :obj:`float`, optional + This function can also be used to calculate the intersection of two or three + boolean statements. If op2, left2, and right2 are defined then + this function returns + (left_scale*)left op (right_scale*right) AND (left2_scale*)left2 op2 (right2_scale*right2) + if the "3" parameters are also defined then it's the intersection of all 3 statements + {log_extra} + {custom_node_label} + {only_used_metrics} + {tag_ifTrueFalse} + + Returns + ------- + {basicreturns} + + Note + ---- + This function is ideally run with one boolean statement at a time so that + the result of each boolean is logged. For example, it's better to test + kappa>kappa_elbow and rho>rho_elbow with two separate calls to this function + so that the results of each can be easily viewed. That said, particularly for + the original kundu decision tree, if you're making decisions on components with + various classifications based on multiple boolean statements, the decision tree + becomes really messy and the added functionality here is useful. + Combinations of boolean statements only test with "and" and not "or". This is + an intentional decision because, if a classification changes if A or B are true + then the results of each should be logged separately + """ + + # predefine all outputs that should be logged + outputs = { + "decision_node_idx": selector.current_node_idx, + "used_metrics": set(), + "used_cross_component_metrics": set(), + "node_label": None, + "numTrue": None, + "numFalse": None, + } + + function_name_idx = f"Step {selector.current_node_idx}: left_op_right" + # Only select components if the decision tree is being run + if not only_used_metrics: + comps2use = selectcomps2use(selector, decide_comps) + + def identify_used_metric(val, isnum=False): + """ + Parse the left or right values or scalers to see if they are an + existing used_metric or cross_component_metric + If the value already a number, no parse would be needed + + This is also used on left_scale and right_scale to convert + a value in cross_component_metrics to a number. Set the isnum + flag to true for those inputs and this will raise an error + if a number isn't loaded + """ + orig_val = val + if isinstance(val, str): + if val in selector.component_table.columns: + outputs["used_metrics"].update([val]) + elif val in selector.cross_component_metrics: + outputs["used_cross_component_metrics"].update([val]) + val = selector.cross_component_metrics[val] + # If decision tree is being run, then throw errors or messages + # if a component doesn't exist. If this is just getting a list + # of metrics to be used, then don't bring up warnings + elif not only_used_metrics: + if not comps2use: + LGR.info( + f"{function_name_idx}: {val} is neither a metric in " + "selector.component_table nor selector.cross_component_metrics, " + f"but no components with {decide_comps} remain by this node " + "so nothing happens" + ) + else: + raise ValueError( + f"{val} is neither a metric in selector.component_table nor selector.cross_component_metrics" + ) + if isnum: + if not isinstance(val, (int, float)): + raise ValueError(f"{orig_val} must be a number. 
It is {val}") + return val + + legal_ops = (">", ">=", "==", "<=", "<") + + def confirm_valid_conditional(left_scale, left_val, right_scale, right_val, op_val): + """ + Makes sure the left_scale, left_val, right_scale, right_val, and + operator variables combine into a valid conditional statement + """ + + left_val = identify_used_metric(left_val) + right_val = identify_used_metric(right_val) + left_scale = identify_used_metric(left_scale, isnum=True) + right_scale = identify_used_metric(right_scale, isnum=True) + + if op_val not in legal_ops: + raise ValueError(f"{op_val} is not a binary comparison operator, like > or <") + return left_scale, left_val, right_scale, right_val + + def operator_scale_descript(val_scale, val): + """ + Return a string with one element from the mathematical expression + If val_scale is not 1, include scaling factor (rounded to 2 decimals) + If val is a column in the component_table output the column label + If val is a number (either an inputted number or from cross_component_metrics + include the number (rounded to 2 decimals) + This output is used to great a descriptor for visualizing the decision tree + Unrounded values are saved and rounding here will not affect results + """ + if not isinstance(val, str): + val = str(round(val, 2)) + if val_scale == 1: + return val + else: + return f"{round(val_scale,2)}*{val}" + + left_scale, left, right_scale, right = confirm_valid_conditional( + left_scale, left, right_scale, right, op + ) + descript_left = operator_scale_descript(left_scale, left) + descript_right = operator_scale_descript(right_scale, right) + is_compound = 0 + + # If any of the values for the second boolean statement are set + if left2 or right2 or op2: + # Check if they're all set & use them all or raise an error + if left2 and right2 and op2: + is_compound = 2 + left2_scale, left2, right2_scale, right2 = confirm_valid_conditional( + left2_scale, left2, right2_scale, right2, op2 + ) + descript_left2 = operator_scale_descript(left2_scale, left2) + descript_right2 = operator_scale_descript(right2_scale, right2) + else: + raise ValueError( + "left_op_right can check if a first and second boolean " + "statement are both true. This call includes some but not " + "all variables to define the second boolean statement " + f"left2={left2}, right2={right2}, op2={op2}" + ) + + # If any of the values for the second boolean statement are set + if left3 or right3 or op3: + if is_compound == 0: + raise ValueError( + "left_op_right is includes parameters for a third conditional " + "(left3, right3, or op3) statement without setting the " + "second statement" + ) + # Check if they're all set & use them all or raise an error + if left3 and right3 and op3: + is_compound = 3 + left3_scale, left3, right3_scale, right3 = confirm_valid_conditional( + left3_scale, left3, right3_scale, right3, op3 + ) + descript_left3 = operator_scale_descript(left3_scale, left3) + descript_right3 = operator_scale_descript(right3_scale, right3) + else: + raise ValueError( + "left_op_right can check if three boolean " + "statements are all true. 
This call includes some but not "
+                "all variables to define the third boolean statement "
+                f"left3={left3}, right3={right3}, op3={op3}"
+            )
+
+    if only_used_metrics:
+        return outputs["used_metrics"]
+
+    if custom_node_label:
+        outputs["node_label"] = custom_node_label
+    elif is_compound == 0:
+        outputs["node_label"] = f"{descript_left}{op}{descript_right}"
+    elif is_compound == 2:
+        outputs["node_label"] = [
+            f"{descript_left}{op}{descript_right} & " f"{descript_left2}{op2}{descript_right2}"
+        ]
+    elif is_compound == 3:
+        outputs["node_label"] = [
+            f"{descript_left}{op}{descript_right} & "
+            f"{descript_left2}{op2}{descript_right2} & "
+            f"{descript_left3}{op3}{descript_right3}"
+        ]
+
+    # Might want to add additional default logging to functions here
+    # The function input will be logged before the function call
+    if log_extra_info:
+        LGR.info(log_extra_info)
+    if log_extra_report:
+        RepLGR.info(log_extra_report)
+
+    confirm_metrics_exist(
+        selector.component_table, outputs["used_metrics"], function_name=function_name_idx
+    )
+
+    def parse_vals(val):
+        """Get the metric values for the selected components or return the constant int or float"""
+        if isinstance(val, str):
+            return selector.component_table.loc[comps2use, val].copy()
+        else:
+            return val  # should be a fixed number
+
+    if not comps2use:
+        outputs["numTrue"] = 0
+        outputs["numFalse"] = 0
+        log_decision_tree_step(
+            function_name_idx,
+            comps2use,
+            decide_comps=decide_comps,
+            ifTrue=outputs["numTrue"],
+            ifFalse=outputs["numFalse"],
+        )
+
+    else:
+        left1_val = parse_vals(left)
+        right1_val = parse_vals(right)
+        decision_boolean = eval(f"(left_scale*left1_val) {op} (right_scale * right1_val)")
+        if is_compound >= 2:
+            left2_val = parse_vals(left2)
+            right2_val = parse_vals(right2)
+            statement1 = decision_boolean.copy()
+            statement2 = eval(f"(left2_scale*left2_val) {op2} (right2_scale * right2_val)")
+            # logical dot product for compound statement
+            decision_boolean = statement1 * statement2
+        if is_compound == 3:
+            left3_val = parse_vals(left3)
+            right3_val = parse_vals(right3)
+            # statement 1 is now the combination of the first two conditional statements
+            statement1 = decision_boolean.copy()
+            # statement 2 is now the third conditional statement
+            statement2 = eval(f"(left3_scale*left3_val) {op3} (right3_scale * right3_val)")
+            # logical dot product for compound statement
+            decision_boolean = statement1 * statement2
+
+        (selector, outputs["numTrue"], outputs["numFalse"],) = change_comptable_classifications(
+            selector,
+            ifTrue,
+            ifFalse,
+            decision_boolean,
+            tag_ifTrue=tag_ifTrue,
+            tag_ifFalse=tag_ifFalse,
+        )
+        # outputs["numTrue"] = np.asarray(decision_boolean).sum()
+        # outputs["numFalse"] = np.logical_not(decision_boolean).sum()
+
+        log_decision_tree_step(
+            function_name_idx,
+            comps2use,
+            numTrue=outputs["numTrue"],
+            numFalse=outputs["numFalse"],
+            ifTrue=ifTrue,
+            ifFalse=ifFalse,
+        )
+
+    selector.tree["nodes"][selector.current_node_idx]["outputs"] = outputs
+
+    return selector
+
+
+dec_left_op_right.__doc__ = dec_left_op_right.__doc__.format(**decision_docs)
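+
+
+# An illustrative (not bundled) node specification for dec_left_op_right:
+# reject any unclassified component where rho exceeds kappa. The left/right
+# scale kwargs default to 1 and are omitted here.
+#
+#     {
+#         "functionname": "dec_left_op_right",
+#         "parameters": {
+#             "ifTrue": "rejected",
+#             "ifFalse": "nochange",
+#             "decide_comps": "unclassified",
+#             "op": "<",
+#             "left": "kappa",
+#             "right": "rho",
+#         },
+#         "kwargs": {},
+#     }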
+
+
+def dec_variance_lessthan_thresholds(
+    selector,
+    ifTrue,
+    ifFalse,
+    decide_comps,
+    var_metric="variance explained",
+    single_comp_threshold=0.1,
+    all_comp_threshold=1.0,
+    log_extra_report="",
+    log_extra_info="",
+    custom_node_label="",
+    only_used_metrics=False,
+    tag_ifTrue=None,
+    tag_ifFalse=None,
+):
+    """
+    Finds components with variance<single_comp_threshold. If the summed
+    variance of all these components is greater than all_comp_threshold,
+    then the identified components with the highest variance are removed,
+    one at a time, until the remaining selected components' summed variance
+    is less than all_comp_threshold.
+
+    Parameters
+    ----------
+    {selector}
+    {ifTrueFalse}
+    {decide_comps}
+    var_metric: :obj:`str`
+        The column in component_table to use as variance. default="variance explained"
+    single_comp_threshold: :obj:`float`
+        The threshold that each component's variance must be below. default=0.1
+    all_comp_threshold: :obj:`float`
+        The threshold for the summed variance of all components that pass
+        the single component threshold. default=1.0
+    {log_extra}
+    {custom_node_label}
+    {only_used_metrics}
+    {tag_ifTrueFalse}
+
+    Returns
+    -------
+    {basicreturns}
+    """
+
+    outputs = {
+        "decision_node_idx": selector.current_node_idx,
+        "used_metrics": set([var_metric]),
+        "node_label": None,
+        "numTrue": None,
+        "numFalse": None,
+    }
+
+    if only_used_metrics:
+        return outputs["used_metrics"]
+
+    function_name_idx = f"Step {selector.current_node_idx}: variance_lessthan_thresholds"
+
+    if custom_node_label:
+        outputs["node_label"] = custom_node_label
+    else:
+        outputs["node_label"] = f"{var_metric}<{single_comp_threshold}"
+
+    if log_extra_info:
+        LGR.info(log_extra_info)
+    if log_extra_report:
+        RepLGR.info(log_extra_report)
+
+    comps2use = selectcomps2use(selector, decide_comps)
+    confirm_metrics_exist(
+        selector.component_table, outputs["used_metrics"], function_name=function_name_idx
+    )
+
+    if not comps2use:
+        outputs["numTrue"] = 0
+        outputs["numFalse"] = 0
+        log_decision_tree_step(
+            function_name_idx,
+            comps2use,
+            decide_comps=decide_comps,
+            ifTrue=outputs["numTrue"],
+            ifFalse=outputs["numFalse"],
+        )
+    else:
+        variance = selector.component_table.loc[comps2use, var_metric]
+        decision_boolean = variance < single_comp_threshold
+        # If the summed variance of the selected low-variance components is
+        # still above all_comp_threshold, de-select the highest variance
+        # components until the sum is below all_comp_threshold:
+        if variance[decision_boolean].sum() > all_comp_threshold:
+            while variance[decision_boolean].sum() > all_comp_threshold:
+                tmpmax = variance == variance[decision_boolean].max()
+                decision_boolean[tmpmax] = False
+        (selector, outputs["numTrue"], outputs["numFalse"],) = change_comptable_classifications(
+            selector,
+            ifTrue,
+            ifFalse,
+            decision_boolean,
+            tag_ifTrue=tag_ifTrue,
+            tag_ifFalse=tag_ifFalse,
+        )
+        # outputs["numTrue"] = np.asarray(decision_boolean).sum()
+        # outputs["numFalse"] = np.logical_not(decision_boolean).sum()
+
+        log_decision_tree_step(
+            function_name_idx,
+            comps2use,
+            numTrue=outputs["numTrue"],
+            numFalse=outputs["numFalse"],
+            ifTrue=ifTrue,
+            ifFalse=ifFalse,
+        )
+
+    selector.tree["nodes"][selector.current_node_idx]["outputs"] = outputs
+    return selector
+
+
+dec_variance_lessthan_thresholds.__doc__ = dec_variance_lessthan_thresholds.__doc__.format(
+    **decision_docs
+)
+
+
+def calc_kappa_rho_elbows_kundu(
+    selector,
+    decide_comps,
+    log_extra_report="",
+    log_extra_info="",
+    custom_node_label="",
+    only_used_metrics=False,
+    kappa_only=False,
+    rho_only=False,
+):
+    """
+    Calculates 'elbows' for kappa and rho values across components and thresholds
+    on kappa>kappa_elbow & rho<rho_elbow.
+
+    Parameters
+    ----------
+    {selector}
+    {decide_comps}
+    {log_extra}
+    {custom_node_label}
+    {only_used_metrics}
+    kappa_only: :obj:`bool`, optional
+        Only use the kappa>kappa_elbow threshold. default=False
+    rho_only: :obj:`bool`, optional
+        Only use the rho>rho_elbow threshold. default=False
+
+    Returns
+    -------
+    {basicreturns}
+
+    Note
+    ----
+    This script is currently hard coded for a specific way to calculate kappa and rho elbows
+    based on the method by Kundu in the MEICA v2.7 code. Another elbow calculation would
+    require a distinct function. Ideally, there could be one elbow function that allows for
+    more flexible options.
+
+    This also uses all unclassified components as part of the elbow calculation, regardless
+    of what is in decide_comps.
+    """
+
+    # If kappa_only or rho_only is true kappa or rho might not actually be
+    # used, but, as of now, both are required to run this function
+
+    outputs = {
+        "decision_node_idx": selector.current_node_idx,
+        "node_label": None,
+        "n_echos": selector.n_echos,
+        "varex_upper_p": None,
+    }
+    if not (kappa_only ^ rho_only):
+        # if neither or both kappa_only and rho_only are set
+        outputs["used_metrics"] = set(["kappa", "rho"])
+        outputs["calc_cross_comp_metrics"] = [
+            "kappa_elbow_kundu",
+            "rho_elbow_kundu",
+            "varex_upper_p",
+        ]
+        outputs["kappa_elbow_kundu"] = None
+        outputs["rho_elbow_kundu"] = None
+        calc_kappa = True
+        calc_rho = True
+    elif kappa_only:
+        outputs["used_metrics"] = set(["kappa"])
+        outputs["calc_cross_comp_metrics"] = [
+            "kappa_elbow_kundu",
+            "varex_upper_p",
+        ]
+        outputs["kappa_elbow_kundu"] = None
+        calc_kappa = True
+        calc_rho = False
+    elif rho_only:
+        outputs["used_metrics"] = set(["rho"])
+        outputs["calc_cross_comp_metrics"] = [
+            "rho_elbow_kundu",
+            "varex_upper_p",
+        ]
+        outputs["rho_elbow_kundu"] = None
+        calc_kappa = False
+        calc_rho = True
+
+    if only_used_metrics:
+        return outputs["used_metrics"]
+
+    function_name_idx = f"Step {selector.current_node_idx}: calc_kappa_rho_elbows_kundu"
+
+    if ("kappa_elbow_kundu" in selector.cross_component_metrics) and (
+        "kappa_elbow_kundu" in outputs["calc_cross_comp_metrics"]
+    ):
+        LGR.warning(
+            f"kappa_elbow_kundu already calculated. Overwriting previous value in {function_name_idx}"
+        )
+    if ("rho_elbow_kundu" in selector.cross_component_metrics) and (
+        "rho_elbow_kundu" in outputs["calc_cross_comp_metrics"]
+    ):
+        LGR.warning(
+            f"rho_elbow_kundu already calculated. Overwriting previous value in {function_name_idx}"
+        )
+    if "varex_upper_p" in selector.cross_component_metrics:
+        LGR.warning(
+            f"varex_upper_p already calculated.
Overwriting previous value in {function_name_idx}" + ) + + if custom_node_label: + outputs["node_label"] = custom_node_label + else: + if not (kappa_only ^ rho_only): + outputs["node_label"] = "Calc Kappa & Rho Elbows" + elif kappa_only: + outputs["node_label"] = "Calc Kappa Elbow" + elif rho_only: + outputs["node_label"] = "Calc Rho Elbow" + + LGR.info( + "Note: This matches the elbow selecton criteria in Kundu's MEICA v2.7" + " except there is a variance threshold that is used for the rho criteria that " + "really didn't make sense and is being excluded." + ) + + if log_extra_info: + LGR.info(log_extra_info) + if log_extra_report: + RepLGR.info(log_extra_report) + + comps2use = selectcomps2use(selector, decide_comps) + confirm_metrics_exist( + selector.component_table, outputs["used_metrics"], function_name=function_name_idx + ) + + unclassified_comps2use = selectcomps2use(selector, "unclassified") + + if (not comps2use) or (not unclassified_comps2use): + if not comps2use: + # outputs["numTrue"] = 0 + # outputs["numFalse"] = 0 + log_decision_tree_step( + function_name_idx, + comps2use, + decide_comps=decide_comps, + # ifTrue=outputs["numTrue"], + # ifFalse=outputs["numFalse"], + ) + if not unclassified_comps2use: + # outputs["numTrue"] = 0 + # outputs["numFalse"] = 0 + log_decision_tree_step( + function_name_idx, + comps2use, + decide_comps="unclassified", + # ifTrue=outputs["numTrue"], + # ifFalse=outputs["numFalse"], + ) + else: + if calc_kappa: + outputs["kappa_elbow_kundu"] = kappa_elbow_kundu( + selector.component_table, selector.n_echos + ) + selector.cross_component_metrics["kappa_elbow_kundu"] = outputs["kappa_elbow_kundu"] + + # The first elbow used to be for rho values of the unclassified components + # excluding a few based on differences of variance. Now it's all unclassified + # components + # Upper limit for variance explained is median across components with high + # Kappa values. High Kappa is defined as Kappa above Kappa elbow. 
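+            # getfbounds returns F-statistic significance thresholds for the
+            # given number of echoes; f05 feeds into the rho elbow mean below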
+ f05, _, f01 = getfbounds(selector.n_echos) + outputs["varex_upper_p"] = np.median( + selector.component_table.loc[ + selector.component_table["kappa"] + > getelbow(selector.component_table["kappa"], return_val=True), + "variance explained", + ] + ) + selector.cross_component_metrics["varex_upper_p"] = outputs["varex_upper_p"] + + ncls = unclassified_comps2use.copy() + for i_loop in range(3): + temp_comptable = selector.component_table.loc[ncls].sort_values( + by=["variance explained"], ascending=False + ) + diff_vals = temp_comptable["variance explained"].diff(-1) + diff_vals = diff_vals.fillna(0) + ncls = temp_comptable.loc[diff_vals < outputs["varex_upper_p"]].index.values + # kappa_elbow was already calculated in kappa_elbow_kundu above + # kappas_nonsig = comptable.loc[comptable["kappa"] < f01, "kappa"] + # kappa_elbow = np.min( + # ( + # getelbow(kappas_nonsig, return_val=True), + # getelbow(comptable["kappa"], return_val=True), + # ) + # ) + if calc_rho: + outputs["rho_elbow_kundu"] = np.mean( + ( + getelbow(selector.component_table.loc[ncls, "rho"], return_val=True), + getelbow(selector.component_table["rho"], return_val=True), + f05, + ) + ) + selector.cross_component_metrics["rho_elbow_kundu"] = outputs["rho_elbow_kundu"] + + # print(('numTrue={}, numFalse={}, numcomps2use={}'.format( + # numTrue, numFalse, len(comps2use)))) + log_decision_tree_step(function_name_idx, comps2use, calc_outputs=outputs) + + selector.tree["nodes"][selector.current_node_idx]["outputs"] = outputs + + return selector + + +calc_kappa_rho_elbows_kundu.__doc__ = calc_kappa_rho_elbows_kundu.__doc__.format(**decision_docs) + + +def dec_classification_doesnt_exist( + selector, + new_classification, + decide_comps, + class_comp_exists, + log_extra_report="", + log_extra_info="", + custom_node_label="", + only_used_metrics=False, + tag_ifTrue=None, +): + """ + If there are no components with a classification specified in class_comp_exists, + change the classification of all components in decide_comps + + Parameters + ---------- + {selector} + new_classification: :obj: `str` + Assign all components identified in decide_comps the classification + in new_classification. Options are 'unclassified', 'accepted', + 'rejected', or intermediate_classification labels predefined in the + decision tree + {decide_comps} + class_comp_exists: :obj:`str` or :obj:`list[str]` or :obj:`int` or :obj:`list[int]` + This has the same structure options as decide_comps. This function tests + whether any components have the classifications defined in this variable. + {log_extra} + {custom_node_label} + {only_used_metrics} + {tag_ifTrueFalse} + + + Returns + ------- + {basicreturns} + + Note + ---- + This function is useful to end the component selection process early + even if there are additional nodes. For example, in the original + kundu tree, if no components are identified with kappa>elbow and + rho>elbow then, instead of removing everything, it effectively says + something's wrong and conservatively keeps everything. Similarly, + later in the kundu tree, there are several steps deciding how to + classify any remaining provisional components. If none of the + remaining components are "provisionalreject" then it skips those + steps and accepts everything left. 
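+    A sketch of a node using this function, modeled loosely on the kundu
+    tree's "accept everything left" behavior (field values illustrative)::
+
+        {
+            "functionname": "dec_classification_doesnt_exist",
+            "parameters": {
+                "new_classification": "accepted",
+                "decide_comps": ["provisionalaccept", "unclassified"],
+                "class_comp_exists": "provisionalreject",
+            },
+            "kwargs": {"tag_ifTrue": "Likely BOLD"},
+        }
+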
+ + """ + + # predefine all outputs that should be logged + outputs = { + "decision_node_idx": selector.current_node_idx, + "used_metrics": set(), + "used_cross_component_metrics": set(), + "node_label": None, + "numTrue": None, + "numFalse": None, + } + + if only_used_metrics: + return outputs["used_metrics"] + + function_name_idx = "Step {}: classification_doesnt_exist".format((selector.current_node_idx)) + if custom_node_label: + outputs["node_label"] = custom_node_label + else: + outputs["node_label"] = f"Change {decide_comps} if {class_comp_exists} doesn't exist" + + if log_extra_info: + LGR.info(log_extra_info) + if log_extra_report: + RepLGR.info(log_extra_report) + + ifTrue = new_classification + ifFalse = "nochange" + + comps2use = selectcomps2use(selector, decide_comps) + + do_comps_exist = selectcomps2use(selector, class_comp_exists) + + if (not comps2use) or (do_comps_exist): + outputs["numTrue"] = 0 + # If nothing chanages, then assign the number of components in comps2use to numFalse + outputs["numFalse"] = len(comps2use) + log_decision_tree_step( + function_name_idx, + comps2use, + decide_comps=decide_comps, + ifTrue=outputs["numTrue"], + ifFalse=outputs["numFalse"], + ) + else: # do_comps_exist is None: + # should be True for all components in comps2use + # decision_boolean = pd.Series(data=False, index=np.arange(len(selector.component_table)), dtype=bool) + # decision_boolean.iloc[comps2use] = True + decision_boolean = pd.Series(True, index=comps2use) + + selector, outputs["numTrue"], outputs["numFalse"] = change_comptable_classifications( + selector, + ifTrue, + ifFalse, + decision_boolean, + tag_ifTrue=tag_ifTrue, + ) + + log_decision_tree_step( + function_name_idx, + comps2use, + numTrue=outputs["numTrue"], + numFalse=outputs["numFalse"], + ifTrue=ifTrue, + ifFalse=ifFalse, + ) + + selector.tree["nodes"][selector.current_node_idx]["outputs"] = outputs + + return selector + + +dec_classification_doesnt_exist.__doc__ = dec_classification_doesnt_exist.__doc__.format( + **decision_docs +) + + +def calc_varex_thresh( + selector, + decide_comps, + thresh_label, + percentile_thresh, + log_extra_report="", + log_extra_info="", + custom_node_label="", + only_used_metrics=False, +): + """ + Calculates the variance explained threshold to use in the kundu decision tree. + Will save a high or low percentile threshold depending on highlow_thresh + + Parameters + ---------- + {selector} + {decide_comps} + thresh_label: :obj:`str` + The threshold will be saved in "varex_(thresh_label)_thresh" + In the original kundu decision tree this was either "upper" or "lower" + If passed an empty string,t hen will be saved as "varex_thresh" + percentile_thresh: :obj:`int` + A percentile threshold to apply to components to set the variance threshold. 
+ In the original kundu decision tree this was 90 for varex_upper_thresh and + 25 for varex_lower_thresh + {log_extra} + {custom_node_label} + {only_used_metrics} + + Returns + ------- + {basicreturns} + + """ + + function_name_idx = f"Step {selector.current_node_idx}: calc_varex_thresh" + thresh_label = thresh_label.lower() + if thresh_label is None or thresh_label is "": + varex_name = "varex_thresh" + perc_name = "perc" + else: + varex_name = f"varex_{thresh_label}_thresh" + perc_name = f"{thresh_label}_perc" + + outputs = { + "decision_node_idx": selector.current_node_idx, + "node_label": None, + varex_name: None, + "used_metrics": set(["variance explained"]), + } + if ( + isinstance(percentile_thresh, (int, float)) + and (percentile_thresh > 0) + and (percentile_thresh < 100) + ): + outputs["calc_cross_comp_metrics"] = [varex_name, perc_name] + outputs[perc_name] = percentile_thresh + else: + raise ValueError( + f"percentile_thresh must be a number between 0 & 100. It is: {percentile_thresh}" + ) + + if only_used_metrics: + return outputs["used_metrics"] + + if varex_name in selector.cross_component_metrics: + LGR.warning( + f"{varex_name} already calculated. Overwriting previous value in {function_name_idx}" + ) + + if perc_name in selector.cross_component_metrics: + LGR.warning( + f"{perc_name} already calculated. Overwriting previous value in {function_name_idx}" + ) + + if custom_node_label: + outputs["node_label"] = custom_node_label + else: + outputs["node_label"] = f"Calc {varex_name}" + + if log_extra_info: + LGR.info(log_extra_info) + if log_extra_report: + RepLGR.info(log_extra_report) + + comps2use = selectcomps2use(selector, decide_comps) + confirm_metrics_exist( + selector.component_table, outputs["used_metrics"], function_name=function_name_idx + ) + + if not comps2use: + log_decision_tree_step( + function_name_idx, + comps2use, + decide_comps=decide_comps, + ) + else: + + outputs[varex_name] = scoreatpercentile( + selector.component_table.loc[comps2use, "variance explained"], percentile_thresh + ) + + selector.cross_component_metrics[varex_name] = outputs[varex_name] + + log_decision_tree_step(function_name_idx, comps2use, calc_outputs=outputs) + + selector.tree["nodes"][selector.current_node_idx]["outputs"] = outputs + + return selector + + +calc_varex_thresh.__doc__ = calc_varex_thresh.__doc__.format(**decision_docs) + + +def calc_extend_factor( + selector, + log_extra_report="", + log_extra_info="", + custom_node_label="", + only_used_metrics=False, + extend_factor=None, +): + """ + Calculates the scaler used to set a threshold for d_table_score + + Parameters + ---------- + {selector} + {decide_comps} + {log_extra} + {custom_node_label} + {only_used_metrics} + extend_factor: :obj:`float` + If a number, then use rather than calculating anything. If None than calculate. default=None + + Returns + ------- + {basicreturns} + + """ + + outputs = { + "used_metrics": set(), + "decision_node_idx": selector.current_node_idx, + "node_label": None, + "extend_factor": None, + "calc_cross_comp_metrics": ["extend_factor"], + } + + if only_used_metrics: + return outputs["used_metrics"] + + function_name_idx = f"Step {selector.current_node_idx}: calc_extend_factor" + + if "extend_factor" in selector.cross_component_metrics: + LGR.warning( + f"extend_factor already calculated. 
Overwriting previous value in {function_name_idx}" + ) + + if custom_node_label: + outputs["node_label"] = custom_node_label + else: + outputs["node_label"] = "Calc extend_factor" + + if log_extra_info: + LGR.info(log_extra_info) + if log_extra_report: + RepLGR.info(log_extra_report) + + outputs["extend_factor"] = get_extend_factor( + n_vols=selector.cross_component_metrics["n_vols"], extend_factor=extend_factor + ) + + selector.cross_component_metrics["extend_factor"] = outputs["extend_factor"] + + log_decision_tree_step(function_name_idx, -1, calc_outputs=outputs) + + selector.tree["nodes"][selector.current_node_idx]["outputs"] = outputs + + return selector + + +calc_extend_factor.__doc__ = calc_extend_factor.__doc__.format(**decision_docs) + + +def calc_max_good_meanmetricrank( + selector, + decide_comps, + metric_suffix=None, + log_extra_report="", + log_extra_info="", + custom_node_label="", + only_used_metrics=False, +): + """ + Calculates the max_good_meanmetricrank to use in the kundu decision tree + This is the number of components seleted with decide_comps * the extend_factor + calculated in calc_extend_factor + + Parameters + ---------- + {selector} + {decide_comps} + metric_suffix: :obj:`str` + By default, this will output a value called "max_good_meanmetricrank" + If this variable is not None or "" then it will output: "max_good_meanmetricrank_[metric_suffix] + {log_extra} + {custom_node_label} + {only_used_metrics} + + Returns + ------- + {basicreturns} + + Note + ---- + "meanmetricrank" is the same as "d_table_score" and is used to set a threshold for + the "d_table" values in the component table. This metric ranks + the components based on 5 metrics and then outputs the mean rank across the 5 metrics. + Thus "meanmetricrank" is a slightly better description but d_table was used in earlier + versions of this code. It might be worth consistently using the same term, but this + note will hopefully suffice for now. + + """ + + function_name_idx = f"Step {selector.current_node_idx}: calc_max_good_meanmetricrank" + + if ( + (metric_suffix is not None) + and (metric_suffix is not "") + and isinstance(metric_suffix, str) + ): + metric_name = f"max_good_meanmetricrank_{metric_suffix}" + else: + metric_name = "max_good_meanmetricrank" + + outputs = { + "decision_node_idx": selector.current_node_idx, + "node_label": None, + metric_name: None, + "used_metrics": set(), + "calc_cross_comp_metrics": [metric_name], + } + + if only_used_metrics: + return outputs["used_metrics"] + + if metric_name in selector.cross_component_metrics: + LGR.warning( + f"max_good_meanmetricrank already calculated. 
def calc_max_good_meanmetricrank(
    selector,
    decide_comps,
    metric_suffix=None,
    log_extra_report="",
    log_extra_info="",
    custom_node_label="",
    only_used_metrics=False,
):
    """
    Calculates the max_good_meanmetricrank to use in the kundu decision tree.
    This is the number of components selected with decide_comps * the extend_factor
    calculated in calc_extend_factor

    Parameters
    ----------
    {selector}
    {decide_comps}
    metric_suffix: :obj:`str`
        By default, this will output a value called "max_good_meanmetricrank".
        If this variable is not None or "" then it will output:
        "max_good_meanmetricrank_[metric_suffix]"
    {log_extra}
    {custom_node_label}
    {only_used_metrics}

    Returns
    -------
    {basicreturns}

    Note
    ----
    "meanmetricrank" is the same as "d_table_score" and is used to set a threshold for
    the "d_table" values in the component table. This metric ranks
    the components based on 5 metrics and then outputs the mean rank across the 5 metrics.
    Thus "meanmetricrank" is a slightly better description but d_table was used in earlier
    versions of this code. It might be worth consistently using the same term, but this
    note will hopefully suffice for now.

    """

    function_name_idx = f"Step {selector.current_node_idx}: calc_max_good_meanmetricrank"

    if (
        (metric_suffix is not None)
        and (metric_suffix != "")
        and isinstance(metric_suffix, str)
    ):
        metric_name = f"max_good_meanmetricrank_{metric_suffix}"
    else:
        metric_name = "max_good_meanmetricrank"

    outputs = {
        "decision_node_idx": selector.current_node_idx,
        "node_label": None,
        metric_name: None,
        "used_metrics": set(),
        "calc_cross_comp_metrics": [metric_name],
    }

    if only_used_metrics:
        return outputs["used_metrics"]

    if metric_name in selector.cross_component_metrics:
        LGR.warning(
            f"{metric_name} already calculated. Overwriting previous value in {function_name_idx}"
        )

    if custom_node_label:
        outputs["node_label"] = custom_node_label
    else:
        outputs["node_label"] = f"Calc {metric_name}"

    if log_extra_info:
        LGR.info(log_extra_info)
    if log_extra_report:
        RepLGR.info(log_extra_report)

    comps2use = selectcomps2use(selector, decide_comps)
    confirm_metrics_exist(
        selector.component_table, outputs["used_metrics"], function_name=function_name_idx
    )

    if not comps2use:
        log_decision_tree_step(
            function_name_idx,
            comps2use,
            decide_comps=decide_comps,
        )
    else:
        num_prov_accept = len(comps2use)
        if "extend_factor" in selector.cross_component_metrics:
            extend_factor = selector.cross_component_metrics["extend_factor"]
            outputs[metric_name] = extend_factor * num_prov_accept
        else:
            raise ValueError(
                f"extend_factor needs to be in cross_component_metrics for {function_name_idx}"
            )

        selector.cross_component_metrics[metric_name] = outputs[metric_name]

        log_decision_tree_step(function_name_idx, comps2use, calc_outputs=outputs)

    selector.tree["nodes"][selector.current_node_idx]["outputs"] = outputs

    return selector


calc_max_good_meanmetricrank.__doc__ = calc_max_good_meanmetricrank.__doc__.format(**decision_docs)
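# Worked example (illustrative numbers, not from the patch): if 20 components
# are currently selected by decide_comps and extend_factor was previously
# calculated as 2.0, then
#
#     max_good_meanmetricrank = 2.0 * 20  # -> 40.0
#
# and later nodes can reject components whose mean metric rank exceeds 40.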
def calc_varex_kappa_ratio(
    selector,
    decide_comps,
    log_extra_report="",
    log_extra_info="",
    custom_node_label="",
    only_used_metrics=False,
):
    """
    Calculates the variance explained / kappa ratio for the components in decide_comps
    and adds those values to a new column in the component_table titled "varex kappa ratio".
    Also calculates kappa_rate, which is a cross_component_metric

    Parameters
    ----------
    {selector}
    {decide_comps}
    {log_extra}
    {custom_node_label}
    {only_used_metrics}

    Returns
    -------
    {basicreturns}

    Note
    ----
    These measures are used in the original kundu decision tree.
    kappa_rate = (max-min kappa values of selected components)/(max-min variance explained)
    varex kappa ratio = kappa_rate * "variance explained"/"kappa" for each component
    Components with larger variance and smaller kappa are more likely to be rejected.
    This metric sometimes causes issues with high magnitude BOLD responses
    such as the V1 response to a block-design flashing checkerboard
    """

    function_name_idx = f"Step {selector.current_node_idx}: calc_varex_kappa_ratio"

    outputs = {
        "decision_node_idx": selector.current_node_idx,
        "node_label": None,
        "kappa_rate": None,
        "used_metrics": {"kappa", "variance explained"},
        "calc_cross_comp_metrics": ["kappa_rate"],
        "added_component_table_metrics": ["varex kappa ratio"],
    }

    if only_used_metrics:
        return outputs["used_metrics"]

    if "kappa_rate" in selector.cross_component_metrics:
        LGR.warning(
            f"kappa_rate already calculated. Overwriting previous value in {function_name_idx}"
        )

    if "varex kappa ratio" in selector.component_table:
        raise ValueError(
            "'varex kappa ratio' is already a column in the component_table. "
            f"Recalculating in {function_name_idx} can cause problems since these "
            "are only calculated on a subset of components"
        )

    if custom_node_label:
        outputs["node_label"] = custom_node_label
    else:
        outputs["node_label"] = "Calc varex kappa ratio"

    if log_extra_info:
        LGR.info(log_extra_info)
    if log_extra_report:
        RepLGR.info(log_extra_report)

    comps2use = selectcomps2use(selector, decide_comps)
    confirm_metrics_exist(
        selector.component_table, outputs["used_metrics"], function_name=function_name_idx
    )

    if not comps2use:
        log_decision_tree_step(
            function_name_idx,
            comps2use,
            decide_comps=decide_comps,
        )
    else:
        kappa_rate = (
            np.nanmax(selector.component_table.loc[comps2use, "kappa"])
            - np.nanmin(selector.component_table.loc[comps2use, "kappa"])
        ) / (
            np.nanmax(selector.component_table.loc[comps2use, "variance explained"])
            - np.nanmin(selector.component_table.loc[comps2use, "variance explained"])
        )
        outputs["kappa_rate"] = kappa_rate
        LGR.info(f"Kappa rate found to be {kappa_rate} from components {comps2use}")
        selector.component_table["varex kappa ratio"] = (
            kappa_rate
            * selector.component_table.loc[comps2use, "variance explained"]
            / selector.component_table.loc[comps2use, "kappa"]
        )
        # Unclear if necessary, but this may clean up a weird issue on passing
        # references in a data frame.
        # See longer comment in selection_utils.comptable_classification_changer
        selector.component_table = selector.component_table.copy()

        selector.cross_component_metrics["kappa_rate"] = outputs["kappa_rate"]

        log_decision_tree_step(function_name_idx, comps2use, calc_outputs=outputs)

    selector.tree["nodes"][selector.current_node_idx]["outputs"] = outputs

    return selector


calc_varex_kappa_ratio.__doc__ = calc_varex_kappa_ratio.__doc__.format(**decision_docs)
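# Worked example of the Note above (illustrative numbers): if the selected
# components' kappa values span 10-60 and their variance explained values
# span 1-11, then
#
#     kappa_rate = (60 - 10) / (11 - 1)  # -> 5.0
#
# and a component with variance explained = 8 and kappa = 20 gets
# varex kappa ratio = 5.0 * 8 / 20 = 2.0.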
def calc_revised_meanmetricrank_guesses(
    selector,
    decide_comps,
    restrict_factor=2,
    log_extra_report="",
    log_extra_info="",
    custom_node_label="",
    only_used_metrics=False,
):
    """
    Calculates a new d_table_score (meanmetricrank) on a subset of components
    defined in decide_comps.
    Also saves several cross_component_metrics that are used for various thresholds.
    These are:
    num_acc_guess: A guess of the final number of accepted components
    restrict_factor: An inputted scaling value
    conservative_guess: A conservative guess of the final number of accepted components
    (num_acc_guess/restrict_factor)

    Parameters
    ----------
    {selector}
    {decide_comps}
    restrict_factor: :obj:`int` or :obj:`float`
        A scaling factor to scale between num_acc_guess and conservative_guess. default=2
    {log_extra}
    {custom_node_label}
    {only_used_metrics}

    Returns
    -------
    {basicreturns}

    Note
    ----
    These measures are used in the original kundu decision tree.
    Since the d_table_score is a mean rank across 5 metrics, those ranks
    will change when they're calculated on a subset of components. It's
    unclear how much the relative magnitudes will change and when the
    recalculation will affect results, but this was in the original
    kundu tree and will be replicated here to allow for comparisons
    """

    function_name_idx = f"Step {selector.current_node_idx}: calc_revised_meanmetricrank_guesses"

    outputs = {
        "decision_node_idx": selector.current_node_idx,
        "node_label": None,
        "num_acc_guess": None,
        "conservative_guess": None,
        "restrict_factor": None,
        "used_metrics": {
            "kappa",
            "dice_FT2",
            "signal-noise_t",
            "countnoise",
            "countsigFT2",
            "rho",
        },
        "used_cross_component_metrics": {"kappa_elbow_kundu", "rho_elbow_kundu"},
        "calc_cross_comp_metrics": ["num_acc_guess", "conservative_guess", "restrict_factor"],
        "added_component_table_metrics": [f"d_table_score_node{selector.current_node_idx}"],
    }

    if only_used_metrics:
        return outputs["used_metrics"]

    if "num_acc_guess" in selector.cross_component_metrics:
        LGR.warning(
            f"num_acc_guess already calculated. Overwriting previous value in {function_name_idx}"
        )

    if "conservative_guess" in selector.cross_component_metrics:
        LGR.warning(
            f"conservative_guess already calculated. "
            f"Overwriting previous value in {function_name_idx}"
        )

    if "restrict_factor" in selector.cross_component_metrics:
        LGR.warning(
            f"restrict_factor already calculated. Overwriting previous value in {function_name_idx}"
        )

    if not isinstance(restrict_factor, (int, float)):
        raise ValueError(f"restrict_factor needs to be a number. It is: {restrict_factor}")

    if f"d_table_score_node{selector.current_node_idx}" in selector.component_table:
        raise ValueError(
            f"d_table_score_node{selector.current_node_idx} is already a column in "
            f"the component_table. Recalculating in {function_name_idx} can cause "
            "problems since these are only calculated on a subset of components"
        )

    for xcompmetric in outputs["used_cross_component_metrics"]:
        if xcompmetric not in selector.cross_component_metrics:
            raise ValueError(
                f"{xcompmetric} not in cross_component_metrics. "
                f"It needs to be calculated before {function_name_idx}"
            )

    if custom_node_label:
        outputs["node_label"] = custom_node_label
    else:
        outputs["node_label"] = "Calc revised d_table_score & num accepted component guesses"

    if log_extra_info:
        LGR.info(log_extra_info)
    if log_extra_report:
        RepLGR.info(log_extra_report)

    comps2use = selectcomps2use(selector, decide_comps)
    confirm_metrics_exist(
        selector.component_table, outputs["used_metrics"], function_name=function_name_idx
    )

    if not comps2use:
        log_decision_tree_step(
            function_name_idx,
            comps2use,
            decide_comps=decide_comps,
        )
    else:
        outputs["restrict_factor"] = restrict_factor
        outputs["num_acc_guess"] = int(
            np.mean(
                [
                    np.sum(
                        (
                            selector.component_table.loc[comps2use, "kappa"]
                            > selector.cross_component_metrics["kappa_elbow_kundu"]
                        )
                        & (
                            selector.component_table.loc[comps2use, "rho"]
                            < selector.cross_component_metrics["rho_elbow_kundu"]
                        )
                    ),
                    np.sum(
                        selector.component_table.loc[comps2use, "kappa"]
                        > selector.cross_component_metrics["kappa_elbow_kundu"]
                    ),
                ]
            )
        )
        outputs["conservative_guess"] = outputs["num_acc_guess"] / outputs["restrict_factor"]

        tmp_kappa = selector.component_table.loc[comps2use, "kappa"].to_numpy()
        tmp_dice_FT2 = selector.component_table.loc[comps2use, "dice_FT2"].to_numpy()
        tmp_signal_m_noise_t = selector.component_table.loc[comps2use, "signal-noise_t"].to_numpy()
        tmp_countnoise = selector.component_table.loc[comps2use, "countnoise"].to_numpy()
        tmp_countsigFT2 = selector.component_table.loc[comps2use, "countsigFT2"].to_numpy()
        tmp_d_table_score = generate_decision_table_score(
            tmp_kappa, tmp_dice_FT2, tmp_signal_m_noise_t, tmp_countnoise, tmp_countsigFT2
        )
        selector.component_table[f"d_table_score_node{selector.current_node_idx}"] = np.NaN
        selector.component_table.loc[
            comps2use, f"d_table_score_node{selector.current_node_idx}"
        ] = tmp_d_table_score
        # Unclear if necessary, but this may clean up a weird issue on passing
        # references in a data frame.
        # See longer comment in selection_utils.comptable_classification_changer
        selector.component_table = selector.component_table.copy()

        selector.cross_component_metrics["conservative_guess"] = outputs["conservative_guess"]
        selector.cross_component_metrics["num_acc_guess"] = outputs["num_acc_guess"]
        selector.cross_component_metrics["restrict_factor"] = outputs["restrict_factor"]

        log_decision_tree_step(function_name_idx, comps2use, calc_outputs=outputs)

    selector.tree["nodes"][selector.current_node_idx]["outputs"] = outputs

    return selector


calc_revised_meanmetricrank_guesses.__doc__ = calc_revised_meanmetricrank_guesses.__doc__.format(
    **decision_docs
)
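# Worked example (illustrative numbers, not from the patch): if 11 of the
# selected components have kappa > kappa_elbow_kundu, and 7 of those also have
# rho < rho_elbow_kundu, then
#
#     num_acc_guess = int(np.mean([7, 11]))  # -> 9
#     conservative_guess = 9 / 2              # -> 4.5, with restrict_factor=2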
diff --git a/tedana/selection/selection_utils.py b/tedana/selection/selection_utils.py
new file mode 100644
index 000000000..1fd0850fe
--- /dev/null
+++ b/tedana/selection/selection_utils.py
@@ -0,0 +1,755 @@
+"""
Utility functions for tedana.selection
"""

import logging
import re
import numpy as np
from tedana.stats import getfbounds
from tedana.metrics.dependence import generate_decision_table_score

LGR = logging.getLogger("GENERAL")
RepLGR = logging.getLogger("REPORT")
RefLGR = logging.getLogger("REFERENCES")

##############################################################
# Functions that are used for interacting with component_table
##############################################################


def selectcomps2use(selector, decide_comps):
    """
    Give a list of component numbers that fit the classification types in
    decide_comps.

    Parameters
    ----------
    selector: :obj:`tedana.selection.ComponentSelector`
        Only uses the component_table in this object
    decide_comps: :obj:`str` or :obj:`list[str]` or :obj:`list[int]`
        This is a string or a list of strings describing what classifications
        of components to operate on, using default or intermediate_classification
        labels. For example: decide_comps='unclassified' means to operate only on
        unclassified components. The label 'all' will operate on all components
        regardless of classification. This can also be used to pass through a list
        of component indices to comps2use

    Returns
    -------
    comps2use: :obj:`list[int]`
        A list of component indices that should be used by a function
    """

    if "classification" not in selector.component_table:
        raise ValueError(
            "selector.component_table needs a 'classification' column to run selectcomps2use"
        )

    if isinstance(decide_comps, (str, int)):
        decide_comps = [decide_comps]
    if isinstance(decide_comps, list) and (decide_comps[0] == "all"):
        # All components with any string in the classification field
        # are set to True
        comps2use = list(range(selector.component_table.shape[0]))

    elif isinstance(decide_comps, list) and all(isinstance(elem, str) for elem in decide_comps):
        comps2use = []
        for didx in range(len(decide_comps)):
            newcomps2use = selector.component_table.index[
                selector.component_table["classification"] == decide_comps[didx]
            ].tolist()
            comps2use = list(set(comps2use + newcomps2use))
    elif isinstance(decide_comps, list) and all(isinstance(elem, int) for elem in decide_comps):
        # decide_comps is already a list of integer component indices
        if len(selector.component_table) <= max(decide_comps):
            raise ValueError(
                "decide_comps for selectcomps2use is selecting for a component with "
                f"index {max(decide_comps)} (0 indexing) which is greater than the "
                f"number of components: {len(selector.component_table)}"
            )
        elif min(decide_comps) < 0:
            raise ValueError(
                "decide_comps for selectcomps2use is selecting for a component with "
                f"index {min(decide_comps)}, which is less than 0"
            )
        else:
            comps2use = decide_comps
    else:
        raise ValueError(
            "decide_comps in selectcomps2use needs to be a list or a single element "
            f"of strings or integers. It is {decide_comps}"
        )

    # If no components are selected, then return None.
    # The function that called this can check for None and exit before
    # attempting any computations on no data
    # if not comps2use:
    #     comps2use = None

    return comps2use
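# Example (illustrative sketch): for a selector whose component_table has a
# "classification" column equal to ["accepted", "unclassified", "rejected"],
#
#     selectcomps2use(selector, "unclassified")            # -> [1]
#     selectcomps2use(selector, ["accepted", "rejected"])  # -> [0, 2] (set union, order not guaranteed)
#     selectcomps2use(selector, "all")                     # -> [0, 1, 2]
#     selectcomps2use(selector, [0, 2])                    # -> [0, 2] (indices passed through)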
def change_comptable_classifications(
    selector,
    ifTrue,
    ifFalse,
    decision_boolean,
    tag_ifTrue=None,
    tag_ifFalse=None,
    dont_warn_reclassify=False,
):
    """
    Given information on whether a decision criterion is true or false for each
    component, change or don't change the component classification

    Parameters
    ----------
    selector: :obj:`tedana.selection.ComponentSelector`
        The attributes used are component_table, component_status_table, and
        current_node_idx
    ifTrue, ifFalse: :obj:`str`
        If the condition in this step is true or false, give the component
        the label in this string. Options are 'accepted', 'rejected',
        'nochange', or intermediate_classification labels predefined in the
        decision tree. If 'nochange' then don't change the current component
        classification
    decision_boolean: :obj:`pd.Series(bool)`
        A dataframe column of equal length to component_table where each value
        is True or False.
    tag_ifTrue, tag_ifFalse: :obj:`str`
        A string containing a label in classification_tags that will be added to
        the classification_tags column in component_table if a component is
        classified as true or false. default=None
    dont_warn_reclassify: :obj:`bool`
        If this function changes a component classification from accepted or
        rejected to something else, it gives a warning. If this is True, that
        warning is suppressed. default=False

    Returns
    -------
    selector: :obj:`tedana.selection.ComponentSelector`
        component_table["classification"] will reflect any new
        classifications.
        component_status_table will have a new column titled
        "Node current_node_idx" that is a copy of the updated classifications
        column.
        component_table["classification_tags"] will be updated to include any
        new tags. Each tag should appear only once in the string and tags will
        be separated by commas.
    numTrue, numFalse: :obj:`int`
        The number of True and False components in decision_boolean

    Note
    ----
    If a classification is changed away from accepted or rejected and
    dont_warn_reclassify is False, then a warning is logged
    """
    selector = comptable_classification_changer(
        selector,
        True,
        ifTrue,
        decision_boolean,
        tag_ifTrue,
        dont_warn_reclassify=dont_warn_reclassify,
    )
    selector = comptable_classification_changer(
        selector,
        False,
        ifFalse,
        decision_boolean,
        tag_ifFalse,
        dont_warn_reclassify=dont_warn_reclassify,
    )

    selector.component_status_table[
        f"Node {selector.current_node_idx}"
    ] = selector.component_table["classification"]

    numTrue = decision_boolean.sum()
    numFalse = np.logical_not(decision_boolean).sum()
    return selector, numTrue, numFalse


def comptable_classification_changer(
    selector,
    boolstate,
    classify_if,
    decision_boolean,
    tag_if=None,
    dont_warn_reclassify=False,
):
    """
    Implement the component classification changes specified in
    change_comptable_classifications.

    Parameters
    ----------
    selector: :obj:`tedana.selection.ComponentSelector`
        The attributes used are component_table, component_status_table, and
        current_node_idx
    boolstate : :obj:`bool`
        Change classifications only for True or False components in
        decision_boolean based on this variable
    classify_if: :obj:`str`
        This should be ifTrue or ifFalse to match boolstate.
        If the condition in this step is true or false, give the component
        the label in this string. Options are 'accepted', 'rejected',
        'nochange', or intermediate_classification labels predefined in the
        decision tree. If 'nochange' then don't change the current component
        classification
    decision_boolean: :obj:`pd.Series(bool)`
        A dataframe column of equal length to component_table where each value
        is True or False.
    tag_if: :obj:`str`
        This should be tag_ifTrue or tag_ifFalse to match boolstate.
        A string containing a label in classification_tags that will be added to
        the classification_tags column in component_table if a component is
        classified as true or false. default=None
    dont_warn_reclassify: :obj:`bool`
        If this function changes a component classification from accepted or
        rejected to something else, it gives a warning. If this is True, that
        warning is suppressed. default=False

    Returns
    -------
    selector: :obj:`tedana.selection.ComponentSelector`
        Operates on the True OR False components depending on boolstate
        component_table["classification"] will reflect any new
        classifications.
        component_status_table will have a new column titled
        "Node current_node_idx" that is a copy of the updated classifications
        column.
        component_table["classification_tags"] will be updated to include any
        new tags. Each tag should appear only once in the string and tags will
        be separated by commas.
        If a classification is changed away from accepted or rejected and
        dont_warn_reclassify is False, then a warning is logged
    """
    if classify_if != "nochange":
        changeidx = decision_boolean.index[np.asarray(decision_boolean) == boolstate]
        if not changeidx.empty:
            current_classifications = set(
                selector.component_table.loc[changeidx, "classification"].tolist()
            )
            if current_classifications.intersection({"accepted", "rejected"}):
                if not dont_warn_reclassify:
                    # don't make a warning if classify_if matches the current classification
                    # That is, reject->reject shouldn't throw a warning
                    if (
                        ("accepted" in current_classifications) and (classify_if != "accepted")
                    ) or (("rejected" in current_classifications) and (classify_if != "rejected")):
                        LGR.warning(
                            f"Step {selector.current_node_idx}: Some classifications are"
                            " changing away from accepted or rejected. Once a component is "
                            "accepted or rejected, it shouldn't be reclassified"
                        )
            selector.component_table.loc[changeidx, "classification"] = classify_if
            # NOTE: CAUTION: extremely bizarre pandas behavior violates the guarantee
            # that df['COLUMN'] matches the df as a whole in this case.
            # We cannot replicate this consistently, but it seems to happen in some
            # datasets where decide_comps does not select all components. We strongly
            # suspect it has something to do with passing via reference a pandas
            # data series.
            # We do not understand why, but copying the table and thus removing references
            # to past memory locations seems to reliably solve this issue.
            # TODO: understand why this happens and avoid the problem without this hack.
            # Comment out the line below to re-introduce the original bug. For the kundu
            # decision tree it happens on node 6, which is the first time decide_comps
            # selects a subset of components
            selector.component_table = selector.component_table.copy()

            if tag_if is not None:  # only run if a tag is provided
                for idx in changeidx:
                    tmpstr = selector.component_table.loc[idx, "classification_tags"]
                    if tmpstr != "":
                        tmpset = set(tmpstr.split(","))
                        tmpset.update([tag_if])
                    else:
                        tmpset = set([tag_if])
                    selector.component_table.loc[idx, "classification_tags"] = ",".join(
                        str(s) for s in tmpset
                    )
        else:
            LGR.info(
                f"Step {selector.current_node_idx}: No components fit criterion "
                f"{boolstate} to change classification"
            )
    return selector


def clean_dataframe(component_table):
    """
    Reorder columns in component table so that "classification"
    and "classification_tags" are last.

    Parameters
    ----------
    component_table : (C x M) :obj:`pandas.DataFrame`
        Component metric table. One row for each component, with a column for
        each metric

    Returns
    -------
    component_table : (C x M) :obj:`pandas.DataFrame`
        Same data as input, but the final two columns are "classification"
        and "classification_tags"
    """
    cols_at_end = ["classification", "classification_tags"]
    component_table = component_table[
        [c for c in component_table if c not in cols_at_end]
        + [c for c in cols_at_end if c in component_table]
    ]

    return component_table
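# Example tying the functions above together (illustrative sketch, not a node
# from the patch): a decision step that rejects components where rho exceeds
# kappa could be written as:
#
#     decision_boolean = selector.component_table["rho"] > selector.component_table["kappa"]
#     selector, numTrue, numFalse = change_comptable_classifications(
#         selector, "rejected", "nochange", decision_boolean, tag_ifTrue="Unlikely BOLD"
#     )
#
# Because tags are stored as a set before being rejoined with commas, applying
# the same tag twice leaves a component's classification_tags string unchanged.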
#################################################
# Functions to validate inputs or log information
#################################################


def confirm_metrics_exist(component_table, necessary_metrics, function_name=None):
    """
    Confirm that all metrics declared in necessary_metrics are
    already included in comptable.

    Parameters
    ----------
    component_table : (C x M) :obj:`pandas.DataFrame`
        Component metric table. One row for each component, with a column for
        each metric. The index should be the component number.
    necessary_metrics : :obj:`set`
        A set of strings of metric names
    function_name : :obj:`str`
        Text identifying the function name that called this function

    Returns
    -------
    metrics_exist : :obj:`bool`
        True if all metrics in necessary_metrics are in component_table.
        If any metric is missing, a ValueError is raised and the program ends

    Notes
    -----
    This doesn't check if there are data in each metric's column, just that
    the columns exist. Also, this requires identical strings for the names
    of the metrics in necessary_metrics and the column labels in component_table
    """

    missing_metrics = necessary_metrics - set(component_table.columns)
    metrics_exist = len(missing_metrics) == 0
    if not metrics_exist:
        if function_name is None:
            function_name = "unknown function"

        error_msg = (
            f"Necessary metrics for {function_name}: "
            f"{necessary_metrics}. "
            f"Comptable metrics: {set(component_table.columns)}. "
            f"MISSING METRICS: {missing_metrics}."
        )
        raise ValueError(error_msg)

    return metrics_exist
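# Example (illustrative sketch):
#
#     confirm_metrics_exist(component_table, {"kappa", "rho"}, function_name="Step 1")
#
# returns True when both columns are present and raises a ValueError naming
# the missing metrics otherwise.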
def log_decision_tree_step(
    function_name_idx,
    comps2use,
    decide_comps=None,
    numTrue=None,
    numFalse=None,
    ifTrue=None,
    ifFalse=None,
    calc_outputs=None,
):
    """
    Logging text to add for every decision tree calculation

    Parameters
    ----------
    function_name_idx: :obj:`str`
        The name of the function that should be logged. By convention, this
        should be "Step current_node_idx: function_name"
    comps2use: :obj:`list[int]` or -1
        A list of component indices that should be used by a function.
        Only used to report no components found if empty and report
        the number of components found if not empty.
        Note: calc_ functions that don't use component metrics do not
        need to use the component_table and may not require selecting
        components. For those functions, set comps2use==-1 to avoid
        logging a warning that no components were found. Currently,
        this is only used by calc_extend_factor
    decide_comps: :obj:`str` or :obj:`list[str]` or :obj:`list[int]`
        This is a string or a list of strings describing what classifications
        of components to operate on. Only used in this function to report
        its contents if no components with these classifications were found
    numTrue, numFalse: :obj:`int`
        The number of components classified as True or False
    ifTrue, ifFalse: :obj:`str`
        If a component is true or false, the classification to assign that
        component
    calc_outputs: :obj:`dict`
        A dictionary with output information from the function. If it contains a key
        "calc_cross_comp_metrics" then the value for that key is a list of
        cross component metrics (i.e. kappa or rho elbows) that were calculated
        within the function. Each of those metrics will also be a key in calc_outputs
        and those keys and values will be logged by this function

    Returns
    -------
    Information is added to the LGR.info logger. This either logs that
    nothing was changed, the number of components classified as true or
    false and what they changed to, or the cross component metrics that were
    calculated
    """

    if not (comps2use == -1) and not comps2use:
        LGR.info(
            f"{function_name_idx} not applied because no remaining components were "
            f"classified as {decide_comps}"
        )
    if ifTrue or ifFalse:
        LGR.info(
            f"{function_name_idx} applied to {len(comps2use)} components. "
            f"{numTrue} True -> {ifTrue}. "
            f"{numFalse} False -> {ifFalse}."
        )
    if calc_outputs:
        if "calc_cross_comp_metrics" in calc_outputs:
            calc_summaries = [
                f"{metric_name}={calc_outputs[metric_name]}"
                for metric_name in calc_outputs["calc_cross_comp_metrics"]
            ]
            LGR.info(f"{function_name_idx} calculated: {', '.join(calc_summaries)}")
        else:
            LGR.warning(
                f"{function_name_idx} logged to write out cross_component_metrics, "
                "but none were calculated"
            )


def log_classification_counts(decision_node_idx, component_table):
    """
    Log the total counts for each component classification in component_table

    Parameters
    ----------
    decision_node_idx : :obj:`int`
        The index number for the function in the decision tree that just
        finished executing
    component_table : (C x M) :obj:`pandas.DataFrame`
        Component metric table. One row for each component, with a column for
        each metric. Only the "classification" column is used in this function

    Returns
    -------
    The info logger will add a line like:
    'Step 4: Total component classifications: 10 accepted, 5 provisionalreject, 8 rejected'
    """

    (classification_labels, label_counts) = np.unique(
        component_table["classification"].values, return_counts=True
    )
    label_summaries = [
        f"{label_counts[i]} {label}" for i, label in enumerate(classification_labels)
    ]
    prelude = f"Step {decision_node_idx}: Total component classifications:"
    out_str = f"{prelude} {', '.join(label_summaries)}"
    LGR.info(out_str)
" + "This error happens when getelbow_cons is incorrectly called on no components. " + "If you see this message, please open an issue at " + "https://github.com/ME-ICA/tedana/issues with the full traceback and any data " + "necessary to reproduce this error, so that we create additional data checks to " + "prevent this from happening." + ) + + arr = np.sort(arr)[::-1] + nk = len(arr) + temp1 = [ + (arr[nk - 5 - ii - 1] > arr[nk - 5 - ii : nk].mean() + 2 * arr[nk - 5 - ii : nk].std()) + for ii in range(nk - 5) + ] + ds = np.array(temp1[::-1], dtype=np.int) + dsum = [] + c_ = 0 + for d_ in ds: + c_ = (c_ + d_) * d_ + dsum.append(c_) + e2 = np.argmax(np.array(dsum)) + elind = np.max([getelbow(arr), e2]) + + if return_val: + return arr[elind] + else: + return elind + + +def getelbow(arr, return_val=False): + """ + Elbow using linear projection method - moderate + + Parameters + ---------- + arr : (C,) array_like + Metric (e.g., Kappa or Rho) values. + return_val : :obj:`bool`, optional + Return the value of the elbow instead of the index. Default: False + + Returns + ------- + :obj:`int` or :obj:`float` + Either the elbow index (if return_val is True) or the values at the + elbow index (if return_val is False) + """ + if arr.ndim != 1: + raise ValueError("Parameter arr should be 1d, not {0}d".format(arr.ndim)) + + if not arr.size: + raise ValueError( + "Empty array detected during elbow calculation. " + "This error happens when getelbow is incorrectly called on no components. " + "If you see this message, please open an issue at " + "https://github.com/ME-ICA/tedana/issues with the full traceback and any data " + "necessary to reproduce this error, so that we create additional data checks to " + "prevent this from happening." + ) + + arr = np.sort(arr)[::-1] + n_components = arr.shape[0] + coords = np.array([np.arange(n_components), arr]) + p = coords - coords[:, 0].reshape(2, 1) + b = p[:, -1] + b_hat = np.reshape(b / np.sqrt((b**2).sum()), (2, 1)) + proj_p_b = p - np.dot(b_hat.T, p) * np.tile(b_hat, (1, n_components)) + d = np.sqrt((proj_p_b**2).sum(axis=0)) + k_min_ind = d.argmax() + + if return_val: + return arr[k_min_ind] + else: + return k_min_ind + + +def kappa_elbow_kundu(comptable, n_echos): + """ + Calculate an elbow for kappa using the approach originally in + Prantik Kundu's MEICA v2.7 code + + Parameters + ---------- + comptable : (C x M) :obj:`pandas.DataFrame` + Component metric table. One row for each component, with a column for + each metric. The index should be the component number. + Only the 'kappa' column is used in this function + n_echos: :obj:`int` + The number of echos in the multi-echo data + + Returns + ------- + kappa_elbow: :obj:`float` + The 'elbow' value for kappa values, above which components are considered + more likely to contain T2* weighted signals + """ + # low kappa threshold + f05, _, f01 = getfbounds(n_echos) + # get kappa values for components below a significance threshold + kappas_nonsig = comptable.loc[comptable["kappa"] < f01, "kappa"] + + # Would an elbow from all Kappa values *ever* be lower than one from + # a subset of lower values? 
+ # Note: Only use the subset of values if it includes at least 5 data point + # That is enough to calculate an elbow of a curve + # This is an arbitrary threshold not from the original meica as is + # worth reconsidering at some point + if kappas_nonsig.size > 5: + kappa_elbow = np.min( + ( + getelbow(kappas_nonsig, return_val=True), + getelbow(comptable["kappa"], return_val=True), + ) + ) + LGR.info(("Calculating kappa elbow based on min of all and nonsig components.")) + else: + kappa_elbow = getelbow(comptable["kappa"], return_val=True) + LGR.info(("Calculating kappa elbow based on all components.")) + + return kappa_elbow + + +def get_extend_factor(n_vols=None, extend_factor=None): + """ + extend_factor is a scaler used to set a threshold for the d_table_score + It is either defined by the number of volumes in the time series or directly + defined by the user. If it is defined by the user, that takes precedence over + using the number of volumes in a calculation + + Parameters + ---------- + n_vols: :obj:`int` + The number of volumes in an fMRI time series. default=None + In the MEICA code, extend_factor was hard-coded to 2 for data with more + than 100 volumes and 3 for data with less than 100 volumes. + Now is linearly ramped from 2-3 for vols between 90 & 110 + + extend_factor: :obj:`float` + The scaler used to set a threshold for d_table_score. default=None + + Returns + ------- + extend_factor: :obj:`float` + + Note + ---- + Either n_vols OR extend_factor is a required input + """ + + if extend_factor: + if isinstance(extend_factor, int): + extend_factor = float(extend_factor) + LGR.info("extend_factor={}, as defined by user".format(extend_factor)) + elif n_vols: + if n_vols < 90: + extend_factor = 3.0 + elif n_vols < 110: + extend_factor = 2.0 + (n_vols - 90) / 20.0 + else: + extend_factor = 2.0 + LGR.info("extend_factor={}, based on number of fMRI volumes".format(extend_factor)) + else: + error_msg = "get_extend_factor need n_vols or extend_factor as an input" + LGR.error(error_msg) + raise ValueError(error_msg) + return extend_factor + + +# This will likely need to be revived to run the kundu decision tree, but it will be slightly differe +# So commenting out for now. +def get_new_meanmetricrank(component_table, comps2use, decision_node_idx, calc_new_rank=False): + """ + If a revised d_table_score was already calculated, use that. 
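# Worked examples for getelbow and get_extend_factor above (illustrative
# numbers, not from the patch):
#
#     # getelbow picks the point farthest from the line joining the largest
#     # and smallest values, so a sharply kinked curve elbows at the kink:
#     getelbow(np.array([10.0, 9.0, 8.0, 2.0, 1.5, 1.0]))                   # -> 3
#     getelbow(np.array([10.0, 9.0, 8.0, 2.0, 1.5, 1.0]), return_val=True)  # -> 2.0
#
#     # get_extend_factor ramps linearly between 90 and 110 volumes:
#     get_extend_factor(n_vols=80)        # -> 3.0
#     get_extend_factor(n_vols=100)       # -> 2.5  (2 + (100 - 90) / 20)
#     get_extend_factor(n_vols=200)       # -> 2.0
#     get_extend_factor(extend_factor=2)  # -> 2.0, user value takes precedence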
# This will likely need to be revived to run the kundu decision tree,
# but it will be slightly different. So commenting out for now.
def get_new_meanmetricrank(component_table, comps2use, decision_node_idx, calc_new_rank=False):
    """
    If a revised d_table_score was already calculated, use that.
    If not, calculate a new d_table_score based on the components
    identified in comps2use

    Parameters
    ----------
    component_table : (C x M) :obj:`pandas.DataFrame`
        Component metric table. One row for each component, with a column for
        each metric
    comps2use: :obj:`list[int]`
        A list of component indices used to calculate the new rank
    decision_node_idx: :obj:`int`
        The index for the current decision node
    calc_new_rank: :obj:`bool`
        Calculate a new d_table_score even if revised scores with the same
        labels were already calculated

    Returns
    -------
    meanmetricrank : :obj:`pandas.Series`
        The d_table_score values for the components in comps2use
    component_table : (C x M) :obj:`pandas.DataFrame`
        The component table, including the d_table_score column
    """
    rank_label = f"d_table_score_node{decision_node_idx}"
    if not calc_new_rank and (rank_label in component_table.columns):
        # return existing
        LGR.info(
            f"{rank_label} already calculated so not recalculating in node {decision_node_idx}"
        )
        return component_table[rank_label], component_table

    # get the array of ranks
    ranks = generate_decision_table_score(
        component_table.loc[comps2use, "kappa"],
        component_table.loc[comps2use, "dice_FT2"],
        component_table.loc[comps2use, "signal-noise_t"],
        component_table.loc[comps2use, "countnoise"],
        component_table.loc[comps2use, "countsigFT2"],
    )
    # see if we need to make a new column
    if rank_label not in component_table.columns:
        component_table[rank_label] = np.zeros(component_table.shape[0]) * np.nan

    # fill in the column with the components of interest
    for c, rank in zip(comps2use, ranks):
        component_table.loc[c, rank_label] = rank

    return component_table[rank_label].copy(), component_table.copy()


# Not currently being used and hopefully will never again be used
# def prev_classified_comps(comptable, decision_node_idx, classification_label, prev_X_steps=0):
#     """
#     Output a list of components with a specific label during the current or
#     previous X steps of the decision tree. For example, if
#     classification_label = ['provisionalaccept'] and prev_X_steps = 0
#     then this outputs the indices of components that are currently
#     classified as provisionalaccept. If prev_X_steps=2, then this will
#     output components that are classified as provisionalaccept or were
#     classified as such any time before the previous two decision tree steps

#     Parameters
#     ----------
#     comptable
#     decision_node_idx: :obj:`int`
#         The index of the node in the decision tree that called this function
#     classification_label: :obj:`list[str]`
#         A list of strings containing classification labels to identify in components
#         For example: ['provisionalaccept']
#     prev_X_steps: :obj:`int`
#         If 0, then just count the number of provisionally accepted or rejected
#         or unclassified components in the current node. If this is a positive
#         integer, then also check if a component was in one of those three
#         categories in the prev_X_steps previous nodes. default=0

#     Returns
#     -------
#     full_comps2use: :obj:`list[int]`
#         A list of indices of components that have or previously had a label
#         in classification_label
#     """

#     full_comps2use = selectcomps2use(comptable, classification_label)
#     rationales = comptable["rationale"]

#     if prev_X_steps > 0:  # if checking classifications in previous nodes
#         for compidx in range(len(comptable)):
#             tmp_rationale = rationales.values[compidx]
#             tmp_list = re.split(":|;| ", tmp_rationale)
#             while "" in tmp_list:  # remove blank strings after splitting rationale
#                 tmp_list.remove("")
#             # Check the previous nodes
#             # This is inefficient, but it should work
#             for didx in range(max(0, decision_node_idx - prev_X_steps), decision_node_idx):
#                 if str(didx) in tmp_list:
#                     didx_loc = tmp_list.index(str(didx))
#                     if didx_loc > 1:
#                         tmp_classifier = tmp_list[didx_loc - 1]
#                         if tmp_classifier in classification_label:
#                             full_comps2use.append(compidx)

#     full_comps2use = list(set(full_comps2use))

#     return full_comps2use
diff --git a/tedana/selection/tedica.py b/tedana/selection/tedica.py
index 854387f8f..d2f4a6d92 100644
--- a/tedana/selection/tedica.py
+++ b/tedana/selection/tedica.py
@@ -6,8 +6,10 @@
 import numpy as np
 from scipy import stats
+from tedana.stats import getfbounds
+from tedana.selection.ComponentSelector import ComponentSelector
 from tedana.metrics import collect
-from tedana.selection._utils import clean_dataframe, getelbow
+from tedana.selection.selection_utils import clean_dataframe, getelbow
 from tedana.stats import getfbounds

 LGR = logging.getLogger("GENERAL")
@@ -42,15 +44,16 @@ def manual_selection(comptable, acc=None, rej=None):
         "BOLD (TE-dependent), non-BOLD (TE-independent), or "
         "uncertain (low-variance)."
     )
+    # NOTE: during a merge conflict this got split oddly in a diff.
+    # Please pay attention to this part to make sure it makes sense.
     if (
         "classification" in comptable.columns
         and "original_classification" not in comptable.columns
     ):
         comptable["original_classification"] = comptable["classification"]
-        comptable["original_rationale"] = comptable["rationale"]
+        # comptable["original_rationale"] = comptable["rationale"]

-    comptable["classification"] = "accepted"
-    comptable["rationale"] = ""
+    # comptable["rationale"] = ""

     all_comps = comptable.index.values
     if acc is not None:
@@ -72,9 +75,10 @@
     ign = np.setdiff1d(all_comps, np.union1d(acc, rej))
     comptable.loc[acc, "classification"] = "accepted"
     comptable.loc[rej, "classification"] = "rejected"
-    comptable.loc[rej, "rationale"] += "I001;"
+    # TODO Need to fix classification_tags here to better interact with any previous tags
+    # comptable.loc[rej, "classification_tags"] += "Manual"
     comptable.loc[ign, "classification"] = "ignored"
-    comptable.loc[ign, "rationale"] += "I001;"
+    # comptable.loc[ign, "classification_tags"] += "Manual"

     # Move decision columns to end
     comptable = clean_dataframe(comptable)
@@ -82,344 +86,34 @@
-def kundu_selection_v2(comptable, n_echos, n_vols):
-    """
-    Classify components as "accepted", "rejected", or "ignored" based on
-    relevant metrics.
-
-    The selection process uses previously calculated parameters listed in
-    comptable for each ICA component such as Kappa (a T2* weighting metric),
-    Rho (an S0 weighting metric), and variance explained.
-    See `Notes` for additional calculated metrics used to classify each
-    component into one of the listed groups.
+def automatic_selection(comptable, n_echos, n_vols, tree="minimal"): + """Classify components based on component table and tree type. Parameters ---------- - comptable : (C x M) :obj:`pandas.DataFrame` - Component metric table. One row for each component, with a column for - each metric. The index should be the component number. - n_echos : :obj:`int` - Number of echos in original data - n_vols : :obj:`int` - Number of volumes in dataset + comptable: pd.DataFrame + The component table to classify + n_echos: int + The number of echoes in this dataset + tree: str + The type of tree to use for the ComponentSelector object Returns ------- - comptable : :obj:`pandas.DataFrame` - Updated component table with additional metrics and with - classification (accepted, rejected, or ignored) - metric_metadata : :obj:`dict` - Dictionary with metadata about calculated metrics. - Each entry corresponds to a column in ``comptable``. - - Notes - ----- - The selection algorithm used in this function was originated in ME-ICA - by Prantik Kundu, and his original implementation is available at: - https://github.com/ME-ICA/me-ica/blob/b2781dd087ab9de99a2ec3925f04f02ce84f0adc/meica.libs/select_model.py - - This component selection process uses multiple, previously calculated - metrics that include kappa, rho, variance explained, noise and spatial - frequency metrics, and measures of spatial overlap across metrics. - - Prantik began to update these selection criteria to use SVMs to distinguish - components, a hypercommented version of this attempt is available at: - https://gist.github.com/emdupre/ca92d52d345d08ee85e104093b81482e - - References - ---------- - * Kundu, P., Brenowitz, N. D., Voon, V., Worbe, Y., - Vértes, P. E., Inati, S. J., ... & Bullmore, E. T. - (2013). Integrated strategy for improving functional - connectivity mapping using multiecho fMRI. Proceedings - of the National Academy of Sciences, 110(40), - 16187-16192. - """ - LGR.info("Performing ICA component selection with Kundu decision tree v2.5") - RepLGR.info( - "Next, component selection was performed to identify " - "BOLD (TE-dependent), non-BOLD (TE-independent), and " - "uncertain (low-variance) components using the Kundu " - "decision tree (v2.5; Kundu et al., 2013)." - ) - RefLGR.info( - "Kundu, P., Brenowitz, N. D., Voon, V., Worbe, Y., " - "Vértes, P. E., Inati, S. J., ... & Bullmore, E. T. " - "(2013). Integrated strategy for improving functional " - "connectivity mapping using multiecho fMRI. Proceedings " - "of the National Academy of Sciences, 110(40), " - "16187-16192." - ) - comptable["classification"] = "accepted" - comptable["rationale"] = "" - - # Set knobs - LOW_PERC = 25 - HIGH_PERC = 90 - if n_vols < 100: - EXTEND_FACTOR = 3 - else: - EXTEND_FACTOR = 2 - RESTRICT_FACTOR = 2 + A dataframe of the component table, after classification and reorder + The metadata associated with the component table - # Lists of components - all_comps = np.arange(comptable.shape[0]) - # unclf is a full list that is whittled down over criteria - # since the default classification is "accepted", at the end of the tree - # the remaining elements in unclf are classified as accepted - unclf = all_comps.copy() - - """ - Step 1: Reject anything that's obviously an artifact - a. 
Estimate a null variance + See Also + -------- + ComponentSelector, the class used to represent the classification process """ - # Rho is higher than Kappa - temp_rej0a = all_comps[(comptable["rho"] > comptable["kappa"])] - comptable.loc[temp_rej0a, "classification"] = "rejected" - comptable.loc[temp_rej0a, "rationale"] += "I002;" - - # Number of significant voxels for S0 model is higher than number for T2 - # model *and* number for T2 model is greater than zero. - temp_rej0b = all_comps[ - ((comptable["countsigFS0"] > comptable["countsigFT2"]) & (comptable["countsigFT2"] > 0)) - ] - comptable.loc[temp_rej0b, "classification"] = "rejected" - comptable.loc[temp_rej0b, "rationale"] += "I003;" - rej = np.union1d(temp_rej0a, temp_rej0b) - - # Dice score for S0 maps is higher than Dice score for T2 maps and variance - # explained is higher than the median across components. - temp_rej1 = all_comps[ - (comptable["dice_FS0"] > comptable["dice_FT2"]) - & (comptable["variance explained"] > np.median(comptable["variance explained"])) - ] - comptable.loc[temp_rej1, "classification"] = "rejected" - comptable.loc[temp_rej1, "rationale"] += "I004;" - rej = np.union1d(temp_rej1, rej) - - # T-value is less than zero (noise has higher F-statistics than signal in - # map) and variance explained is higher than the median across components. - temp_rej2 = unclf[ - (comptable.loc[unclf, "signal-noise_t"] < 0) - & (comptable.loc[unclf, "variance explained"] > np.median(comptable["variance explained"])) - ] - comptable.loc[temp_rej2, "classification"] = "rejected" - comptable.loc[temp_rej2, "rationale"] += "I005;" - rej = np.union1d(temp_rej2, rej) - unclf = np.setdiff1d(unclf, rej) - - # Quit early if no potentially accepted components remain - if len(unclf) == 0: - LGR.warning("No BOLD-like components detected. Ignoring all remaining components.") - ign = sorted(np.setdiff1d(all_comps, rej)) - comptable.loc[ign, "classification"] = "ignored" - comptable.loc[ign, "rationale"] += "I006;" - - # Move decision columns to end - comptable = clean_dataframe(comptable) - metric_metadata = collect.get_metadata(comptable) - return comptable, metric_metadata - - """ - Step 2: Make a guess for what the good components are, in order to - estimate good component properties - a. Not outlier variance - b. Kappa>kappa_elbow - c. Rho getelbow(comptable["kappa"], return_val=True), - "variance explained", - ] - ) - - # Sort component table by variance explained and find outlier components by - # change in variance explained from one component to the next. - # Remove variance-explained outliers from list of components to consider - # for acceptance. These components will have another chance to be accepted - # later on. - # NOTE: We're not sure why this is done this way, nor why it's specifically - # done three times. - ncls = unclf.copy() - for i_loop in range(3): - temp_comptable = comptable.loc[ncls].sort_values( - by=["variance explained"], ascending=False - ) - diff_vals = temp_comptable["variance explained"].diff(-1) - diff_vals = diff_vals.fillna(0) - ncls = temp_comptable.loc[diff_vals < varex_upper_p].index.values - - # Compute elbows from other elbows - f05, _, f01 = getfbounds(n_echos) - kappas_nonsig = comptable.loc[comptable["kappa"] < f01, "kappa"] - if not kappas_nonsig.size: - LGR.warning( - "No nonsignificant kappa values detected. " - "Only using elbow calculated from all kappa values." 
- ) - kappas_nonsig_elbow = np.nan - else: - kappas_nonsig_elbow = getelbow(kappas_nonsig, return_val=True) - - kappas_all_elbow = getelbow(comptable["kappa"], return_val=True) - - # NOTE: Would an elbow from all Kappa values *ever* be lower than one from - # a subset of lower (i.e., nonsignificant) values? - kappa_elbow = np.nanmin((kappas_all_elbow, kappas_nonsig_elbow)) - rhos_ncls_elbow = getelbow(comptable.loc[ncls, "rho"], return_val=True) - rhos_all_elbow = getelbow(comptable["rho"], return_val=True) - rho_elbow = np.mean((rhos_ncls_elbow, rhos_all_elbow, f05)) - - # Provisionally accept components based on Kappa and Rho elbows - acc_prov = ncls[ - (comptable.loc[ncls, "kappa"] >= kappa_elbow) & (comptable.loc[ncls, "rho"] < rho_elbow) - ] - - # Quit early if no potentially accepted components remain - if len(acc_prov) <= 1: - LGR.warning("Too few BOLD-like components detected. Ignoring all remaining.") - ign = sorted(np.setdiff1d(all_comps, rej)) - comptable.loc[ign, "classification"] = "ignored" - comptable.loc[ign, "rationale"] += "I006;" - - # Move decision columns to end - comptable = clean_dataframe(comptable) - metric_metadata = collect.get_metadata(comptable) - return comptable, metric_metadata - - # Calculate "rate" for kappa: kappa range divided by variance explained - # range, for potentially accepted components - # NOTE: What is the logic behind this? - kappa_rate = ( - np.max(comptable.loc[acc_prov, "kappa"]) - np.min(comptable.loc[acc_prov, "kappa"]) - ) / ( - np.max(comptable.loc[acc_prov, "variance explained"]) - - np.min(comptable.loc[acc_prov, "variance explained"]) - ) - comptable["kappa ratio"] = kappa_rate * comptable["variance explained"] / comptable["kappa"] - - # Calculate bounds for variance explained - varex_lower = stats.scoreatpercentile(comptable.loc[acc_prov, "variance explained"], LOW_PERC) - varex_upper = stats.scoreatpercentile(comptable.loc[acc_prov, "variance explained"], HIGH_PERC) - - """ - Step 3: Get rid of midk components; i.e., those with higher than - max decision score and high variance - """ - max_good_d_score = EXTEND_FACTOR * len(acc_prov) - midk = unclf[ - (comptable.loc[unclf, "d_table_score"] > max_good_d_score) - & (comptable.loc[unclf, "variance explained"] > EXTEND_FACTOR * varex_upper) - ] - comptable.loc[midk, "classification"] = "rejected" - comptable.loc[midk, "rationale"] += "I007;" - unclf = np.setdiff1d(unclf, midk) - acc_prov = np.setdiff1d(acc_prov, midk) - - """ - Step 4: Find components to ignore - """ - # collect high variance unclassified components - # and mix of high/low provisionally accepted - high_varex = np.union1d( - acc_prov, unclf[comptable.loc[unclf, "variance explained"] > varex_lower] - ) - # ignore low variance components - ign = np.setdiff1d(unclf, high_varex) - # but only if they have bad decision scores - ign = np.setdiff1d(ign, ign[comptable.loc[ign, "d_table_score"] < max_good_d_score]) - # and low kappa - ign = np.setdiff1d(ign, ign[comptable.loc[ign, "kappa"] > kappa_elbow]) - comptable.loc[ign, "classification"] = "ignored" - comptable.loc[ign, "rationale"] += "I008;" - unclf = np.setdiff1d(unclf, ign) - - """ - Step 5: Scrub the set if there are components that haven't been rejected or - ignored, but are still not listed in the provisionally accepted group. 
- """ - if len(unclf) > len(acc_prov): - comptable["d_table_score_scrub"] = np.nan - # Recompute the midk steps on the limited set to clean up the tail - d_table_rank = np.vstack( - [ - len(unclf) - stats.rankdata(comptable.loc[unclf, "kappa"]), - len(unclf) - stats.rankdata(comptable.loc[unclf, "dice_FT2"]), - len(unclf) - stats.rankdata(comptable.loc[unclf, "signal-noise_t"]), - stats.rankdata(comptable.loc[unclf, "countnoise"]), - len(unclf) - stats.rankdata(comptable.loc[unclf, "countsigFT2"]), - ] - ).T - comptable.loc[unclf, "d_table_score_scrub"] = d_table_rank.mean(1) - num_acc_guess = int( - np.mean( - [ - np.sum( - (comptable.loc[unclf, "kappa"] > kappa_elbow) - & (comptable.loc[unclf, "rho"] < rho_elbow) - ), - np.sum(comptable.loc[unclf, "kappa"] > kappa_elbow), - ] - ) - ) - - # Rejection candidate based on artifact type A: candartA - conservative_guess = num_acc_guess / RESTRICT_FACTOR - candartA = np.intersect1d( - unclf[comptable.loc[unclf, "d_table_score_scrub"] > conservative_guess], - unclf[comptable.loc[unclf, "kappa ratio"] > EXTEND_FACTOR * 2], - ) - candartA = candartA[ - comptable.loc[candartA, "variance explained"] > varex_upper * EXTEND_FACTOR - ] - comptable.loc[candartA, "classification"] = "rejected" - comptable.loc[candartA, "rationale"] += "I009;" - midk = np.union1d(midk, candartA) - unclf = np.setdiff1d(unclf, midk) - - # Rejection candidate based on artifact type B: candartB - conservative_guess2 = num_acc_guess * HIGH_PERC / 100.0 - candartB = unclf[comptable.loc[unclf, "d_table_score_scrub"] > conservative_guess2] - candartB = candartB[ - comptable.loc[candartB, "variance explained"] > varex_lower * EXTEND_FACTOR - ] - comptable.loc[candartB, "classification"] = "rejected" - comptable.loc[candartB, "rationale"] += "I010;" - midk = np.union1d(midk, candartB) - unclf = np.setdiff1d(unclf, midk) - - # Find components to ignore - # Ignore high variance explained, poor decision tree scored components - new_varex_lower = stats.scoreatpercentile( - comptable.loc[unclf[:num_acc_guess], "variance explained"], LOW_PERC - ) - candart = unclf[comptable.loc[unclf, "d_table_score_scrub"] > num_acc_guess] - ign_add0 = candart[comptable.loc[candart, "variance explained"] > new_varex_lower] - ign_add0 = np.setdiff1d(ign_add0, midk) - comptable.loc[ign_add0, "classification"] = "ignored" - comptable.loc[ign_add0, "rationale"] += "I011;" - ign = np.union1d(ign, ign_add0) - unclf = np.setdiff1d(unclf, ign) - - # Ignore low Kappa, high variance explained components - ign_add1 = np.intersect1d( - unclf[comptable.loc[unclf, "kappa"] <= kappa_elbow], - unclf[comptable.loc[unclf, "variance explained"] > new_varex_lower], - ) - ign_add1 = np.setdiff1d(ign_add1, midk) - comptable.loc[ign_add1, "classification"] = "ignored" - comptable.loc[ign_add1, "rationale"] += "I012;" - - # at this point, unclf is equivalent to accepted - - # Move decision columns to end - comptable = clean_dataframe(comptable) - metric_metadata = collect.get_metadata(comptable) - return comptable, metric_metadata + comptable["classification_tags"] = "" + xcomp = { + "n_echos": n_echos, + "n_vols": n_vols, + } + selector = ComponentSelector(tree, comptable, cross_component_metrics=xcomp) + selector.select() + selector.metadata = collect.get_metadata(selector.component_table) + + return selector diff --git a/tedana/selection/tedpca.py b/tedana/selection/tedpca.py index da24bce85..5b871e401 100644 --- a/tedana/selection/tedpca.py +++ b/tedana/selection/tedpca.py @@ -7,7 +7,7 @@ from tedana import utils from 
tedana.metrics import collect -from tedana.selection._utils import clean_dataframe, getelbow, getelbow_cons +from tedana.selection.selection_utils import clean_dataframe, getelbow, getelbow_cons from tedana.stats import getfbounds LGR = logging.getLogger("GENERAL") diff --git a/tedana/tests/data/cornell_three_echo_outputs.txt b/tedana/tests/data/cornell_three_echo_outputs.txt index 4e45b1773..4af90bcaf 100644 --- a/tedana/tests/data/cornell_three_echo_outputs.txt +++ b/tedana/tests/data/cornell_three_echo_outputs.txt @@ -7,6 +7,10 @@ desc-ICA_components.nii.gz desc-ICA_decomposition.json desc-tedana_metrics.json desc-tedana_metrics.tsv +desc-tedana_registry.json +desc-ICA_cross_component_metrics.json +desc-ICA_status_table.tsv +desc-ICA_decision_tree.json desc-ICA_mixing.tsv desc-ICA_stat-z_components.nii.gz desc-PCA_cross_component_metrics.json diff --git a/tedana/tests/data/fiu_four_echo_outputs.txt b/tedana/tests/data/fiu_four_echo_outputs.txt index 7e9ce1169..cb7f5e599 100644 --- a/tedana/tests/data/fiu_four_echo_outputs.txt +++ b/tedana/tests/data/fiu_four_echo_outputs.txt @@ -10,6 +10,10 @@ desc-ICA_components.nii.gz desc-ICA_decomposition.json desc-tedana_metrics.json desc-tedana_metrics.tsv +desc-tedana_registry.json +desc-ICA_cross_component_metrics.json +desc-ICA_status_table.tsv +desc-ICA_decision_tree.json desc-ICA_mixing.tsv desc-ICA_stat-z_components.nii.gz desc-PCAAveragingWeights_components.nii.gz diff --git a/tedana/tests/data/nih_five_echo_outputs_t2smap.txt b/tedana/tests/data/nih_five_echo_outputs_t2smap.txt index ce203aebd..ecf1753b1 100644 --- a/tedana/tests/data/nih_five_echo_outputs_t2smap.txt +++ b/tedana/tests/data/nih_five_echo_outputs_t2smap.txt @@ -1,6 +1,7 @@ dataset_description.json desc-limited_S0map.nii.gz desc-limited_T2starmap.nii.gz +desc-tedana_registry.json desc-optcom_bold.nii.gz S0map.nii.gz T2starmap.nii.gz diff --git a/tedana/tests/data/nih_five_echo_outputs_verbose.txt b/tedana/tests/data/nih_five_echo_outputs_verbose.txt index 907b4ec49..74259ae2b 100644 --- a/tedana/tests/data/nih_five_echo_outputs_verbose.txt +++ b/tedana/tests/data/nih_five_echo_outputs_verbose.txt @@ -9,6 +9,10 @@ desc-ICA_components.nii.gz desc-ICA_decomposition.json desc-tedana_metrics.json desc-tedana_metrics.tsv +desc-tedana_registry.json +desc-ICA_cross_component_metrics.json +desc-ICA_status_table.tsv +desc-ICA_decision_tree.json desc-ICA_mixing.tsv desc-ICA_stat-z_components.nii.gz desc-PCAAveragingWeights_components.nii.gz diff --git a/tedana/tests/data/sample_comptable.tsv b/tedana/tests/data/sample_comptable.tsv new file mode 100644 index 000000000..ad4551417 --- /dev/null +++ b/tedana/tests/data/sample_comptable.tsv @@ -0,0 +1,22 @@ +Component kappa rho variance explained normalized variance explained countsigFT2 countsigFS0 dice_FT2 dice_FS0 countnoise signal-noise_t signal-noise_p d_table_score optimal sign classification classification_tags +ICA_00 11.773633384130344 12.244047372613279 1.8708761636220743 0.0244263937653776 70 281 0.0 0.0 4653 0.0 0.0 19.4 1 rejected Unlikely BOLD +ICA_01 13.469100021727968 10.597359822668041 1.8141851634150163 0.024946211086853073 356 0 0.0 0.0 4427 7.160459030871695 4.424304742815937e-12 16.3 1 rejected None,Unlikely BOLD +ICA_02 22.044384493213173 14.081602067232835 5.0844942012809025 0.05182420614143496 3860 331 0.41309492505916384 0.0 4096 0.0 0.0 7.1 -1 accepted None,Likely BOLD +ICA_03 21.435565425383167 13.08776097105916 6.337651400602204 0.06267167186849244 3501 340 0.4229266174554928 0.0 4154 0.0 0.0 8.7 -1 accepted 
None,Likely BOLD +ICA_04 21.571783725457784 13.180291764692623 5.410512852983703 0.05415357748457931 3111 379 0.3955332578087069 0.0 4351 0.0 0.0 13.5 1 accepted None,Likely BOLD +ICA_05 20.455815181233913 13.530653271482537 5.103253880526514 0.0484684678593299 2992 122 0.41797072185764766 0.0 4253 4.26379519408162 8.466083940618762e-05 11.6 1 rejected Unlikely BOLD +ICA_06 21.998300075307892 12.342036790070972 4.661405806338223 0.047164097532350104 3647 446 0.4382828993666432 0.0 4331 0.0 0.0 9.5 1 accepted None,Likely BOLD +ICA_07 21.57683009938704 14.067931569329534 5.052657635488493 0.0471509419227379 2995 368 0.38913870632998965 0.0 4288 0.0 0.0 12.7 -1 accepted None,Likely BOLD +ICA_08 20.662379427772397 13.605577967277663 4.899573691276539 0.0487272274448804 3292 486 0.40497951465490073 0.0 4169 2.4101037193307158 0.019728835459480055 10.4 1 accepted None,Likely BOLD +ICA_09 21.350101713170858 14.433444938178267 5.480380082264091 0.053822797428019244 2934 529 0.40547176429323045 0.0 4240 5.942792150490043 3.1181725670893785e-08 10.4 1 accepted None,Likely BOLD +ICA_10 21.333212633418892 12.977569833261267 4.88266845033625 0.048109815519354025 2757 244 0.48600223964165734 0.0 4271 3.193303025882193 0.0019400513552828209 11.0 1 accepted None,Likely BOLD +ICA_11 22.70767526961776 9.5558503504626 2.106637746364626 0.0290437126768305 3131 0 0.6537842190016103 0.0 3383 18.975490976691813 4.409180770584508e-76 2.2 -1 rejected None,Unlikely BOLD +ICA_12 21.42480737193139 12.523285689527825 4.700042017821479 0.04555941778867682 3216 445 0.46589486858573215 0.0 4349 8.925209841985136 6.428202512989954e-14 8.4 -1 accepted None,Likely BOLD +ICA_13 20.461714898910873 13.372399561878185 4.91288433926911 0.049139412646169 2988 379 0.37319449109842123 0.0 4270 0.0 0.0 14.7 1 accepted None,Likely BOLD +ICA_14 20.831824101283157 14.123062103306893 5.117731849017717 0.052114111704595754 2849 768 0.42624709458251386 0.0 4295 0.0 0.0 13.9 -1 accepted None,Likely BOLD +ICA_15 23.443590881814984 13.43898022294784 5.607029047746701 0.054302339698066616 3847 409 0.4945730351771937 0.0 4266 4.303738107001976 3.587737594462498e-05 4.2 -1 accepted None,Likely BOLD +ICA_16 21.510571197601205 13.279774339508181 4.847709079277335 0.04722499686178239 3042 227 0.4956953642384106 0.0 4275 11.819966876475757 1.8003157916056668e-22 6.6 -1 accepted None,Likely BOLD +ICA_17 21.38843440973818 13.768635078323872 5.040219756607229 0.04728714164872373 2519 50 0.4331983805668016 0.0 4339 5.108828337559395 3.1788748004451597e-06 12.0 1 accepted None,Likely BOLD +ICA_18 23.30080869317963 14.338966963066872 5.690965607643776 0.05575825515462903 4046 541 0.36185133239831696 0.0 4234 3.321412684022962 0.0014954034132828892 7.0 -1 accepted None,Likely BOLD +ICA_19 21.957993034031354 13.929222476750176 6.323103550347913 0.05978202785850936 3480 195 0.44127806563039723 0.0 4174 7.475122521508307 6.663523274208933e-10 5.0 -1 accepted None,Likely BOLD +ICA_20 22.28373739816742 13.346603659167044 5.0560176777701145 0.04832317590860793 2347 150 0.4419735927727589 0.0 4298 4.609368981883021 2.9109368062960523e-05 9.6 -1 accepted None,Likely BOLD diff --git a/tedana/tests/test_ComponentSelector.py b/tedana/tests/test_ComponentSelector.py new file mode 100644 index 000000000..4607bcb04 --- /dev/null +++ b/tedana/tests/test_ComponentSelector.py @@ -0,0 +1,250 @@ +"""Tests for the decision tree modularization""" +import pytest +import json, os, glob +import os.path as op + +import numpy as np +import pandas as pd + +from tedana.selection 
import ComponentSelector
+from tedana import io
+
+THIS_DIR = os.path.dirname(os.path.abspath(__file__))
+
+# ----------------------------------------------------------------------
+# Functions Used For Tests
+# ----------------------------------------------------------------------
+
+
+def sample_comptable():
+    """Retrieves a sample component table"""
+    sample_fname = op.join(THIS_DIR, "data", "sample_comptable.tsv")
+
+    return pd.read_csv(sample_fname, delimiter="\t")
+
+
+def dicts_to_test(treechoice):
+    """
+    Outputs decision tree dictionaries to use to test tree validation
+
+    Parameters
+    ----------
+    treechoice : :obj:`str`
+        One of several labels to select which dict to output
+        Options are:
+        "valid": A tree that would trigger all warnings, but pass validation
+        "extra_req_param": A tree with an undefined required parameter for a decision node function
+        "extra_opt_param": A tree with an undefined optional parameter for a decision node function
+        "missing_req_param": A missing required param in a decision node function
+        "missing_function": An undefined decision node function
+        "missing_key": A dict missing one of the required keys (refs)
+
+    Returns
+    -------
+    tree : :obj:`dict`
+        A dict that can be input into ComponentSelector.validate_tree
+    """
+
+    # valid_dict is a simple valid dictionary to test
+    # It includes a few things that should trigger warnings, but not errors.
+    valid_dict = {
+        "tree_id": "valid_simple_tree",
+        "info": "This is a short valid tree",
+        "report": "",
+        "refs": "",
+        # Warning for an unused key
+        "unused_key": "There can be added keys that are valid, but aren't used",
+        "necessary_metrics": ["kappa", "rho"],
+        "intermediate_classifications": ["random1"],
+        "classification_tags": ["Random1"],
+        "nodes": [
+            {
+                "functionname": "dec_left_op_right",
+                "parameters": {
+                    "ifTrue": "rejected",
+                    "ifFalse": "nochange",
+                    "decide_comps": "all",
+                    "op": ">",
+                    "left": "rho",
+                    "right": "kappa",
+                },
+                "kwargs": {
+                    "log_extra_info": "random1 if Kappa<Rho",
+                    "log_extra_report": "",
+                    "tag_ifTrue": "random1",
+                },
+            },
+            {
+                "functionname": "dec_left_op_right",
+                "parameters": {
+                    "ifTrue": "random2notpredefined",
+                    "ifFalse": "nochange",
+                    "decide_comps": "all",
+                    "op": ">",
+                    "left": "kappa",
+                    "right": "rho",
+                },
+                "kwargs": {
+                    "log_extra_info": "random2 if Kappa>Rho",
+                    "log_extra_report": "",
+                    # Warning for a non-predefined classification assigned to a component
+                    "tag_ifTrue": "random2notpredefined",
+                },
+            },
+            {
+                "functionname": "manual_classify",
+                "parameters": {
+                    "new_classification": "accepted",
+                    # Warning for a non-predefined classification used to select components to operate on
+                    "decide_comps": "random2notpredefined",
+                },
+                "kwargs": {
+                    "log_extra_info": "",
+                    "log_extra_report": "",
+                    # Warning for a tag that wasn't predefined
+                    "tag": "Random2_NotPredefined",
+                },
+            },
+            {
+                "functionname": "manual_classify",
+                "parameters": {
+                    "new_classification": "rejected",
+                    "decide_comps": "random1",
+                },
+                "kwargs": {
+                    "tag": "Random1",
+                },
+            },
+        ],
+    }
+
+    tree = valid_dict
+    if treechoice == "valid":
+        return tree
+    elif treechoice == "extra_req_param":
+        tree["nodes"][0]["parameters"]["nonexistent_req_param"] = True
+    elif treechoice == "extra_opt_param":
+        tree["nodes"][0]["kwargs"]["nonexistent_opt_param"] = True
+    elif treechoice == "missing_req_param":
+        tree["nodes"][0]["parameters"].pop("op")
+    elif treechoice == "missing_function":
+        tree["nodes"][0]["functionname"] = "not_a_function"
+    elif treechoice == "missing_key":
+        tree.pop("refs")
+    else:
+        raise Exception(f"{treechoice} is an invalid option for treechoice")
+
+    return tree
+
+
+# ----------------------------------------------------------------------
+# ComponentSelector Tests
+# ----------------------------------------------------------------------
+
+# load_config
+# -----------
+def test_load_config_fails():
+    """Tests for load_config failure modes"""
+
+    # We recast to ValueError in the file not found and directory cases
+    with pytest.raises(ValueError):
+        ComponentSelector.load_config("THIS FILE DOES NOT EXIST.txt")
+
+    # Raises IsADirectoryError for a directory
+    with pytest.raises(ValueError):
+        ComponentSelector.load_config(".")
+
+    # Note: we defer validation errors for validate_tree even though
+    # load_config may raise them
+
+
+def test_load_config_succeeds():
+    """Tests to make sure load_config succeeds"""
+
+    # The minimal tree should have an id of "minimal_decision_tree_test1"
+    tree = ComponentSelector.load_config("minimal")
+    assert tree["tree_id"] == "minimal_decision_tree_test1"
+
+
+def test_minimal():
+    """Smoke test for the ComponentSelector constructor using the minimal tree"""
+    xcomp = {
+        "n_echos": 3,
+    }
+    tree = ComponentSelector.ComponentSelector(
+        "minimal",
+        sample_comptable(),
+        cross_component_metrics=xcomp,
+    )
+    tree.select()
+
+
+# validate_tree
+# -------------
+
+
+def test_validate_tree_succeeds():
+    """
+    Tests to make sure validate_tree succeeds for all default decision trees.
+    Tested on all default trees in ./tedana/resources/decision_trees
+    Note: If a tree in the default trees directory is still being developed
+    and not yet valid, its file name should include 'invalid' as a prefix
+    """
+
+    default_tree_names = glob.glob(
+        os.path.join(THIS_DIR, "../resources/decision_trees/[!invalid]*.json")
+    )
+
+    for tree_name in default_tree_names:
+        with open(tree_name) as f:
+            tree = json.load(f)
+        assert ComponentSelector.validate_tree(tree)
+
+        # Test a few extra possibilities just using the minimal.json tree
+        if "/minimal.json" in tree_name:
+            # Should remove/ignore the "reconstruct_from" key during validation
+            tree["reconstruct_from"] = "testinput"
+            # Need to test handling of the tag_ifFalse kwarg somewhere
+            tree["nodes"][1]["kwargs"]["tag_ifFalse"] = "testing tag"
+            assert ComponentSelector.validate_tree(tree)
+
+
+def test_validate_tree_warnings():
+    """
+    Tests to make sure validate_tree triggers all warning conditions
+    but still succeeds
+    """
+
+    # A tree that raises all possible warnings in the validator should still be valid
+    assert ComponentSelector.validate_tree(dicts_to_test("valid"))
+
+
+def test_validate_tree_fails():
+    """
+    Tests to make sure validate_tree fails for invalid trees
+    Tests ../resources/decision_trees/invalid*.json and
+    ./data/ComponentSelection/invalid*.json trees
+    """
+
+    # An empty dict should not be valid
+    with pytest.raises(ComponentSelector.TreeError):
+        ComponentSelector.validate_tree({})
+
+    # A tree that is missing a required key should not be valid
+    with pytest.raises(ComponentSelector.TreeError):
+        ComponentSelector.validate_tree(dicts_to_test("missing_key"))
+
+    # Calling a selection node function that does not exist should not be valid
+    with pytest.raises(ComponentSelector.TreeError):
+        ComponentSelector.validate_tree(dicts_to_test("missing_function"))
+
+    # Calling a function with a non-existent required parameter should not be valid
+    with pytest.raises(ComponentSelector.TreeError):
+        ComponentSelector.validate_tree(dicts_to_test("extra_req_param"))
+
+    # Calling a function with a non-existent optional parameter should not be valid
+    with pytest.raises(ComponentSelector.TreeError):
+        ComponentSelector.validate_tree(dicts_to_test("extra_opt_param"))
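+
+    # For reference, a minimal sketch of how this parameter validation is
+    # expected to behave (the real logic lives in
+    # ComponentSelector.validate_tree and may differ in detail): each node's
+    # "parameters" and "kwargs" are compared against the signature of the
+    # named selection_nodes function, e.g.
+    #
+    #   import inspect
+    #   sig = inspect.signature(getattr(selection_nodes, node["functionname"]))
+    #   unknown = (set(node["parameters"]) | set(node["kwargs"])) - set(sig.parameters)
+    #
+    # so any name missing from the signature raises a TreeError, while unknown
+    # top-level tree keys (see dicts_to_test) only log a warning.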
+ + # Calling a function missing a required parameter should not be valid + with pytest.raises(ComponentSelector.TreeError): + ComponentSelector.validate_tree(dicts_to_test("missing_req_param")) diff --git a/tedana/tests/test_integration.py b/tedana/tests/test_integration.py index 4a050deaa..e632011b7 100644 --- a/tedana/tests/test_integration.py +++ b/tedana/tests/test_integration.py @@ -4,6 +4,7 @@ import glob import os +import os.path as op import re import shutil import tarfile @@ -17,6 +18,7 @@ from tedana.workflows import t2smap as t2smap_cli from tedana.workflows import tedana as tedana_cli +from tedana.workflows.tedana_reclassify import post_tedana def check_integration_outputs(fname, outpath): @@ -108,33 +110,6 @@ def test_integration_five_echo(skip_integration): df = pd.read_table(comptable) assert isinstance(df, pd.DataFrame) - # Test re-running, but use the CLI - acc_comps = df.loc[df["classification"] == "ignored"].index.values - acc_comps = [str(c) for c in acc_comps] - mixing = os.path.join(out_dir, "desc-ICA_mixing.tsv") - t2smap = os.path.join(out_dir, "T2starmap.nii.gz") - args = ( - ["-d"] - + datalist - + ["-e"] - + [str(te) for te in echo_times] - + [ - "--out-dir", - out_dir_manual, - "--debug", - "--verbose", - "--manacc", - *acc_comps, - "--ctab", - comptable, - "--mix", - mixing, - "--t2smap", - t2smap, - ] - ) - tedana_cli._main(args) - # compare the generated output files fn = resource_filename("tedana", "tests/data/nih_five_echo_outputs_verbose.txt") check_integration_outputs(fn, out_dir) @@ -149,8 +124,10 @@ def test_integration_four_echo(skip_integration): out_dir = "/tmp/data/four-echo/TED.four-echo" out_dir_manual = "/tmp/data/four-echo/TED.four-echo-manual" + """ if os.path.exists(out_dir): shutil.rmtree(out_dir) + """ if os.path.exists(out_dir_manual): shutil.rmtree(out_dir_manual) @@ -161,6 +138,7 @@ def test_integration_four_echo(skip_integration): prepend += "sub-PILOT_ses-01_task-localizerDetection_run-01_echo-" suffix = "_space-sbref_desc-preproc_bold+orig.HEAD" datalist = [prepend + str(i + 1) + suffix for i in range(4)] + """ tedana_cli.tedana_workflow( data=datalist, tes=[11.8, 28.04, 44.28, 60.52], @@ -171,27 +149,15 @@ def test_integration_four_echo(skip_integration): debug=True, verbose=True, ) + """ - # Test re-running with the component table - mixing_matrix = os.path.join(out_dir, "desc-ICA_mixing.tsv") - comptable = os.path.join(out_dir, "desc-tedana_metrics.tsv") - temporary_comptable = os.path.join(out_dir, "temporary_metrics.tsv") - comptable_df = pd.read_table(comptable) - comptable_df.loc[comptable_df["classification"] == "ignored", "classification"] = "accepted" - comptable_df.to_csv(temporary_comptable, sep="\t", index=False) - tedana_cli.tedana_workflow( - data=datalist, - tes=[11.8, 28.04, 44.28, 60.52], + post_tedana( + op.join(out_dir, "desc-tedana_registry.json"), + accept=[1, 2, 3], + reject=[4, 5, 6], out_dir=out_dir_manual, - tedpca="kundu-stabilize", - gscontrol=["gsr", "mir"], - png_cmap="bone", - mixm=mixing_matrix, - ctab=temporary_comptable, - debug=True, - verbose=False, + mir=True, ) - os.remove(temporary_comptable) # compare the generated output files fn = resource_filename("tedana", "tests/data/fiu_four_echo_outputs.txt") @@ -236,8 +202,7 @@ def test_integration_three_echo(skip_integration): out_dir_manual, "--debug", "--verbose", - "--ctab", - os.path.join(out_dir, "desc-tedana_metrics.tsv"), + "-f", "--mix", os.path.join(out_dir, "desc-ICA_mixing.tsv"), ] diff --git a/tedana/tests/test_selection_nodes.py 
b/tedana/tests/test_selection_nodes.py
new file mode 100644
index 000000000..ef459c528
--- /dev/null
+++ b/tedana/tests/test_selection_nodes.py
@@ -0,0 +1,923 @@
+"""Tests for the tedana.selection.selection_nodes module."""
+import numpy as np
+import pytest
+import os
+import pandas as pd
+
+from tedana.selection.ComponentSelector import ComponentSelector
+from tedana.selection import selection_utils
+from tedana.selection import selection_nodes
+from tedana.tests.test_selection_utils import sample_component_table, sample_selector
+
+THIS_DIR = os.path.dirname(os.path.abspath(__file__))
+
+
+def test_manual_classify_smoke():
+    """Smoke tests for all options in manual_classify"""
+
+    selector = sample_selector(options="provclass")
+
+    decide_comps = "provisional accept"
+    new_classification = "accepted"
+
+    # Outputs just the metrics used in this function (nothing in this case)
+    used_metrics = selection_nodes.manual_classify(
+        selector, decide_comps, new_classification, only_used_metrics=True
+    )
+    assert used_metrics == set()
+
+    # Standard execution where components are changed from "provisional accept" to "accepted"
+    # and all extra logging code is run
+    selector = selection_nodes.manual_classify(
+        selector,
+        decide_comps,
+        new_classification,
+        log_extra_report="report log",
+        log_extra_info="info log",
+        custom_node_label="custom label",
+        tag="test tag",
+    )
+    # There should be 4 selected components and component_status_table should have a new column "Node 0"
+    assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numTrue"] == 4
+    assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numFalse"] == 0
+    assert f"Node {selector.current_node_idx}" in selector.component_status_table
+
+    # No components have the "NotALabel" classification, so nothing is selected and
+    # no "Node 1" column is created in component_status_table
+    selector.current_node_idx = 1
+    selector = selection_nodes.manual_classify(selector, "NotAClassification", new_classification)
+    assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numTrue"] == 0
+    assert f"Node {selector.current_node_idx}" not in selector.component_status_table
+
+    # Changing components from "rejected" to "accepted" and suppressing the warning
+    selector.current_node_idx = 2
+    selector = selection_nodes.manual_classify(
+        selector,
+        "rejected",
+        new_classification,
+        clear_classification_tags=True,
+        log_extra_report="report log",
+        log_extra_info="info log",
+        tag="test tag",
+        dont_warn_reclassify=True,
+    )
+    assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numTrue"] == 4
+    assert f"Node {selector.current_node_idx}" in selector.component_status_table
+
+
+def test_dec_left_op_right_succeeds():
+    """tests for successful calls to dec_left_op_right"""
+
+    selector = sample_selector(options="provclass")
+
+    decide_comps = "provisional accept"
+
+    # Outputs just the metrics used in this function {"kappa", "rho"}
+    used_metrics = selection_nodes.dec_left_op_right(
+        selector, "accepted", "rejected", decide_comps, ">", "kappa", "rho", only_used_metrics=True
+    )
+    assert len(used_metrics - {"kappa", "rho"}) == 0
+
+    # Standard execution where components with kappa>rho are changed from "provisional accept" to "accepted"
+    # and all extra logging code and options are run
+    # left and right are both component_table_metrics
+    selector = selection_nodes.dec_left_op_right(
+        selector,
+        "accepted",
+        "rejected",
+        decide_comps,
+        ">",
+        "kappa",
+        "rho",
+        left_scale=0.9,
+
right_scale=1.4, + log_extra_report="report log", + log_extra_info="info log", + custom_node_label="custom label", + tag_ifTrue="test true tag", + tag_ifFalse="test false tag", + ) + # scales are set to make sure 3 components are true and 1 is false using the sample component table + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numTrue"] == 3 + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numFalse"] == 1 + assert f"Node {selector.current_node_idx}" in selector.component_status_table + + # No components with "NotALabel" classification so nothing selected and no + # Node 1 column is created in component_status_table + selector.current_node_idx = 1 + selector = selection_nodes.dec_left_op_right( + selector, + "accepted", + "rejected", + "NotAClassification", + ">", + "kappa", + "rho", + ) + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numTrue"] == 0 + assert f"Node {selector.current_node_idx}" not in selector.component_status_table + + # Re-initializing selector so that it has components classificated as "provisional accept" again + selector = sample_selector(options="provclass") + # Test when left is a component_table_metric, & right is a cross_component_metric + selector = selection_nodes.dec_left_op_right( + selector, + "accepted", + "rejected", + decide_comps, + ">", + "kappa", + "test_elbow", + ) + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numTrue"] == 3 + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numFalse"] == 1 + assert f"Node {selector.current_node_idx}" in selector.component_status_table + + # right is a component_table_metric, left is a cross_component_metric + # left also has a left_scale that's a cross component metric + selector = sample_selector(options="provclass") + selector.cross_component_metrics["new_cc_metric"] = 1.02 + selector = selection_nodes.dec_left_op_right( + selector, + "accepted", + "rejected", + decide_comps, + ">", + "test_elbow", + "kappa", + left_scale="new_cc_metric", + ) + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numTrue"] == 1 + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numFalse"] == 3 + assert f"Node {selector.current_node_idx}" in selector.component_status_table + + # left component_table_metric, right is a constant integer value + selector = sample_selector(options="provclass") + selector = selection_nodes.dec_left_op_right( + selector, + "accepted", + "rejected", + decide_comps, + ">", + "kappa", + 21, + ) + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numTrue"] == 3 + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numFalse"] == 1 + assert f"Node {selector.current_node_idx}" in selector.component_status_table + + # right component_table_metric, left is a constant float value + selector = sample_selector(options="provclass") + selector = selection_nodes.dec_left_op_right( + selector, + "accepted", + "rejected", + decide_comps, + ">", + 21.0, + "kappa", + ) + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numTrue"] == 1 + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numFalse"] == 3 + assert f"Node {selector.current_node_idx}" in selector.component_status_table + + # Testing combination of two statements. 
kappa>21 AND rho<14 + selector = sample_selector(options="provclass") + selector = selection_nodes.dec_left_op_right( + selector, + "accepted", + "rejected", + decide_comps, + "<", + 21.0, + "kappa", + left2="rho", + op2="<", + right2=14, + ) + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numTrue"] == 2 + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numFalse"] == 2 + assert f"Node {selector.current_node_idx}" in selector.component_status_table + + # Testing combination of three statements. kappa>21 AND rho<14 AND 'variance explained'<5 + selector = sample_selector(options="provclass") + selector = selection_nodes.dec_left_op_right( + selector, + "accepted", + "rejected", + decide_comps, + "<", + 21.0, + "kappa", + left2="rho", + op2="<", + right2=14, + left3="variance explained", + op3="<", + right3=5, + ) + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numTrue"] == 1 + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numFalse"] == 3 + assert f"Node {selector.current_node_idx}" in selector.component_status_table + + +def test_dec_left_op_right_fails(): + """tests for calls to dec_left_op_right that raise errors""" + + selector = sample_selector(options="provclass") + decide_comps = "provisional accept" + + # Raise error for left value that is not a metric + selector = sample_selector(options="provclass") + with pytest.raises(ValueError): + selection_nodes.dec_left_op_right( + selector, + "accepted", + "rejected", + decide_comps, + ">", + "NotAMetric", + 21, + ) + + # Raise error for right value that is not a metric + selector = sample_selector(options="provclass") + with pytest.raises(ValueError): + selection_nodes.dec_left_op_right( + selector, + "accepted", + "rejected", + decide_comps, + ">", + 21, + "NotAMetric", + ) + + # Raise error for invalid operator + selector = sample_selector(options="provclass") + with pytest.raises(ValueError): + selection_nodes.dec_left_op_right( + selector, + "accepted", + "rejected", + decide_comps, + "><", + "kappa", + 21, + ) + + # Raise error for right_scale that is not a number + selector = sample_selector(options="provclass") + with pytest.raises(ValueError): + selector = selection_nodes.dec_left_op_right( + selector, + "accepted", + "rejected", + decide_comps, + ">", + 21.0, + "kappa", + right_scale="NotANumber", + ) + + # Raise error for right_scale that a column in the component_table + # which isn't allowed since the scale value needs to resolve to a + # a fixed number and not a different number for each component + selector = sample_selector(options="provclass") + with pytest.raises(ValueError): + selector = selection_nodes.dec_left_op_right( + selector, + "accepted", + "rejected", + decide_comps, + ">", + 21.0, + "kappa", + right_scale="rho", + ) + + # Raise error if some but not all parameters for the second conditional statement are defined + # In this case, op2 is not defined + selector = sample_selector(options="provclass") + with pytest.raises(ValueError): + selection_nodes.dec_left_op_right( + selector, + "accepted", + "rejected", + decide_comps, + ">", + "kappa", + 21, + left2="rho", + right2=14, + ) + + # Raise error for invalid operator for op2 + selector = sample_selector(options="provclass") + with pytest.raises(ValueError): + selection_nodes.dec_left_op_right( + selector, + "accepted", + "rejected", + decide_comps, + ">", + "kappa", + 21, + left2="rho", + op2="<>", + right2=14, + ) + + # Raise error if some but not all parameters for the third 
conditional statement are defined + # In this case, op3 is not defined + selector = sample_selector(options="provclass") + with pytest.raises(ValueError): + selection_nodes.dec_left_op_right( + selector, + "accepted", + "rejected", + decide_comps, + ">", + "kappa", + 21, + left2="rho", + right2=14, + op2="<", + left3="variance explained", + right3=5, + ) + + # Raise error if there's a third conditional statement but not a second statement + selector = sample_selector(options="provclass") + with pytest.raises(ValueError): + selection_nodes.dec_left_op_right( + selector, + "accepted", + "rejected", + decide_comps, + ">", + "kappa", + 21, + left3="variance explained", + right3=5, + op3="<", + ) + + +def test_dec_variance_lessthan_thresholds_smoke(): + """Smoke tests for dec_variance_lessthan_thresholds""" + + selector = sample_selector(options="provclass") + decide_comps = "provisional accept" + + # Outputs just the metrics used in this function {"variance explained"} + used_metrics = selection_nodes.dec_variance_lessthan_thresholds( + selector, "accepted", "rejected", decide_comps, only_used_metrics=True + ) + assert len(used_metrics - {"variance explained"}) == 0 + + # Standard execution where with all extra logging code and options changed from defaults + selector = selection_nodes.dec_variance_lessthan_thresholds( + selector, + "accepted", + "rejected", + decide_comps, + var_metric="normalized variance explained", + single_comp_threshold=0.05, + all_comp_threshold=0.09, + log_extra_report="report log", + log_extra_info="info log", + custom_node_label="custom label", + tag_ifTrue="test true tag", + tag_ifFalse="test false tag", + ) + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numTrue"] == 1 + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numFalse"] == 3 + assert f"Node {selector.current_node_idx}" in selector.component_status_table + + # No components with "NotALabel" classification so nothing selected and no + # Node 1 column not created in component_status_table + selector.current_node_idx = 1 + selector = selection_nodes.dec_variance_lessthan_thresholds( + selector, "accepted", "rejected", "NotAClassification" + ) + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numTrue"] == 0 + assert f"Node {selector.current_node_idx}" not in selector.component_status_table + + # Running without specifying logging text generates internal text + selector = sample_selector(options="provclass") + selector = selection_nodes.dec_variance_lessthan_thresholds( + selector, "accepted", "rejected", decide_comps + ) + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numTrue"] == 0 + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numFalse"] == 4 + assert f"Node {selector.current_node_idx}" in selector.component_status_table + + +def test_calc_kappa_rho_elbows_kundu(): + """Smoke tests for calc_kappa_rho_elbows_kundu""" + + # Standard use of this function requires some components to be "unclassified" + selector = sample_selector(options="unclass") + decide_comps = "all" + + # Outputs just the metrics used in this function {"variance explained"} + used_metrics = selection_nodes.calc_kappa_rho_elbows_kundu( + selector, decide_comps, only_used_metrics=True + ) + assert len(used_metrics - {"kappa", "rho"}) == 0 + + # Standard call to this function. 
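+    # As context for the assertions below: a rough sketch of the expected
+    # elbow computation (the exact rules are in selection_utils and may
+    # differ) is elbow detection on the sorted metric values, e.g.
+    #
+    #   kappas = np.sort(selector.component_table["kappa"].to_numpy())[::-1]
+    #   kappa_elbow = selection_utils.getelbow(kappas, return_val=True)
+    #
+    # with additional F-statistic thresholds folded in for kappa, which is
+    # why the resulting elbows should be positive numbers.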
+ selector = selection_nodes.calc_kappa_rho_elbows_kundu( + selector, + decide_comps, + log_extra_report="report log", + log_extra_info="info log", + custom_node_label="custom label", + ) + calc_cross_comp_metrics = {"kappa_elbow_kundu", "rho_elbow_kundu", "varex_upper_p"} + output_calc_cross_comp_metrics = set( + selector.tree["nodes"][selector.current_node_idx]["outputs"]["calc_cross_comp_metrics"] + ) + # Confirming the intended metrics are added to outputs and they have non-zero values + assert len(output_calc_cross_comp_metrics - calc_cross_comp_metrics) == 0 + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["kappa_elbow_kundu"] > 0 + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["rho_elbow_kundu"] > 0 + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["varex_upper_p"] > 0 + + # Run warning logging code for if any of the cross_component_metrics already existed and would be over-written + selector = sample_selector(options="unclass") + selector.cross_component_metrics["kappa_elbow_kundu"] = 1 + selector.cross_component_metrics["rho_elbow_kundu"] = 1 + selector.cross_component_metrics["varex_upper_p"] = 1 + decide_comps = "all" + selector = selection_nodes.calc_kappa_rho_elbows_kundu( + selector, + decide_comps, + log_extra_report="report log", + log_extra_info="info log", + custom_node_label="custom label", + ) + assert len(output_calc_cross_comp_metrics - calc_cross_comp_metrics) == 0 + assert selector.cross_component_metrics["kappa_elbow_kundu"] > 2 + assert selector.cross_component_metrics["rho_elbow_kundu"] > 2 + assert selector.cross_component_metrics["varex_upper_p"] > 2 + + # Run with kappa_only==True + selector = sample_selector(options="unclass") + selector = selection_nodes.calc_kappa_rho_elbows_kundu(selector, decide_comps, kappa_only=True) + calc_cross_comp_metrics = {"kappa_elbow_kundu", "varex_upper_p"} + output_calc_cross_comp_metrics = set( + selector.tree["nodes"][selector.current_node_idx]["outputs"]["calc_cross_comp_metrics"] + ) + # Confirming the intended metrics are added to outputs and they have non-zero values + assert len(output_calc_cross_comp_metrics - calc_cross_comp_metrics) == 0 + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["kappa_elbow_kundu"] > 0 + assert "rho_elbow_kundu" not in selector.tree["nodes"][selector.current_node_idx]["outputs"] + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["varex_upper_p"] > 0 + + # Run with rho_only==True + selector = sample_selector(options="unclass") + selector = selection_nodes.calc_kappa_rho_elbows_kundu(selector, decide_comps, rho_only=True) + calc_cross_comp_metrics = {"rho_elbow_kundu", "varex_upper_p"} + output_calc_cross_comp_metrics = set( + selector.tree["nodes"][selector.current_node_idx]["outputs"]["calc_cross_comp_metrics"] + ) + # Confirming the intended metrics are added to outputs and they have non-zero values + assert len(output_calc_cross_comp_metrics - calc_cross_comp_metrics) == 0 + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["rho_elbow_kundu"] > 0 + assert "kappa_elbow_kundu" not in selector.tree["nodes"][selector.current_node_idx]["outputs"] + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["varex_upper_p"] > 0 + + # Should run normally with both kappa_only and rho_only==True + selector = sample_selector(options="unclass") + selector = selection_nodes.calc_kappa_rho_elbows_kundu( + selector, decide_comps, kappa_only=True, rho_only=True + ) + 
calc_cross_comp_metrics = {"kappa_elbow_kundu", "rho_elbow_kundu", "varex_upper_p"}
+    output_calc_cross_comp_metrics = set(
+        selector.tree["nodes"][selector.current_node_idx]["outputs"]["calc_cross_comp_metrics"]
+    )
+    # Confirming the intended metrics are added to outputs and they have non-zero values
+    assert len(output_calc_cross_comp_metrics - calc_cross_comp_metrics) == 0
+    assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["kappa_elbow_kundu"] > 0
+    assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["rho_elbow_kundu"] > 0
+    assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["varex_upper_p"] > 0
+
+    # Log without running if no components of class decide_comps or no components
+    # classified as "unclassified" are in the component table
+    selector = sample_selector()
+    selector = selection_nodes.calc_kappa_rho_elbows_kundu(selector, "NotAClassification")
+    calc_cross_comp_metrics = {"kappa_elbow_kundu", "rho_elbow_kundu", "varex_upper_p"}
+    assert (
+        selector.tree["nodes"][selector.current_node_idx]["outputs"]["kappa_elbow_kundu"] is None
+    )
+    assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["rho_elbow_kundu"] is None
+    assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["varex_upper_p"] is None
+
+
+def test_dec_classification_doesnt_exist_smoke():
+    """Smoke tests for dec_classification_doesnt_exist"""
+
+    selector = sample_selector(options="unclass")
+    decide_comps = ["unclassified", "provisional accept"]
+
+    # Outputs just the metrics used in this function (an empty set in this case)
+    used_metrics = selection_nodes.dec_classification_doesnt_exist(
+        selector,
+        "rejected",
+        decide_comps,
+        class_comp_exists="provisional accept",
+        only_used_metrics=True,
+    )
+    assert len(used_metrics) == 0
+
+    # Standard execution with all extra logging code and options changed from defaults
+    selector = selection_nodes.dec_classification_doesnt_exist(
+        selector,
+        "accepted",
+        decide_comps,
+        class_comp_exists="provisional accept",
+        log_extra_report="report log",
+        log_extra_info="info log",
+        custom_node_label="custom label",
+        tag_ifTrue="test true tag",
+    )
+    assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numTrue"] == 0
+    # Lists the number of components in decide_comps in numFalse
+    assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numFalse"] == 17
+    # During normal execution, it will find provisionally accepted components
+    # and do nothing, so another node column isn't created
+    assert f"Node {selector.current_node_idx}" not in selector.component_status_table
+
+    # No components have the "NotALabel" classification, so nothing is selected and
+    # no "Node 1" column is created in component_status_table
+    # Running without specifying logging text generates internal text
+    selector.current_node_idx = 1
+    selector = selection_nodes.dec_classification_doesnt_exist(
+        selector,
+        "accepted",
+        "NotAClassification",
+        class_comp_exists="provisional accept",
+    )
+    assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numTrue"] == 0
+    assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numFalse"] == 0
+    assert f"Node {selector.current_node_idx}" not in selector.component_status_table
+
+    # The other normal state is to change classifications when there are
+    # no components with class_comp_exists. Since the component_table
+    # initialized by sample_selector has no "provisional reject"
+    # components, that classification is used for class_comp_exists
+    selector = sample_selector()
+    decide_comps = "accepted"
+    selector = selection_nodes.dec_classification_doesnt_exist(
+        selector,
+        "changed accepted",
+        decide_comps,
+        class_comp_exists="provisional reject",
+        tag_ifTrue="test true tag",
+    )
+    assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numTrue"] == 17
+    assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numFalse"] == 0
+    assert f"Node {selector.current_node_idx}" in selector.component_status_table
+
+
+def test_calc_varex_thresh_smoke():
+    """Smoke tests for calc_varex_thresh"""
+
+    # Standard use of this function requires some components to be "provisional accept"
+    selector = sample_selector(options="provclass")
+    decide_comps = "provisional accept"
+
+    # Outputs just the metrics used in this function {"variance explained"}
+    used_metrics = selection_nodes.calc_varex_thresh(
+        selector, decide_comps, thresh_label="upper", percentile_thresh=90, only_used_metrics=True
+    )
+    assert len(used_metrics - set(["variance explained"])) == 0
+
+    # Standard call to this function.
+    selector = selection_nodes.calc_varex_thresh(
+        selector,
+        decide_comps,
+        thresh_label="upper",
+        percentile_thresh=90,
+        log_extra_report="report log",
+        log_extra_info="info log",
+        custom_node_label="custom label",
+    )
+    calc_cross_comp_metrics = {"varex_upper_thresh", "upper_perc"}
+    output_calc_cross_comp_metrics = set(
+        selector.tree["nodes"][selector.current_node_idx]["outputs"]["calc_cross_comp_metrics"]
+    )
+    # Confirming the intended metrics are added to outputs and they have non-zero values
+    assert len(output_calc_cross_comp_metrics - calc_cross_comp_metrics) == 0
+    assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["varex_upper_thresh"] > 0
+    assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["upper_perc"] == 90
+
+    # Standard call, but thresh_label is ""
+    selector = selection_nodes.calc_varex_thresh(
+        selector,
+        decide_comps,
+        thresh_label="",
+        percentile_thresh=90,
+        log_extra_report="report log",
+        log_extra_info="info log",
+        custom_node_label="custom label",
+    )
+    calc_cross_comp_metrics = {"varex_thresh", "perc"}
+    output_calc_cross_comp_metrics = set(
+        selector.tree["nodes"][selector.current_node_idx]["outputs"]["calc_cross_comp_metrics"]
+    )
+    # Confirming the intended metrics are added to outputs and they have non-zero values
+    assert len(output_calc_cross_comp_metrics - calc_cross_comp_metrics) == 0
+    assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["varex_thresh"] > 0
+    assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["perc"] == 90
+
+    # Run warning logging code to see if any of the cross_component_metrics already existed and would be overwritten
+    selector = sample_selector(options="provclass")
+    selector.cross_component_metrics["varex_upper_thresh"] = 1
+    selector.cross_component_metrics["upper_perc"] = 1
+    decide_comps = "provisional accept"
+    selector = selection_nodes.calc_varex_thresh(
+        selector,
+        decide_comps,
+        thresh_label="upper",
+        percentile_thresh=90,
+        log_extra_report="report log",
+        log_extra_info="info log",
+        custom_node_label="custom label",
+    )
+    assert len(output_calc_cross_comp_metrics - calc_cross_comp_metrics) == 0
+    assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["varex_upper_thresh"] > 0
+    assert
selector.tree["nodes"][selector.current_node_idx]["outputs"]["upper_perc"] == 90 + + # Raise error if percentile_thresh isn't a number + selector = sample_selector(options="provclass") + with pytest.raises(ValueError): + selector = selection_nodes.calc_varex_thresh( + selector, decide_comps, thresh_label="upper", percentile_thresh="NotANumber" + ) + + # Raise error if percentile_thresh isn't a number between 0 & 100 + selector = sample_selector(options="provclass") + with pytest.raises(ValueError): + selector = selection_nodes.calc_varex_thresh( + selector, decide_comps, thresh_label="upper", percentile_thresh=101 + ) + + # Log without running if no components of decide_comps are in the component table + selector = sample_selector() + selector = selection_nodes.calc_varex_thresh( + selector, decide_comps="NotAClassification", thresh_label="upper", percentile_thresh=90 + ) + assert ( + selector.tree["nodes"][selector.current_node_idx]["outputs"]["varex_upper_thresh"] == None + ) + # percentile_thresh doesn't depend on components and is assigned + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["upper_perc"] == 90 + + +def test_calc_extend_factor_smoke(): + """Smoke tests for calc_extend_factor""" + + selector = sample_selector() + + # Outputs just the metrics used in this function {""} + used_metrics = selection_nodes.calc_extend_factor(selector, only_used_metrics=True) + assert used_metrics == set() + + # Standard call to this function. + selector = selection_nodes.calc_extend_factor( + selector, + log_extra_report="report log", + log_extra_info="info log", + custom_node_label="custom label", + ) + calc_cross_comp_metrics = {"extend_factor"} + output_calc_cross_comp_metrics = set( + selector.tree["nodes"][selector.current_node_idx]["outputs"]["calc_cross_comp_metrics"] + ) + # Confirming the intended metrics are added to outputs and they have non-zero values + assert len(output_calc_cross_comp_metrics - calc_cross_comp_metrics) == 0 + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["extend_factor"] > 0 + + # Run warning logging code for if any of the cross_component_metrics already existed and would be over-written + selector = sample_selector() + selector.cross_component_metrics["extend_factor"] = 1.0 + selector = selection_nodes.calc_extend_factor(selector) + + assert len(output_calc_cross_comp_metrics - calc_cross_comp_metrics) == 0 + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["extend_factor"] > 0 + + # Run with extend_factor defined as an input + selector = sample_selector() + selector = selection_nodes.calc_extend_factor(selector, extend_factor=1.2) + + assert len(output_calc_cross_comp_metrics - calc_cross_comp_metrics) == 0 + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["extend_factor"] == 1.2 + + +def test_max_good_meanmetricrank_smoke(): + """Smoke tests for calc_max_good_meanmetricrank""" + + # Standard use of this function requires some components to be "provisional accept" + selector = sample_selector("provclass") + # This function requires "extend_factor" to already be defined + selector.cross_component_metrics["extend_factor"] = 2.0 + + # Outputs just the metrics used in this function {""} + used_metrics = selection_nodes.calc_max_good_meanmetricrank( + selector, "provisional accept", only_used_metrics=True + ) + assert used_metrics == set() + + # Standard call to this function. 
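+    # Rough sketch of the expected behavior (the exact formula is in
+    # selection_nodes.calc_max_good_meanmetricrank and may differ): the
+    # threshold scales the number of selected components by the precomputed
+    # extend_factor, approximately
+    #
+    #   max_good_meanmetricrank = extend_factor * len(comps2use)
+    #
+    # which is why "extend_factor" must already be in cross_component_metrics
+    # and why a missing extend_factor raises a ValueError below.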
+ selector = selection_nodes.calc_max_good_meanmetricrank( + selector, + "provisional accept", + log_extra_report="report log", + log_extra_info="info log", + custom_node_label="custom label", + ) + calc_cross_comp_metrics = {"max_good_meanmetricrank"} + output_calc_cross_comp_metrics = set( + selector.tree["nodes"][selector.current_node_idx]["outputs"]["calc_cross_comp_metrics"] + ) + # Confirming the intended metrics are added to outputs and they have non-zero values + assert len(output_calc_cross_comp_metrics - calc_cross_comp_metrics) == 0 + assert ( + selector.tree["nodes"][selector.current_node_idx]["outputs"]["max_good_meanmetricrank"] > 0 + ) + + # Run warning logging code for if any of the cross_component_metrics already existed and would be over-written + selector = sample_selector("provclass") + selector.cross_component_metrics["max_good_meanmetricrank"] = 10 + selector.cross_component_metrics["extend_factor"] = 2.0 + + selector = selection_nodes.calc_max_good_meanmetricrank(selector, "provisional accept") + + assert len(output_calc_cross_comp_metrics - calc_cross_comp_metrics) == 0 + assert ( + selector.tree["nodes"][selector.current_node_idx]["outputs"]["max_good_meanmetricrank"] > 0 + ) + + # Raise an error if "extend_factor" isn't pre-defined + selector = sample_selector("provclass") + with pytest.raises(ValueError): + selector = selection_nodes.calc_max_good_meanmetricrank(selector, "provisional accept") + + # Log without running if no components of decide_comps are in the component table + selector = sample_selector() + selector.cross_component_metrics["extend_factor"] = 2.0 + + selector = selection_nodes.calc_max_good_meanmetricrank(selector, "NotAClassification") + assert ( + selector.tree["nodes"][selector.current_node_idx]["outputs"]["max_good_meanmetricrank"] + == None + ) + + +def test_calc_varex_kappa_ratio_smoke(): + """Smoke tests for calc_varex_kappa_ratio""" + + # Standard use of this function requires some components to be "provisional accept" + selector = sample_selector("provclass") + + # Outputs just the metrics used in this function {""} + used_metrics = selection_nodes.calc_varex_kappa_ratio( + selector, "provisional accept", only_used_metrics=True + ) + assert used_metrics == {"kappa", "variance explained"} + + # Standard call to this function. 
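+    # For orientation, the computation is expected to look roughly like this
+    # (see selection_nodes.calc_varex_kappa_ratio for the exact version):
+    #
+    #   kappa_rate = (kappa.max() - kappa.min()) / (varex.max() - varex.min())
+    #   component_table["varex kappa ratio"] = kappa_rate * varex / kappa
+    #
+    # over the selected components. The new per-component column is why a
+    # pre-existing "varex kappa ratio" column raises an error below.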
+ selector = selection_nodes.calc_varex_kappa_ratio( + selector, + "provisional accept", + log_extra_report="report log", + log_extra_info="info log", + custom_node_label="custom label", + ) + calc_cross_comp_metrics = {"kappa_rate"} + output_calc_cross_comp_metrics = set( + selector.tree["nodes"][selector.current_node_idx]["outputs"]["calc_cross_comp_metrics"] + ) + # Confirming the intended metrics are added to outputs and they have non-zero values + assert len(output_calc_cross_comp_metrics - calc_cross_comp_metrics) == 0 + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["kappa_rate"] > 0 + + # Run warning logging code for if any of the cross_component_metrics already existed and would be over-written + selector = sample_selector("provclass") + selector.cross_component_metrics["kappa_rate"] = 10 + selector = selection_nodes.calc_varex_kappa_ratio(selector, "provisional accept") + + assert len(output_calc_cross_comp_metrics - calc_cross_comp_metrics) == 0 + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["kappa_rate"] > 0 + + # Log without running if no components of decide_comps are in the component table + selector = sample_selector() + selector = selection_nodes.calc_varex_kappa_ratio(selector, "NotAClassification") + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["kappa_rate"] == None + + # Raise error if "varex kappa ratio" is already in component_table + selector = sample_selector("provclass") + selector.component_table["varex kappa ratio"] = 42 + with pytest.raises(ValueError): + selector = selection_nodes.calc_varex_kappa_ratio(selector, "provisional accept") + + +def test_calc_revised_meanmetricrank_guesses_smoke(): + """Smoke tests for calc_revised_meanmetricrank_guesses""" + + # Standard use of this function requires some components to be "provisional accept" + selector = sample_selector("provclass") + selector.cross_component_metrics["kappa_elbow_kundu"] = 19.1 + selector.cross_component_metrics["rho_elbow_kundu"] = 15.2 + + # Outputs just the metrics used in this function {""} + used_metrics = selection_nodes.calc_revised_meanmetricrank_guesses( + selector, + ["provisional accept", "provisional reject", "unclassified"], + only_used_metrics=True, + ) + assert used_metrics == { + "kappa", + "dice_FT2", + "signal-noise_t", + "countnoise", + "countsigFT2", + "rho", + } + + # Standard call to this function. 
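+    # This mirrors logic removed from tedica.py earlier in this diff:
+    # num_acc_guess estimates how many components will end up accepted,
+    # roughly
+    #
+    #   num_acc_guess = int(np.mean([
+    #       np.sum((kappa > kappa_elbow) & (rho < rho_elbow)),
+    #       np.sum(kappa > kappa_elbow),
+    #   ]))
+    #
+    # with conservative_guess = num_acc_guess / restrict_factor, plus a new
+    # d_table_score_node{idx} rank column added to the component table.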
+ selector = selection_nodes.calc_revised_meanmetricrank_guesses( + selector, + ["provisional accept", "provisional reject", "unclassified"], + log_extra_report="report log", + log_extra_info="info log", + custom_node_label="custom label", + ) + calc_cross_comp_metrics = {"num_acc_guess", "conservative_guess", "restrict_factor"} + output_calc_cross_comp_metrics = set( + selector.tree["nodes"][selector.current_node_idx]["outputs"]["calc_cross_comp_metrics"] + ) + # Confirming the intended metrics are added to outputs and they have non-zero values + assert len(output_calc_cross_comp_metrics - calc_cross_comp_metrics) == 0 + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["num_acc_guess"] > 0 + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["conservative_guess"] > 0 + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["restrict_factor"] == 2 + + # Run warning logging code for if any of the cross_component_metrics already existed and would be over-written + selector = sample_selector("provclass") + selector.cross_component_metrics["kappa_elbow_kundu"] = 19.1 + selector.cross_component_metrics["rho_elbow_kundu"] = 15.2 + selector.cross_component_metrics["num_acc_guess"] = 10 + selector.cross_component_metrics["conservative_guess"] = 10 + selector.cross_component_metrics["restrict_factor"] = 5 + selector = selection_nodes.calc_revised_meanmetricrank_guesses( + selector, ["provisional accept", "provisional reject", "unclassified"] + ) + + assert len(output_calc_cross_comp_metrics - calc_cross_comp_metrics) == 0 + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["num_acc_guess"] > 0 + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["conservative_guess"] > 0 + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["restrict_factor"] == 2 + + # Log without running if no components of decide_comps are in the component table + selector = sample_selector() + selector.cross_component_metrics["kappa_elbow_kundu"] = 19.1 + selector.cross_component_metrics["rho_elbow_kundu"] = 15.2 + selector = selection_nodes.calc_revised_meanmetricrank_guesses(selector, "NotAClassification") + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["num_acc_guess"] == None + assert ( + selector.tree["nodes"][selector.current_node_idx]["outputs"]["conservative_guess"] == None + ) + + # Raise error if "d_table_score_node0" is already in component_table + selector = sample_selector("provclass") + selector.cross_component_metrics["kappa_elbow_kundu"] = 19.1 + selector.cross_component_metrics["rho_elbow_kundu"] = 15.2 + selector.component_table["d_table_score_node0"] = 42 + with pytest.raises(ValueError): + selector = selection_nodes.calc_revised_meanmetricrank_guesses( + selector, ["provisional accept", "provisional reject", "unclassified"] + ) + + # Raise error if restrict_factor isn't a number + selector = sample_selector("provclass") + selector.cross_component_metrics["kappa_elbow_kundu"] = 19.1 + selector.cross_component_metrics["rho_elbow_kundu"] = 15.2 + with pytest.raises(ValueError): + selector = selection_nodes.calc_revised_meanmetricrank_guesses( + selector, + ["provisional accept", "provisional reject", "unclassified"], + restrict_factor="2", + ) + + # Raise error if kappa_elbow_kundu isn't in cross_component_metrics + selector = sample_selector("provclass") + selector.cross_component_metrics["rho_elbow_kundu"] = 15.2 + with pytest.raises(ValueError): + selector = 
selection_nodes.calc_revised_meanmetricrank_guesses(
+            selector, ["provisional accept", "provisional reject", "unclassified"]
+        )
diff --git a/tedana/tests/test_selection_utils.py b/tedana/tests/test_selection_utils.py
index 7288f8c6e..e4a4aafe3 100644
--- a/tedana/tests/test_selection_utils.py
+++ b/tedana/tests/test_selection_utils.py
@@ -1,45 +1,372 @@
-"""Tests for the tedana.selection._utils module."""
+"""Tests for the tedana.selection.selection_utils module."""
 import numpy as np
 import pytest
+import os
+import pandas as pd
 
-from tedana.selection import _utils
+from tedana.selection.ComponentSelector import ComponentSelector
+from tedana.selection import selection_utils
+
+
+THIS_DIR = os.path.dirname(os.path.abspath(__file__))
+
+
+def sample_component_table(options=None):
+    """
+    Retrieves a sample component table
+
+    Options: Different strings will alter the contents of the component table
+    'provclass': Change the classifications to "provisional accept" for 4 components
+    'unclass': Change 4 classifications to "provisional accept", 2 to accepted,
+    2 to rejected, and the rest to "unclassified"
+    """
+
+    sample_fname = os.path.join(THIS_DIR, "data", "sample_comptable.tsv")
+    component_table = pd.read_csv(sample_fname, delimiter="\t")
+    component_table["classification_tags"] = ""
+    if options == "unclass":
+        component_table["classification"] = "unclassified"
+        component_table.loc[[16, 18], "classification"] = "accepted"
+        component_table.loc[[11, 13], "classification"] = "rejected"
+
+    if (options == "provclass") or (options == "unclass"):
+        component_table.loc[[2, 4, 6, 8], "classification"] = "provisional accept"
+    return component_table
+
+
+def sample_selector(options=None):
+    """
+    Retrieves a sample component table and initializes
+    a selector using that component table and the minimal tree
+
+    options: Different strings will alter the selector
+    'provclass': Change the classifications to "provisional accept" for 4 components
+    'unclass': Change 4 classifications to "provisional accept" and the rest to "unclassified"
+    """
+
+    tree = "minimal"
+
+    component_table = sample_component_table(options=options)
+
+    xcomp = {
+        "n_echos": 3,
+        "n_vols": 201,
+        "test_elbow": 21,
+    }
+    selector = ComponentSelector(tree, component_table, cross_component_metrics=xcomp)
+    selector.current_node_idx = 0
+
+    return selector
+
+
+##############################################################
+# Functions that are used for interacting with component_table
+##############################################################
+
+
+def test_selectcomps2use_succeeds():
+    """
+    Tests to make sure selectcomps2use runs with the full range of inputs.
+ Include tests to make sure the correct number of components are selected + from the pre-defined sample_comptable.tsv component table + """ + selector = sample_selector() + + decide_comps_options = [ + "rejected", + ["accepted"], + "all", + ["accepted", "rejected"], + 4, + [2, 6, 4], + "NotALabel", + ] + # Given the pre-defined comptable in sample_table_selector, these + # are the expected number of components that should be selected + # for each of the above decide_comps_options + decide_comps_lengths = [4, 17, 21, 21, 1, 3, 0] + + for idx, decide_comps in enumerate(decide_comps_options): + comps2use = selection_utils.selectcomps2use(selector, decide_comps) + assert ( + len(comps2use) == decide_comps_lengths[idx] + ), f"selectcomps2use test should select {decide_comps_lengths[idx]} with decide_comps={decide_comps}, but it selected {len(comps2use)}" + + +def test_selectcomps2use_fails(): + """Tests for selectcomps2use failure modes""" + selector = sample_selector() + + decide_comps_options = [ + 18.2, # no floats + [11.2, 13.1], # no list of floats + ["accepted", 4], # needs to be either int or string, not both + [4, 3, -1, 9], # no index should be < 0 + [2, 4, 6, 21], # no index should be > number of 0 indexed components + 22, ## no index should be > number of 0 indexed components + ] + for decide_comps in decide_comps_options: + with pytest.raises(ValueError): + selection_utils.selectcomps2use(selector, decide_comps) + + +def test_comptable_classification_changer_succeeds(): + """ + All conditions where comptable_classification_changer should run + Note: This confirms the function runs, but not that outputs are accurate + Also tests conditions where the warning logger is used, but doesn't + check the logger + """ + + def validate_changes(expected_classification): + # check every element that was supposed to change, did change + changeidx = decision_boolean.index[np.asarray(decision_boolean) == boolstate] + new_vals = selector.component_table.loc[changeidx, "classification"] + for val in new_vals: + assert val == expected_classification + + # Change if true + selector = sample_selector(options="provclass") + decision_boolean = selector.component_table["classification"] == "provisional accept" + boolstate = True + selector = selection_utils.comptable_classification_changer( + selector, boolstate, "accepted", decision_boolean, tag_if="testing_tag" + ) + validate_changes("accepted") + + # Run nochange condition + selector = sample_selector(options="provclass") + decision_boolean = selector.component_table["classification"] == "provisional accept" + selector = selection_utils.comptable_classification_changer( + selector, boolstate, "nochange", decision_boolean, tag_if="testing_tag" + ) + validate_changes("provisional accept") + + # Change if false + selector = sample_selector(options="provclass") + decision_boolean = selector.component_table["classification"] != "provisional accept" + boolstate = False + selector = selection_utils.comptable_classification_changer( + selector, boolstate, "rejected", decision_boolean, tag_if="testing_tag1, testing_tag2" + ) + validate_changes("rejected") + + # Change from accepted to rejected, which should output a warning (test if the warning appears?) 
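+    # The warning is expected because reclassifying components out of
+    # "accepted" or into "rejected" changes which time series get regressed
+    # out during denoising; the dont_warn_reclassify=True option (exercised
+    # below) suppresses it.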
+    selector = sample_selector(options="provclass")
+    decision_boolean = selector.component_table["classification"] == "accepted"
+    boolstate = True
+    selector = selection_utils.comptable_classification_changer(
+        selector, boolstate, "rejected", decision_boolean, tag_if="testing_tag"
+    )
+    validate_changes("rejected")
+
+    # Change from rejected to accepted and suppress warning
+    selector = sample_selector(options="provclass")
+    decision_boolean = selector.component_table["classification"] == "rejected"
+    boolstate = True
+    selector = selection_utils.comptable_classification_changer(
+        selector,
+        boolstate,
+        "accepted",
+        decision_boolean,
+        tag_if="testing_tag",
+        dont_warn_reclassify=True,
+    )
+    validate_changes("accepted")
+
+
+def test_change_comptable_classifications_succeeds():
+    """All conditions where change_comptable_classifications should run"""
+
+    selector = sample_selector(options="provclass")
+
+    # Given the rho values in the sample table, decision_boolean should have 2 True and 2 False values
+    comps2use = selection_utils.selectcomps2use(selector, "provisional accept")
+    rho = selector.component_table.loc[comps2use, "rho"]
+    decision_boolean = rho < 13.5
+
+    selector, numTrue, numFalse = selection_utils.change_comptable_classifications(
+        selector,
+        "accepted",
+        "nochange",
+        decision_boolean,
+        tag_ifTrue="testing_tag1",
+        tag_ifFalse="testing_tag2",
+    )
+
+    assert numTrue == 2
+    assert numFalse == 2
+    # check that every element that was supposed to change did change
+    changeidx = decision_boolean.index[np.asarray(decision_boolean) == True]
+    new_vals = selector.component_table.loc[changeidx, "classification"]
+    for val in new_vals:
+        assert val == "accepted"
+
+
+def test_clean_dataframe_smoke():
+    """A smoke test for the clean_dataframe function"""
+    component_table = sample_component_table(options="comptable")
+    selection_utils.clean_dataframe(component_table)
+
+
+#################################################
+# Functions to validate inputs or log information
+#################################################
+
+
+def test_confirm_metrics_exist_succeeds():
+    """Tests that confirm_metrics_exist runs with correct inputs"""
+    component_table = sample_component_table(options="comptable")
+
+    # Testing for metrics that exist with 1 or 2 necessary metrics in a set
+    # Returns True if an undefined metric exists so using "assert not"
+    assert not selection_utils.confirm_metrics_exist(component_table, {"kappa"})
+    assert not selection_utils.confirm_metrics_exist(component_table, {"kappa", "rho"})
+
+
+def test_confirm_metrics_exist_fails():
+    """Tests confirm_metrics_exist for failure conditions"""
+
+    component_table = sample_component_table(options="comptable")
+
+    # Should fail with an error that includes the default or a pre-defined function name
+    with pytest.raises(ValueError):
+        selection_utils.confirm_metrics_exist(component_table, {"kappa", "quack"})
+    with pytest.raises(ValueError):
+        selection_utils.confirm_metrics_exist(
+            component_table, {"kappa", "mooo"}, function_name="farm"
+        )
+
+
+def test_log_decision_tree_step_smoke():
+    """A smoke test for log_decision_tree_step"""
+
+    selector = sample_selector()
+
+    # Standard run for logging classification changes
+    comps2use = selection_utils.selectcomps2use(selector, "reject")
+    selection_utils.log_decision_tree_step(
+        "Step 0: test_function_name",
+        comps2use,
+        decide_comps="reject",
+        numTrue=5,
+        numFalse=2,
+        ifTrue="accept",
+        ifFalse="reject",
+    )
+
+    # Standard use for logging cross_component_metric calculation
+    outputs = {
+ "calc_cross_comp_metrics": [ + "kappa_elbow_kundu", + "rho_elbow_kundu", + ], + "kappa_elbow_kundu": 45, + "rho_elbow_kundu": 12, + } + selection_utils.log_decision_tree_step( + "Step 0: test_function_name", comps2use, calc_outputs=outputs + ) + + # Puts a warning in the logger if outputs doesn't have a cross_component_metrics field + outputs = { + "kappa_elbow_kundu": 45, + "rho_elbow_kundu": 12, + } + selection_utils.log_decision_tree_step( + "Step 0: test_function_name", comps2use, calc_outputs=outputs + ) + + # Logging no components found with a specified classification + comps2use = selection_utils.selectcomps2use(selector, "NotALabel") + selection_utils.log_decision_tree_step( + "Step 0: test_function_name", + comps2use, + decide_comps="NotALabel", + numTrue=5, + numFalse=2, + ifTrue="accept", + ifFalse="reject", + ) + + +def test_log_classification_counts_smoke(): + """A smoke test for log_classification_counts""" + + component_table = sample_component_table(options="comptable") + + selection_utils.log_classification_counts(5, component_table) + + +####################################################### +# Calculations that are used in decision tree functions +####################################################### def test_getelbow_smoke(): """A smoke test for the getelbow function.""" arr = np.random.random(100) - idx = _utils.getelbow(arr) + idx = selection_utils.getelbow(arr) assert isinstance(idx, np.integer) - val = _utils.getelbow(arr, return_val=True) + val = selection_utils.getelbow(arr, return_val=True) assert isinstance(val, float) # Running an empty array should raise a ValueError arr = np.array([]) with pytest.raises(ValueError): - _utils.getelbow(arr) + selection_utils.getelbow(arr) # Running a 2D array should raise a ValueError arr = np.random.random((100, 100)) with pytest.raises(ValueError): - _utils.getelbow(arr) + selection_utils.getelbow(arr) -def test_getelbow_cons(): +def test_getelbow_cons_smoke(): """A smoke test for the getelbow_cons function.""" arr = np.random.random(100) - idx = _utils.getelbow_cons(arr) + idx = selection_utils.getelbow_cons(arr) assert isinstance(idx, np.integer) - val = _utils.getelbow_cons(arr, return_val=True) + val = selection_utils.getelbow_cons(arr, return_val=True) assert isinstance(val, float) # Running an empty array should raise a ValueError arr = np.array([]) with pytest.raises(ValueError): - _utils.getelbow_cons(arr) + selection_utils.getelbow_cons(arr) # Running a 2D array should raise a ValueError arr = np.random.random((100, 100)) with pytest.raises(ValueError): - _utils.getelbow_cons(arr) + selection_utils.getelbow_cons(arr) + + +def test_kappa_elbow_kundu_smoke(): + """A smoke test for the kappa_elbow_kundu function""" + + component_table = sample_component_table() + + kappa_elbow = selection_utils.kappa_elbow_kundu(component_table, n_echos=3) + assert isinstance(kappa_elbow, float) + + # For the sample component_table, when n_echos=6, there are fewer than 5 components + # that are greater than an f01 threshold and a different condition in kappa_elbow_kundu is run + kappa_elbow = selection_utils.kappa_elbow_kundu(component_table, n_echos=6) + assert isinstance(kappa_elbow, float) + + +def test_get_extend_factor_smoke(): + """A smoke test for get_extend_factor""" + + val = selection_utils.get_extend_factor(extend_factor=int(10)) + assert isinstance(val, float) + + for n_vols in [80, 100, 120]: + val = selection_utils.get_extend_factor(n_vols=n_vols) + assert isinstance(val, float) + + with pytest.raises(ValueError): + 
selection_utils.get_extend_factor() diff --git a/tedana/workflows/t2smap.py b/tedana/workflows/t2smap.py index f2cee3497..16add4fad 100644 --- a/tedana/workflows/t2smap.py +++ b/tedana/workflows/t2smap.py @@ -328,6 +328,7 @@ def t2smap_workflow( ], } io_generator.save_file(derivative_metadata, "data description json") + io_generator.save_self() LGR.info("Workflow completed") utils.teardown_loggers() diff --git a/tedana/workflows/tedana.py b/tedana/workflows/tedana.py index f2c66ac17..fb5df83f0 100644 --- a/tedana/workflows/tedana.py +++ b/tedana/workflows/tedana.py @@ -153,6 +153,17 @@ def _get_parser(): ), default="aic", ) + optional.add_argument( + "--tree", + dest="tree", + help=( + "Decision tree to use. You may use a " + "packaged tree (kundu, minimal) or supply a JSON " + "file which matches the decision tree file " + "specification." + ), + default="minimal", + ) optional.add_argument( "--seed", dest="fixed_seed", @@ -270,13 +281,6 @@ def _get_parser(): default=False, ) optional.add_argument( - "--quiet", dest="quiet", help=argparse.SUPPRESS, action="store_true", default=False - ) - optional.add_argument("-v", "--version", action="version", version=verstr) - parser._action_groups.append(optional) - - rerungrp = parser.add_argument_group("Arguments for Rerunning the Workflow") - rerungrp.add_argument( "--t2smap", dest="t2smap", metavar="FILE", @@ -284,7 +288,7 @@ def _get_parser(): help=("Precalculated T2* map in the same space as the input data."), default=None, ) - rerungrp.add_argument( + optional.add_argument( "--mix", dest="mixm", metavar="FILE", @@ -292,27 +296,19 @@ def _get_parser(): help=("File containing mixing matrix. If not provided, ME-PCA & ME-ICA is done."), default=None, ) - rerungrp.add_argument( - "--ctab", - dest="ctab", - metavar="FILE", - type=lambda x: is_valid_file(parser, x), - help=( - "File containing a component table from which " - "to extract pre-computed classifications. " - "Requires --mix." - ), - default=None, + + optional.add_argument( + "--quiet", dest="quiet", help=argparse.SUPPRESS, action="store_true", default=False ) - rerungrp.add_argument( - "--manacc", - dest="manacc", - metavar="INT", - type=int, - nargs="+", - help=("List of manually accepted components. Requires --ctab and --mix."), - default=None, + parser.add_argument( + "--force", + "-f", + dest="force", + action="store_true", + help="Force overwriting of files. Default False.", ) + optional.add_argument("-v", "--version", action="version", version=verstr) + parser._action_groups.append(optional) return parser @@ -326,6 +322,7 @@ def tedana_workflow( prefix="", fittype="loglin", combmode="t2s", + tree="minimal", tedpca="aic", fixed_seed=42, maxit=500, @@ -338,10 +335,9 @@ def tedana_workflow( low_mem=False, debug=False, quiet=False, + force=False, t2smap=None, mixm=None, - ctab=None, - manacc=None, ): """ Run the "canonical" TE-Dependent ANAlysis workflow. @@ -394,15 +390,6 @@ def tedana_workflow( mixm : :obj:`str` or None, optional File containing mixing matrix, to be used when re-running the workflow. If not provided, ME-PCA and ME-ICA are done. Default is None. - ctab : :obj:`str` or None, optional - File containing component table from which to extract pre-computed - classifications, to be used with 'mixm' when re-running the workflow. - Default is None. - manacc : :obj:`list` of :obj:`int` or None, optional - List of manually accepted components. Can be a list of the components - numbers or None. - If provided, this parameter requires ``mixm`` and ``ctab`` to be provided as well. 
- Default is None. Other Parameters ---------------- @@ -487,6 +474,7 @@ def tedana_workflow( out_dir=out_dir, prefix=prefix, config="auto", + force=force, verbose=verbose, ) @@ -516,26 +504,6 @@ def tedana_workflow( elif mixm is not None: raise IOError("Argument 'mixm' must be an existing file.") - if ctab is not None and op.isfile(ctab): - ctab = op.abspath(ctab) - # Allow users to re-run on same folder - metrics_name = io_generator.get_name("ICA metrics tsv") - if ctab != metrics_name: - shutil.copyfile(ctab, metrics_name) - shutil.copyfile(ctab, op.join(io_generator.out_dir, op.basename(ctab))) - elif ctab is not None: - raise IOError("Argument 'ctab' must be an existing file.") - - if ctab and not mixm: - LGR.warning("Argument 'ctab' requires argument 'mixm'.") - ctab = None - elif manacc is not None and (not mixm or not ctab): - LGR.warning("Argument 'manacc' requires arguments 'mixm' and 'ctab'.") - manacc = None - elif manacc is not None: - # coerce to list of integers - manacc = [int(m) for m in manacc] - if t2smap is not None and op.isfile(t2smap): t2smap_file = io_generator.get_name("t2star img") t2smap = op.abspath(t2smap) @@ -695,9 +663,8 @@ def tedana_workflow( "ICA", metrics=required_metrics, ) - comptable, metric_metadata = selection.kundu_selection_v2(comptable, n_echos, n_vols) - - n_bold_comps = comptable[comptable.classification == "accepted"].shape[0] + ica_selection = selection.automatic_selection(comptable, n_echos, n_vols, tree=tree) + n_bold_comps = ica_selection.n_bold_comps if (n_restarts < maxrestart) and (n_bold_comps == 0): LGR.warning("No BOLD components found. Re-attempting ICA.") elif n_bold_comps == 0: @@ -713,49 +680,48 @@ def tedana_workflow( mixing_file = io_generator.get_name("ICA mixing tsv") mmix = pd.read_table(mixing_file).values - if ctab is None: - required_metrics = [ - "kappa", - "rho", - "countnoise", - "countsigFT2", - "countsigFS0", - "dice_FT2", - "dice_FS0", - "signal-noise_t", - "variance explained", - "normalized variance explained", - "d_table_score", - ] - comptable = metrics.collect.generate_metrics( - catd, - data_oc, - mmix, - masksum_clf, - tes, - io_generator, - "ICA", - metrics=required_metrics, - ) - comptable, metric_metadata = selection.kundu_selection_v2(comptable, n_echos, n_vols) - else: - LGR.info("Using supplied component table for classification") - comptable = pd.read_table(ctab) - # Change rationale value of rows with NaN to empty strings - comptable.loc[comptable.rationale.isna(), "rationale"] = "" - - if manacc is not None: - comptable, metric_metadata = selection.manual_selection(comptable, acc=manacc) + required_metrics = [ + "kappa", + "rho", + "countnoise", + "countsigFT2", + "countsigFS0", + "dice_FT2", + "dice_FS0", + "signal-noise_t", + "variance explained", + "normalized variance explained", + "d_table_score", + ] + comptable = metrics.collect.generate_metrics( + catd, + data_oc, + mmix, + masksum_clf, + tes, + io_generator, + "ICA", + metrics=required_metrics, + ) + ica_selection = selection.automatic_selection( + comptable, + n_echos, + n_vols, + tree=tree, + ) - # Write out ICA files. 
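+    # ica_selection is a ComponentSelector object; the component table,
+    # status table, cross component metrics, and decision tree that it
+    # updates are all written out below via ica_selection.to_files()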
+ # TODO The ICA mixing matrix should be written out after it is created + # It is currently being writen after component selection is done + # and rewritten if an existing mixing matrix is given as an input comp_names = comptable["Component"].values mixing_df = pd.DataFrame(data=mmix, columns=comp_names) io_generator.save_file(mixing_df, "ICA mixing tsv") betas_oc = utils.unmask(computefeats2(data_oc, mmix, mask_denoise), mask_denoise) io_generator.save_file(betas_oc, "z-scored ICA components img") - # Save component table and associated json - io_generator.save_file(comptable, "ICA metrics tsv") + # Save component selector and tree + ica_selection.to_files(io_generator) + # Save metrics and metadata metric_metadata = metrics.collect.get_metadata(comptable) io_generator.save_file(metric_metadata, "ICA metrics json") @@ -769,25 +735,27 @@ def tedana_workflow( "Description": "ICA fit to dimensionally-reduced optimally combined data.", "Method": "tedana", } - with open(io_generator.get_name("ICA decomposition json"), "w") as fo: - json.dump(decomp_metadata, fo, sort_keys=True, indent=4) + io_generator.save_file(decomp_metadata, "ICA decomposition json") - if comptable[comptable.classification == "accepted"].shape[0] == 0: + if ica_selection.n_bold_comps == 0: LGR.warning("No BOLD components detected! Please check data and results!") + # TODO: un-hack separate comptable + comptable = ica_selection.component_table + mmix_orig = mmix.copy() if tedort: - acc_idx = comptable.loc[~comptable.classification.str.contains("rejected")].index.values - rej_idx = comptable.loc[comptable.classification.str.contains("rejected")].index.values - acc_ts = mmix[:, acc_idx] - rej_ts = mmix[:, rej_idx] + comps_accepted = ica_selection.accepted_comps + comps_rejected = ica_selection.rejected_comps + acc_ts = mmix[:, comps_accepted] + rej_ts = mmix[:, comps_rejected] betas = np.linalg.lstsq(acc_ts, rej_ts, rcond=None)[0] pred_rej_ts = np.dot(acc_ts, betas) resid = rej_ts - pred_rej_ts - mmix[:, rej_idx] = resid + mmix[:, comps_rejected] = resid comp_names = [ io.add_decomp_prefix(comp, prefix="ica", max_value=comptable.index.max()) - for comp in comptable.index.values + for comp in range(ica_selection.n_comps) ] mixing_df = pd.DataFrame(data=mmix, columns=comp_names) io_generator.save_file(mixing_df, "ICA orthogonalized mixing tsv") @@ -812,6 +780,9 @@ def tedana_workflow( if verbose: io.writeresults_echoes(catd, mmix, mask_denoise, comptable, io_generator) + # Write out registry of outputs + io_generator.save_self() + # Write out BIDS-compatible description file derivative_metadata = { "Name": "tedana Outputs", diff --git a/tedana/workflows/tedana_reclassify.py b/tedana/workflows/tedana_reclassify.py new file mode 100644 index 000000000..f4e916594 --- /dev/null +++ b/tedana/workflows/tedana_reclassify.py @@ -0,0 +1,439 @@ +""" +Run the reclassification workflow for a previous tedana run +""" +import argparse +import datetime +import json +import logging +import os +import os.path as op +import shutil +import sys +from glob import glob + +import numpy as np +import pandas as pd +from nilearn.masking import compute_epi_mask +from scipy import stats +from threadpoolctl import threadpool_limits + +import tedana.gscontrol as gsc +from tedana import ( + __version__, + io, + reporting, + selection, + utils, + stats, +) +from tedana.workflows.parser_utils import is_valid_file + +LGR = logging.getLogger("GENERAL") +RepLGR = logging.getLogger("REPORT") +RefLGR = logging.getLogger("REFERENCES") + + +def main(): + from ..info 
import __version__
+
+    verstr = "tedana v{}".format(__version__)
+    parser = argparse.ArgumentParser()
+    # argparse does not accept an explicit dest for positional arguments;
+    # the value is available as options.registry
+    parser.add_argument(
+        "registry",
+        help="File registry from a previous tedana run",
+    )
+    parser.add_argument(
+        "--manacc",
+        dest="manual_accept",
+        nargs="+",
+        type=int,
+        help="Component indices to accept (zero-indexed).",
+    )
+    parser.add_argument(
+        "--manrej",
+        dest="manual_reject",
+        nargs="+",
+        type=int,
+        help="Component indices to reject (zero-indexed).",
+    )
+    parser.add_argument(
+        "--config",
+        dest="config",
+        help="File naming configuration. Default auto (prepackaged).",
+        default="auto",
+    )
+    parser.add_argument(
+        "--out-dir",
+        dest="out_dir",
+        type=str,
+        metavar="PATH",
+        help="Output directory.",
+        default=".",
+    )
+    parser.add_argument(
+        "--prefix", dest="prefix", type=str, help="Prefix for filenames generated.", default=""
+    )
+    parser.add_argument(
+        "--convention",
+        dest="convention",
+        action="store",
+        choices=["orig", "bids"],
+        help=("Filenaming convention. bids will use the latest BIDS derivatives version."),
+        default="bids",
+    )
+    parser.add_argument(
+        "--tedort",
+        dest="tedort",
+        action="store_true",
+        help=("Orthogonalize rejected components w.r.t. accepted components prior to denoising."),
+        default=False,
+    )
+    parser.add_argument(
+        "--mir",
+        dest="mir",
+        action="store_true",
+        help="Run minimum image regression.",
+    )
+    parser.add_argument(
+        "--no-reports",
+        dest="no_reports",
+        action="store_true",
+        help=(
+            "Do not create the figures folder with static component "
+            "maps, timecourse plots and other diagnostic "
+            "images, and do not build the interactive "
+            "reporting framework."
+        ),
+        default=False,
+    )
+    parser.add_argument(
+        "--png-cmap", dest="png_cmap", type=str, help="Colormap for figures", default="coolwarm"
+    )
+    parser.add_argument(
+        "--debug",
+        dest="debug",
+        action="store_true",
+        help=(
+            "Logs in the terminal will have increased "
+            "verbosity, and will also be written into "
+            "a .tsv file in the output directory."
+        ),
+        default=False,
+    )
+    parser.add_argument(
+        "--force",
+        "-f",
+        dest="force",
+        action="store_true",
+        help="Force overwriting of files. Default False.",
+    )
+    parser.add_argument(
+        "--quiet", dest="quiet", help=argparse.SUPPRESS, action="store_true", default=False
+    )
+    parser.add_argument("-v", "--version", action="version", version=verstr)
+
+    parser.parse_args()
+
+    # Run post-tedana
+
+
+def post_tedana(
+    registry,
+    accept=[],
+    reject=[],
+    out_dir=".",
+    config="auto",
+    convention="bids",
+    prefix="",
+    tedort=False,
+    mir=False,
+    no_reports=False,
+    png_cmap="coolwarm",
+    force=False,
+    debug=False,
+    quiet=False,
+):
+    """
+    Run the post-tedana manual classification workflow.
+
+    Please remember to cite [1]_.
+
+    Parameters
+    ----------
+    registry : :obj:`str`
+        The previously run registry as a JSON file.
+    accept : :obj:`list`
+        A list of integer values of components to accept in this workflow.
+    reject : :obj:`list`
+        A list of integer values of components to reject in this workflow.
+    out_dir : :obj:`str`, optional
+        Output directory.
+    tedort : :obj:`bool`, optional
+        Orthogonalize rejected components w.r.t. accepted ones prior to
+        denoising. Default is False.
+    mir : :obj:`bool`, optional
+        Run minimum image regression after denoising. Default is False.
+    no_reports : :obj:`bool`, optional
+        Do not generate .html reports and .png plots. Default is False, such
+        that reports are generated.
+    png_cmap : :obj:`str`, optional
+        Name of a matplotlib colormap to be used when generating figures.
+        Cannot be used with --no-reports. Default is 'coolwarm'.
+    debug : :obj:`bool`, optional
+        Whether to run in debugging mode or not. Default is False.
+    force : :obj:`bool`, optional
+        Whether to force file overwrites. Default is False.
+    quiet : :obj:`bool`, optional
+        If True, suppresses logging/printing of messages. Default is False.
+
+    Notes
+    -----
+    This workflow writes out several files. For a complete list of the files
+    generated by this workflow, please visit
+    https://tedana.readthedocs.io/en/latest/outputs.html
+
+    References
+    ----------
+    .. [1] DuPre, E. M., Salo, T., Ahmed, Z., Bandettini, P. A., Bottenhorn, K. L.,
+           Caballero-Gaudes, C., Dowdle, L. T., Gonzalez-Castillo, J., Heunis, S.,
+           Kundu, P., Laird, A. R., Markello, R., Markiewicz, C. J., Moia, S.,
+           Staden, I., Teves, J. B., Uruñuela, E., Vaziri-Pashkam, M.,
+           Whitaker, K., & Handwerker, D. A. (2021).
+           TE-dependent analysis of multi-echo fMRI with tedana.
+           Journal of Open Source Software, 6(66), 3669. doi:10.21105/joss.03669.
+    """
+    out_dir = op.abspath(out_dir)
+    if not op.isdir(out_dir):
+        os.mkdir(out_dir)
+
+    # Check that there is no overlap in accepted/rejected components
+    acc = set(accept)
+    rej = set(reject)
+    in_both = sorted(acc & rej)
+    if len(in_both) != 0:
+        raise ValueError(f"The following components were both accepted and rejected: {in_both}")
+
+    # boilerplate
+    basename = "report"
+    extension = "txt"
+    repname = op.join(out_dir, (basename + "." + extension))
+    repex = op.join(out_dir, (basename + "*"))
+    previousreps = glob(repex)
+    previousreps.sort(reverse=True)
+    for f in previousreps:
+        previousparts = op.splitext(f)
+        newname = previousparts[0] + "_old" + previousparts[1]
+        os.rename(f, newname)
+    refname = op.join(out_dir, "_references.txt")
+
+    # create logfile name
+    basename = "tedana_"
+    extension = "tsv"
+    start_time = datetime.datetime.now().strftime("%Y-%m-%dT%H%M%S")
+    logname = op.join(out_dir, (basename + start_time + "." + extension))
+    utils.setup_loggers(logname, repname, refname, quiet=quiet, debug=debug)
+
+    LGR.info("Using output directory: {}".format(out_dir))
+
+    ioh = io.InputHarvester(registry)
+    comptable = ioh.get_file_contents("ICA metrics tsv")
+    xcomp = ioh.get_file_contents("ICA cross component metrics json")
+    status_table = ioh.get_file_contents("ICA status table tsv")
+    previous_tree_fname = ioh.get_file_path("ICA decision tree json")
+    mmix = np.asarray(ioh.get_file_contents("ICA mixing tsv"))
+    mask_denoise = ioh.get_file_contents("adaptive mask img")
+    # If global signal was removed in the previous run, we can assume that
+    # the user wants to use that file again. If not, use the default of
+    # optimally combined data.
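+    # Note: ioh.get_file_path only looks up a file's location in the
+    # registry, while ioh.get_file_contents loads the data itself; the
+    # presence check below relies on get_file_path returning a falsy
+    # value when the registry has no entry for gskey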
+    gskey = "removed gs combined img"
+    if ioh.get_file_path(gskey):
+        data_oc = ioh.get_file_contents(gskey)
+        used_gs = True
+    else:
+        data_oc = ioh.get_file_contents("combined img")
+        used_gs = False
+    io_generator = io.OutputGenerator(
+        data_oc,
+        convention=convention,
+        prefix=prefix,
+        config=config,
+        force=force,
+        verbose=False,
+        out_dir=out_dir,
+    )
+
+    # Make a new selector with the added files
+    selector = selection.ComponentSelector.ComponentSelector(
+        previous_tree_fname, comptable, cross_component_metrics=xcomp, status_table=status_table
+    )
+
+    selector.add_manual(accept, "accepted")
+    selector.add_manual(reject, "rejected")
+    selector.select()
+    comptable = selector.component_table
+
+    # NOTE: most of these will be identical to the previous run, but
+    # re-saving them makes things easier for programs which will view the
+    # data after running.
+    comp_names = comptable["Component"].values
+    mixing_df = pd.DataFrame(data=mmix, columns=comp_names)
+    to_copy = [
+        "z-scored ICA components img",
+        "ICA mixing tsv",
+        "ICA decomposition json",
+        "ICA metrics json",
+    ]
+    if used_gs:
+        to_copy.append(gskey)
+        to_copy.append("has gs combined img")
+
+    for tc in to_copy:
+        io_generator.save_file(ioh.get_file_contents(tc), tc)
+
+    # Save component selector and tree
+    selector.to_files(io_generator)
+
+    if selector.n_bold_comps == 0:
+        LGR.warning("No BOLD components detected! Please check data and results!")
+
+    mmix_orig = mmix.copy()
+    # TODO: make this a function
+    if tedort:
+        comps_accepted = selector.accepted_comps
+        comps_rejected = selector.rejected_comps
+        acc_ts = mmix[:, comps_accepted]
+        rej_ts = mmix[:, comps_rejected]
+        betas = np.linalg.lstsq(acc_ts, rej_ts, rcond=None)[0]
+        pred_rej_ts = np.dot(acc_ts, betas)
+        resid = rej_ts - pred_rej_ts
+        mmix[:, comps_rejected] = resid
+        comp_names = [
+            io.add_decomp_prefix(comp, prefix="ica", max_value=comptable.index.max())
+            for comp in range(selector.n_comps)
+        ]
+        mixing_df = pd.DataFrame(data=mmix, columns=comp_names)
+        io_generator.save_file(mixing_df, "ICA orthogonalized mixing tsv")
+        RepLGR.info(
+            "Rejected components' time series were then "
+            "orthogonalized with respect to accepted components' time "
+            "series."
+        )
+
+    n_vols = data_oc.shape[3]
+    img_t_r = io_generator.reference_img.header.get_zooms()[-1]
+    mask_denoise = utils.reshape_niimg(mask_denoise).astype(bool)
+    data_oc = utils.reshape_niimg(data_oc)
+
+    # TODO: make a better result-writing function
+    # #############################################!!!!
+    # TODO: make a better time series creation function
+    #       - get_ts_fit_tag(include=[], exclude=[])
+    #       - get_ts_regress/residual_tag(include=[], exclude=[])
+    #       How to handle [acc/rej] + tag ?
+    io.writeresults(
+        data_oc,
+        mask=mask_denoise,
+        comptable=comptable,
+        mmix=mmix,
+        n_vols=n_vols,
+        io_generator=io_generator,
+    )
+
+    if mir:
+        io_generator.force = True
+        gsc.minimum_image_regression(data_oc, mmix, mask_denoise, comptable, io_generator)
+        io_generator.force = False
+
+    # Write out BIDS-compatible description file
+    derivative_metadata = {
+        "Name": "tedana Outputs",
+        "BIDSVersion": "1.5.0",
+        "DatasetType": "derivative",
+        "GeneratedBy": [
+            {
+                "Name": "tedana_reclassify",
+                "Version": __version__,
+                "Description": (
+                    "A denoising pipeline for the identification and removal "
+                    "of non-BOLD noise from multi-echo fMRI data."
+ ), + "CodeURL": "https://github.com/ME-ICA/tedana", + } + ], + } + io_generator.save_file(derivative_metadata, "data description json") + + with open(repname, "r") as fo: + report = [line.rstrip() for line in fo.readlines()] + report = " ".join(report) + with open(repname, "w") as fo: + fo.write(report) + + if not no_reports: + LGR.info("Making figures folder with static component maps and timecourse plots.") + + dn_ts, hikts, lowkts = io.denoise_ts(data_oc, mmix, mask_denoise, comptable) + + # Figure out which control methods were used + gscontrol = [] + if used_gs: + gscontrol.append("gsr") + if mir: + gscontrol.append("mir") + gscontrol = None if gscontrol is [] else gscontrol + + reporting.static_figures.carpet_plot( + optcom_ts=data_oc, + denoised_ts=dn_ts, + hikts=hikts, + lowkts=lowkts, + mask=mask_denoise, + io_generator=io_generator, + gscontrol=gscontrol, + ) + reporting.static_figures.comp_figures( + data_oc, + mask=mask_denoise, + comptable=comptable, + mmix=mmix_orig, + io_generator=io_generator, + png_cmap=png_cmap, + ) + + if sys.version_info.major == 3 and sys.version_info.minor < 6: + warn_msg = ( + "Reports requested but Python version is less than " + "3.6.0. Dynamic reports will not be generated." + ) + LGR.warn(warn_msg) + else: + LGR.info("Generating dynamic report") + reporting.generate_report(io_generator, tr=img_t_r) + + io_generator.save_self() + LGR.info("Workflow completed") + utils.teardown_loggers() + os.remove(refname) + + +def _main(argv=None): + """Tedana entry point""" + # TODO change this _main function to fix _get_parser and tedana_workflow + options = _get_parser().parse_args(argv) + kwargs = vars(options) + n_threads = kwargs.pop("n_threads") + n_threads = None if n_threads == -1 else n_threads + with threadpool_limits(limits=n_threads, user_api=None): + tedana_workflow(**kwargs) + + +if __name__ == "__main__": + _main() From 7663afb9606e4edf6b5a225d0131e2de02d0b816 Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Mon, 8 Aug 2022 16:25:56 -0400 Subject: [PATCH 002/177] Fix commented-out tedana workflow --- tedana/tests/test_integration.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/tedana/tests/test_integration.py b/tedana/tests/test_integration.py index e632011b7..5b16f075f 100644 --- a/tedana/tests/test_integration.py +++ b/tedana/tests/test_integration.py @@ -124,10 +124,8 @@ def test_integration_four_echo(skip_integration): out_dir = "/tmp/data/four-echo/TED.four-echo" out_dir_manual = "/tmp/data/four-echo/TED.four-echo-manual" - """ if os.path.exists(out_dir): shutil.rmtree(out_dir) - """ if os.path.exists(out_dir_manual): shutil.rmtree(out_dir_manual) @@ -138,7 +136,6 @@ def test_integration_four_echo(skip_integration): prepend += "sub-PILOT_ses-01_task-localizerDetection_run-01_echo-" suffix = "_space-sbref_desc-preproc_bold+orig.HEAD" datalist = [prepend + str(i + 1) + suffix for i in range(4)] - """ tedana_cli.tedana_workflow( data=datalist, tes=[11.8, 28.04, 44.28, 60.52], @@ -149,7 +146,6 @@ def test_integration_four_echo(skip_integration): debug=True, verbose=True, ) - """ post_tedana( op.join(out_dir, "desc-tedana_registry.json"), From 1284ff5446fa59d336df2a02e8a4bb084755ef76 Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Mon, 8 Aug 2022 16:39:14 -0400 Subject: [PATCH 003/177] Appease the style checker --- tedana/selection/ComponentSelector.py | 11 ++++++----- tedana/selection/selection_nodes.py | 23 ++++++++++++----------- tedana/selection/selection_utils.py | 4 +++- tedana/selection/tedica.py | 3 +-- 
tedana/tests/test_ComponentSelector.py | 8 +++++--- tedana/tests/test_selection_nodes.py | 8 ++++---- tedana/tests/test_selection_utils.py | 8 ++++---- tedana/workflows/tedana_reclassify.py | 9 +-------- 8 files changed, 36 insertions(+), 38 deletions(-) diff --git a/tedana/selection/ComponentSelector.py b/tedana/selection/ComponentSelector.py index 9e31a591e..d40c760ab 100644 --- a/tedana/selection/ComponentSelector.py +++ b/tedana/selection/ComponentSelector.py @@ -2,20 +2,21 @@ Functions that include workflows to identify and label TE-dependent and TE-independent components. """ -import os.path as op import inspect import logging -from pkg_resources import resource_filename -from numpy import asarray +import os.path as op + import pandas as pd +from numpy import asarray +from pkg_resources import resource_filename +from tedana.io import load_json +from tedana.selection import selection_nodes from tedana.selection.selection_utils import ( clean_dataframe, confirm_metrics_exist, log_classification_counts, ) -from tedana.selection import selection_nodes -from tedana.io import load_json from tedana.utils import get_resource_path LGR = logging.getLogger("GENERAL") diff --git a/tedana/selection/selection_nodes.py b/tedana/selection/selection_nodes.py index 932210215..8ffbd5fc2 100644 --- a/tedana/selection/selection_nodes.py +++ b/tedana/selection/selection_nodes.py @@ -2,24 +2,25 @@ Functions that will be used as steps in a decision tree """ import logging + import numpy as np import pandas as pd - -# from scipy import stats - from scipy.stats import scoreatpercentile -from tedana.stats import getfbounds -from tedana.selection.selection_utils import ( - confirm_metrics_exist, - selectcomps2use, - log_decision_tree_step, + +from tedana.metrics.dependence import generate_decision_table_score +from tedana.selection.selection_utils import ( # get_new_meanmetricrank, change_comptable_classifications, - getelbow, + confirm_metrics_exist, get_extend_factor, + getelbow, kappa_elbow_kundu, - # get_new_meanmetricrank, + log_decision_tree_step, + selectcomps2use, ) -from tedana.metrics.dependence import generate_decision_table_score +from tedana.stats import getfbounds + +# from scipy import stats + LGR = logging.getLogger("GENERAL") RepLGR = logging.getLogger("REPORT") diff --git a/tedana/selection/selection_utils.py b/tedana/selection/selection_utils.py index 1fd0850fe..6e13b00e9 100644 --- a/tedana/selection/selection_utils.py +++ b/tedana/selection/selection_utils.py @@ -4,9 +4,11 @@ import logging import re + import numpy as np -from tedana.stats import getfbounds + from tedana.metrics.dependence import generate_decision_table_score +from tedana.stats import getfbounds LGR = logging.getLogger("GENERAL") RepLGR = logging.getLogger("REPORT") diff --git a/tedana/selection/tedica.py b/tedana/selection/tedica.py index d2f4a6d92..20bd8735d 100644 --- a/tedana/selection/tedica.py +++ b/tedana/selection/tedica.py @@ -6,9 +6,8 @@ import numpy as np from scipy import stats -from tedana.stats import getfbounds -from tedana.selection.ComponentSelector import ComponentSelector from tedana.metrics import collect +from tedana.selection.ComponentSelector import ComponentSelector from tedana.selection.selection_utils import clean_dataframe, getelbow from tedana.stats import getfbounds diff --git a/tedana/tests/test_ComponentSelector.py b/tedana/tests/test_ComponentSelector.py index 4607bcb04..774f4f950 100644 --- a/tedana/tests/test_ComponentSelector.py +++ b/tedana/tests/test_ComponentSelector.py @@ -1,13 +1,15 
@@ """Tests for the decision tree modularization""" -import pytest -import json, os, glob +import glob +import json +import os import os.path as op import numpy as np import pandas as pd +import pytest -from tedana.selection import ComponentSelector from tedana import io +from tedana.selection import ComponentSelector THIS_DIR = os.path.dirname(os.path.abspath(__file__)) diff --git a/tedana/tests/test_selection_nodes.py b/tedana/tests/test_selection_nodes.py index ef459c528..ce409670d 100644 --- a/tedana/tests/test_selection_nodes.py +++ b/tedana/tests/test_selection_nodes.py @@ -1,13 +1,13 @@ """Tests for the tedana.selection.selection_nodes module.""" +import os from re import S + import numpy as np -import pytest -import os import pandas as pd +import pytest +from tedana.selection import selection_nodes, selection_utils from tedana.selection.ComponentSelector import ComponentSelector -from tedana.selection import selection_utils -from tedana.selection import selection_nodes from tedana.tests.test_selection_utils import sample_component_table, sample_selector THIS_DIR = os.path.dirname(os.path.abspath(__file__)) diff --git a/tedana/tests/test_selection_utils.py b/tedana/tests/test_selection_utils.py index e4a4aafe3..472fa054e 100644 --- a/tedana/tests/test_selection_utils.py +++ b/tedana/tests/test_selection_utils.py @@ -1,12 +1,12 @@ """Tests for the tedana.selection.selection_utils module.""" -import numpy as np -import pytest import os + +import numpy as np import pandas as pd +import pytest -from tedana.selection.ComponentSelector import ComponentSelector from tedana.selection import selection_utils - +from tedana.selection.ComponentSelector import ComponentSelector THIS_DIR = os.path.dirname(os.path.abspath(__file__)) diff --git a/tedana/workflows/tedana_reclassify.py b/tedana/workflows/tedana_reclassify.py index f4e916594..dadbb7865 100644 --- a/tedana/workflows/tedana_reclassify.py +++ b/tedana/workflows/tedana_reclassify.py @@ -18,14 +18,7 @@ from threadpoolctl import threadpool_limits import tedana.gscontrol as gsc -from tedana import ( - __version__, - io, - reporting, - selection, - utils, - stats, -) +from tedana import __version__, io, reporting, selection, stats, utils from tedana.workflows.parser_utils import is_valid_file LGR = logging.getLogger("GENERAL") From bfbc509a5b32fb67ee2cdf8ba0d75e74515dc137 Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Mon, 8 Aug 2022 17:37:46 -0400 Subject: [PATCH 004/177] All tremble before the mighty linter --- tedana/io.py | 5 +- tedana/metrics/collect.py | 3 +- tedana/reporting/static_figures.py | 2 +- tedana/selection/ComponentSelector.py | 55 ++++++++++----------- tedana/selection/selection_nodes.py | 68 +++++++++++++++++--------- tedana/selection/selection_utils.py | 19 ++++--- tedana/selection/tedica.py | 4 +- tedana/tests/test_ComponentSelector.py | 5 +- tedana/tests/test_selection_nodes.py | 54 +++++++++++--------- tedana/tests/test_selection_utils.py | 15 ++++-- tedana/workflows/tedana_reclassify.py | 25 ++-------- 11 files changed, 135 insertions(+), 120 deletions(-) diff --git a/tedana/io.py b/tedana/io.py index 7c9b10919..6e489e123 100644 --- a/tedana/io.py +++ b/tedana/io.py @@ -29,17 +29,18 @@ class CustomEncoder(json.JSONEncoder): """Convert some types because of JSON serialization and numpy incompatibilities + # noqa: E501 See here: https://stackoverflow.com/questions/50916422/python-typeerror-object-of-type-int64-is-not-json-serializable/50916741 """ def default(self, obj): # int64 non-serializable but is a numpy output 
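+        # e.g. json.dumps({"n": np.int64(3)}, cls=CustomEncoder) -> '{"n": 3}'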
if isinstance(obj, np.integer): return int(obj) - + # containers that are not serializable if isinstance(obj, np.ndarray): return obj.tolist() - if isinstance (obj, set): + if isinstance(obj, set): return list(obj) return super(CustomEncoder, self).default(obj) diff --git a/tedana/metrics/collect.py b/tedana/metrics/collect.py index bd2dc6b49..ea6d511d3 100644 --- a/tedana/metrics/collect.py +++ b/tedana/metrics/collect.py @@ -541,7 +541,8 @@ def get_metadata(comptable): metric_metadata["classification_tags"] = { "LongName": "Component classification tags", "Description": ( - "A single tag or a comma separated list of tags to describe why a component received its classification" + "A single tag or a comma separated list of tags to describe why a component" + " received its classification" ), } if "rationale" in comptable: diff --git a/tedana/reporting/static_figures.py b/tedana/reporting/static_figures.py index 185f8cf6f..1afce5446 100644 --- a/tedana/reporting/static_figures.py +++ b/tedana/reporting/static_figures.py @@ -201,7 +201,7 @@ def comp_figures(ts, mask, comptable, mmix, io_generator, png_cmap): expl_text = "" # Remove trailing ';' from rationale column - #comptable["rationale"] = comptable["rationale"].str.rstrip(";") + # comptable["rationale"] = comptable["rationale"].str.rstrip(";") for compnum in comptable.index.values: if comptable.loc[compnum, "classification"] == "accepted": line_color = "g" diff --git a/tedana/selection/ComponentSelector.py b/tedana/selection/ComponentSelector.py index d40c760ab..099014c93 100644 --- a/tedana/selection/ComponentSelector.py +++ b/tedana/selection/ComponentSelector.py @@ -6,9 +6,7 @@ import logging import os.path as op -import pandas as pd from numpy import asarray -from pkg_resources import resource_filename from tedana.io import load_json from tedana.selection import selection_nodes @@ -52,11 +50,17 @@ def load_config(tree): The `dict` has several required fields to describe the entire tree `tree_id`: :obj:`str` The name of the tree `info`: :obj:`str` A brief description of the tree for info logging - `report`: :obj:`str` A narrative description of the tree that could be used in report logging + `report`: :obj:`str` + A narrative description of the tree that could be used in report logging `refs`: :obj:`str` Publications that should be referenced, when this tree is used - `necessary_metrics`: :obj:`list[str]` The metrics in `component_table` that will be used by this tree - `intermediate_classifications`: :obj:`list[str]` User specified component classification labels. 'accepted', 'rejected', and 'unclassified' are defaults that don't need to be included here - `classification_tags`: :obj:`list[str]` Descriptive labels that can be used to explain why a component was accepted or rejected. For example, ["Likely BOLD","Low variance"] + `necessary_metrics`: :obj:`list[str]` + The metrics in `component_table` that will be used by this tree + `intermediate_classifications`: :obj:`list[str]` + User specified component classification labels. 'accepted', 'rejected', and + 'unclassified' are defaults that don't need to be included here + `classification_tags`: :obj:`list[str]` + Descriptive labels that can be used to explain why a component was accepted or rejected. + For example, ["Likely BOLD","Low variance"] `nodes`: :obj:`list[dict]` Each dictionary includes the information to run one node in the decision tree. 
Each node should either be able to change component classifications (function names starting with dec_) @@ -208,9 +212,7 @@ def validate_tree(tree): nonstandard_labels = compclass.difference(all_classifications) if nonstandard_labels: LGR.warning( - "{} in node {} of the decision tree includes a classification label that was not predefined".format( - compclass, i - ) + f"{compclass} in node {i} of the decision tree includes a classification " ) if "decide_comps" in node.get("parameters").keys(): tmp_comp = node["parameters"]["decide_comps"] @@ -220,7 +222,8 @@ def validate_tree(tree): nonstandard_labels = compclass.difference(all_decide_comps) if nonstandard_labels: LGR.warning( - f"{compclass} in node {i} of the decision tree includes a classification label that was not predefined" + f"{compclass} in node {i} of the decision tree includes a classification " + "label that was not predefined" ) tagset = set() @@ -233,7 +236,8 @@ def validate_tree(tree): undefined_classification_tags = tagset.difference(set(tree.get("classification_tags"))) if undefined_classification_tags: LGR.warning( - f"{tagset} in node {i} of the decision tree includes a classification tag that was not predefined" + f"{tagset} in node {i} of the decision tree includes a classification " + "tag that was not predefined" ) if err_msg: @@ -414,7 +418,7 @@ def select(self): current_node_idx """ # TODO: force-add classification tags - if not "classification_tags" in self.component_table.columns: + if "classification_tags" not in self.component_table.columns: self.component_table["classification_tags"] = "" # this will crash the program with an error message if not all # necessary_metrics are in the comptable @@ -513,11 +517,13 @@ def are_only_necessary_metrics_used(self): not_used = self.necessary_metrics - self.tree["used_metrics"] if len(not_declared) > 0: LGR.warning( - f"Decision tree {self.tree_name} used the following metrics that were not declared as necessary: {not_declared}" + f"Decision tree {self.tree_name} used the following metrics that were " + "not declared as necessary: {not_declared}" ) if len(not_used) > 0: LGR.warning( - f"Decision tree {self.tree_name} did not use the following metrics that were declared as necessary: {not_used}" + f"Decision tree {self.tree_name} did not use the following metrics " + "that were declared as necessary: {not_used}" ) def are_all_components_accepted_or_rejected(self): @@ -532,7 +538,9 @@ def are_all_components_accepted_or_rejected(self): for nonfinal_class in nonfinal_classifications: numcomp = asarray(self.component_table["classification"] == nonfinal_class).sum() LGR.warning( - f"{numcomp} components have a final classification of {nonfinal_class}. At the end of the selection process, all components are expected to be 'accepted' or 'rejected'" + f"{numcomp} components have a final classification of {nonfinal_class}. " + "At the end of the selection process, all components are expected " + "to be 'accepted' or 'rejected'" ) @property @@ -573,19 +581,10 @@ def to_files(self, io_generator): io_generator: tedana.io.OutputGenerator The output generator to use for filename generation and saving. 
""" - comptable_fname = io_generator.save_file( - self.component_table, - "ICA metrics tsv", - ) - xcomp_fname = io_generator.save_file( + io_generator.save_file(self.component_table, "ICA metrics tsv") + io_generator.save_file( self.cross_component_metrics, "ICA cross component metrics json", ) - status_fname = io_generator.save_file( - self.component_status_table, - "ICA status table tsv", - ) - tree_fname = io_generator.save_file( - self.tree, - "ICA decision tree json", - ) + io_generator.save_file(self.component_status_table, "ICA status table tsv") + io_generator.save_file(self.tree, "ICA decision tree json") diff --git a/tedana/selection/selection_nodes.py b/tedana/selection/selection_nodes.py index 8ffbd5fc2..939df7751 100644 --- a/tedana/selection/selection_nodes.py +++ b/tedana/selection/selection_nodes.py @@ -373,7 +373,8 @@ def identify_used_metric(val, isnum=False): ) else: raise ValueError( - f"{val} is neither a metric in selector.component_table nor selector.cross_component_metrics" + f"{val} is neither a metric in selector.component_table " + "nor selector.cross_component_metrics" ) if isnum: if not isinstance(val, (int, float)): @@ -493,7 +494,7 @@ def operator_scale_descript(val_scale, val): ) def parse_vals(val): - """Get the actual metric values for the selected components or return the constant int or float""" + """Get the metric values for the selected components or relevant constant""" if isinstance(val, str): return selector.component_table.loc[comps2use, val].copy() else: @@ -511,19 +512,19 @@ def parse_vals(val): ) else: - left1_val = parse_vals(left) - right1_val = parse_vals(right) + _ = parse_vals(left) + _ = parse_vals(right) decision_boolean = eval(f"(left_scale*left1_val) {op} (right_scale * right1_val)") if is_compound >= 2: - left2_val = parse_vals(left2) - right2_val = parse_vals(right2) + _ = parse_vals(left2) + _ = parse_vals(right2) statement1 = decision_boolean.copy() statement2 = eval(f"(left2_scale*left2_val) {op2} (right2_scale * right2_val)") # logical dot product for compound statement decision_boolean = statement1 * statement2 if is_compound == 3: - left3_val = parse_vals(left3) - right3_val = parse_vals(right3) + _ = parse_vals(left3) + _ = parse_vals(right3) # statement 1 is now the combination of the first two conditional statements statement1 = decision_boolean.copy() # statement 2 is now the third conditional statement @@ -775,13 +776,15 @@ def calc_kappa_rho_elbows_kundu( "kappa_elbow_kundu" in outputs["calc_cross_comp_metrics"] ): LGR.warning( - f"kappa_elbow_kundu already calculated. Overwriting previous value in {function_name_idx}" + "kappa_elbow_kundu already calculated." + f"Overwriting previous value in {function_name_idx}" ) if ("rho_elbow_kundu" in selector.cross_component_metrics) and ( "rho_elbow_kundu" in outputs["calc_cross_comp_metrics"] ): LGR.warning( - f"rho_elbow_kundu already calculated. Overwriting previous value in {function_name_idx}" + "rho_elbow_kundu already calculated." 
+ f"Overwriting previous value in {function_name_idx}" ) if "varex_upper_p" in selector.cross_component_metrics: LGR.warning( @@ -992,7 +995,11 @@ def dec_classification_doesnt_exist( ) else: # do_comps_exist is None: # should be True for all components in comps2use - # decision_boolean = pd.Series(data=False, index=np.arange(len(selector.component_table)), dtype=bool) + # decision_boolean = pd.Series( + # data=False, + # index=np.arange(len(selector.component_table)), + # dtype=bool + # ) # decision_boolean.iloc[comps2use] = True decision_boolean = pd.Series(True, index=comps2use) @@ -1061,7 +1068,7 @@ def calc_varex_thresh( function_name_idx = f"Step {selector.current_node_idx}: calc_varex_thresh" thresh_label = thresh_label.lower() - if thresh_label is None or thresh_label is "": + if thresh_label is None or thresh_label == "": varex_name = "varex_thresh" perc_name = "perc" else: @@ -1157,7 +1164,8 @@ def calc_extend_factor( {custom_node_label} {only_used_metrics} extend_factor: :obj:`float` - If a number, then use rather than calculating anything. If None than calculate. default=None + If a number, then use rather than calculating anything. + If None than calculate. default=None Returns ------- @@ -1229,7 +1237,8 @@ def calc_max_good_meanmetricrank( {decide_comps} metric_suffix: :obj:`str` By default, this will output a value called "max_good_meanmetricrank" - If this variable is not None or "" then it will output: "max_good_meanmetricrank_[metric_suffix] + If this variable is not None or "" then it will output: + "max_good_meanmetricrank_[metric_suffix]" {log_extra} {custom_node_label} {only_used_metrics} @@ -1253,7 +1262,7 @@ def calc_max_good_meanmetricrank( if ( (metric_suffix is not None) - and (metric_suffix is not "") + and (metric_suffix != "") and isinstance(metric_suffix, str) ): metric_name = f"max_good_meanmetricrank_{metric_suffix}" @@ -1273,7 +1282,8 @@ def calc_max_good_meanmetricrank( if metric_name in selector.cross_component_metrics: LGR.warning( - f"max_good_meanmetricrank already calculated. Overwriting previous value in {function_name_idx}" + "max_good_meanmetricrank already calculated." + f"Overwriting previous value in {function_name_idx}" ) if custom_node_label: @@ -1377,7 +1387,9 @@ def calc_varex_kappa_ratio( if "varex kappa ratio" in selector.component_table: raise ValueError( - f"'varex kappa ratio' is already a column in the component_table. Recalculating in {function_name_idx} can cause problems since these are only calculated on a subset of components" + "'varex kappa ratio' is already a column in the component_table." + f"Recalculating in {function_name_idx} can cause problems since these " + "are only calculated on a subset of components" ) if custom_node_label: @@ -1416,7 +1428,8 @@ def calc_varex_kappa_ratio( * selector.component_table.loc[comps2use, "variance explained"] / selector.component_table.loc[comps2use, "kappa"] ) - # Unclear if necessary, but this may clean up a weird issue on passing references in a data frame + # Unclear if necessary, but this may clean up a weird issue on passing + # references in a data frame. # See longer comment in selection_utils.comptable_classification_changer selector.component_table = selector.component_table.copy() @@ -1442,7 +1455,8 @@ def calc_revised_meanmetricrank_guesses( only_used_metrics=False, ): """ - Calculates a new d_table_score (meanmetricrank) on a subset of components defiend in decide_comps + Calculates a new d_table_score (meanmetricrank) on a subset of + components defined in decide_comps. 
Also saves a bunch of cross_component_metrics that are used for various thresholds. These are: num_acc_guess: A guess of the final number of accepted components @@ -1505,25 +1519,30 @@ def calc_revised_meanmetricrank_guesses( if "conservative_guess" in selector.cross_component_metrics: LGR.warning( - f"conservative_guess already calculated. Overwriting previous value in {function_name_idx}" + "conservative_guess already calculated. " + f"Overwriting previous value in {function_name_idx}" ) if "restrict_factor" in selector.cross_component_metrics: LGR.warning( - f"restrict_factor already calculated. Overwriting previous value in {function_name_idx}" + "restrict_factor already calculated. " + f"Overwriting previous value in {function_name_idx}" ) if not isinstance(restrict_factor, (int, float)): raise ValueError(f"restrict_factor needs to be a number. It is: {restrict_factor}") if f"d_table_score_node{selector.current_node_idx}" in selector.component_table: raise ValueError( - f"d_table_score_node{selector.current_node_idx} is already a column in the component_table. Recalculating in {function_name_idx} can cause problems since these are only calculated on a subset of components" + f"d_table_score_node{selector.current_node_idx} is already a column" + f"in the component_table. Recalculating in {function_name_idx} can " + "cause problems since these are only calculated on a subset of components" ) for xcompmetric in outputs["used_cross_component_metrics"]: if xcompmetric not in selector.cross_component_metrics: raise ValueError( - f"{xcompmetric} not in cross_component_metrics. It needs to be calculated before {function_name_idx}" + f"{xcompmetric} not in cross_component_metrics. " + f"It needs to be calculated before {function_name_idx}" ) if custom_node_label: @@ -1583,7 +1602,8 @@ def calc_revised_meanmetricrank_guesses( selector.component_table.loc[ comps2use, f"d_table_score_node{selector.current_node_idx}" ] = tmp_d_table_score - # Unclear if necessary, but this may clean up a weird issue on passing references in a data frame + # Unclear if necessary, but this may clean up a weird issue on passing + # references in a data frame. 
# See longer comment in selection_utils.comptable_classification_changer selector.component_table = selector.component_table.copy() diff --git a/tedana/selection/selection_utils.py b/tedana/selection/selection_utils.py index 6e13b00e9..459e89795 100644 --- a/tedana/selection/selection_utils.py +++ b/tedana/selection/selection_utils.py @@ -3,7 +3,6 @@ """ import logging -import re import numpy as np @@ -65,17 +64,21 @@ def selectcomps2use(selector, decide_comps): # decide_comps is already a string of indices if len(selector.component_table) <= max(decide_comps): raise ValueError( - f"decide_comps for selectcomps2use is selecting for a component with index {max(decide_comps)} (0 indexing) which is greater than the number of components: {len(selector.component_table)}" + "decide_comps for selectcomps2use is selecting for a component with index" + f"{max(decide_comps)} (0 indexing) which is greater than the number " + f"of components: {len(selector.component_table)}" ) elif min(decide_comps) < 0: raise ValueError( - f"decide_comps for selectcomps2use is selecting for a component with index {min(decide_comps)}, which is less than 0" + "decide_comps for selectcomps2use is selecting for a component " + f"with index {min(decide_comps)}, which is less than 0" ) else: comps2use = decide_comps else: raise ValueError( - f"decide_comps in selectcomps2use needs to be a list or a single element of strings or integers. It is {decide_comps}" + "decide_comps in selectcomps2use needs to be a list or a single element " + f"of strings or integers. It is {decide_comps}" ) # If no components are selected, then return None. @@ -268,7 +271,8 @@ def comptable_classification_changer( ) else: LGR.info( - f"Step {selector.current_node_idx}: No components fit criterion {boolstate} to change classification" + f"Step {selector.current_node_idx}: No components fit criterion " + f"{boolstate} to change classification" ) return selector @@ -424,7 +428,8 @@ def log_decision_tree_step( LGR.info(f"{function_name_idx} calculated: {', '.join(calc_summaries)}") else: LGR.warning( - f"{function_name_idx} logged to write out cross_component_metrics, but none were calculated" + f"{function_name_idx} logged to write out cross_component_metrics, " + "but none were calculated" ) @@ -650,8 +655,6 @@ def get_extend_factor(n_vols=None, extend_factor=None): return extend_factor -# This will likely need to be revived to run the kundu decision tree, but it will be slightly differe -# So commenting out for now. def get_new_meanmetricrank(component_table, comps2use, decision_node_idx, calc_new_rank=False): """ If a revised d_table_score was already calculated, use that. 
diff --git a/tedana/selection/tedica.py b/tedana/selection/tedica.py index 20bd8735d..4367c31a3 100644 --- a/tedana/selection/tedica.py +++ b/tedana/selection/tedica.py @@ -4,12 +4,10 @@ import logging import numpy as np -from scipy import stats from tedana.metrics import collect from tedana.selection.ComponentSelector import ComponentSelector -from tedana.selection.selection_utils import clean_dataframe, getelbow -from tedana.stats import getfbounds +from tedana.selection.selection_utils import clean_dataframe LGR = logging.getLogger("GENERAL") RepLGR = logging.getLogger("REPORT") diff --git a/tedana/tests/test_ComponentSelector.py b/tedana/tests/test_ComponentSelector.py index 774f4f950..957498295 100644 --- a/tedana/tests/test_ComponentSelector.py +++ b/tedana/tests/test_ComponentSelector.py @@ -4,11 +4,9 @@ import os import os.path as op -import numpy as np import pandas as pd import pytest -from tedana import io from tedana.selection import ComponentSelector THIS_DIR = os.path.dirname(os.path.abspath(__file__)) @@ -94,7 +92,8 @@ def dicts_to_test(treechoice): "functionname": "manual_classify", "parameters": { "new_classification": "accepted", - # Warning for an non-predefined classification used to select components to operate on + # Warning for an non-predefined classification used to select + # components to operate on "decide_comps": "random2notpredefined", }, "kwargs": { diff --git a/tedana/tests/test_selection_nodes.py b/tedana/tests/test_selection_nodes.py index ce409670d..89a90ddda 100644 --- a/tedana/tests/test_selection_nodes.py +++ b/tedana/tests/test_selection_nodes.py @@ -1,14 +1,10 @@ """Tests for the tedana.selection.selection_nodes module.""" import os -from re import S -import numpy as np -import pandas as pd import pytest -from tedana.selection import selection_nodes, selection_utils -from tedana.selection.ComponentSelector import ComponentSelector -from tedana.tests.test_selection_utils import sample_component_table, sample_selector +from tedana.selection import selection_nodes +from tedana.tests.test_selection_utils import sample_selector THIS_DIR = os.path.dirname(os.path.abspath(__file__)) @@ -38,7 +34,8 @@ def test_manual_classify_smoke(): custom_node_label="custom label", tag="test tag", ) - # There should be 4 selected components and component_status_table should have a new column "Node 0" + # There should be 4 selected components and component_status_table should + # have a new column "Node 0" assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numTrue"] == 4 assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numFalse"] == 0 assert f"Node {selector.current_node_idx}" in selector.component_status_table @@ -79,7 +76,8 @@ def test_dec_left_op_right_succeeds(): ) assert len(used_metrics - {"kappa", "rho"}) == 0 - # Standard execution where components with kappa>rho are changed from "provisional accept" to "accepted" + # Standard execution where components with kappa>rho are changed from + # "provisional accept" to "accepted" # And all extra logging code and options are run # left and right are both component_table_metrics selector = selection_nodes.dec_left_op_right( @@ -98,7 +96,8 @@ def test_dec_left_op_right_succeeds(): tag_ifTrue="test true tag", tag_ifFalse="test false tag", ) - # scales are set to make sure 3 components are true and 1 is false using the sample component table + # scales are set to make sure 3 components are true and 1 is false using + # the sample component table assert 
selector.tree["nodes"][selector.current_node_idx]["outputs"]["numTrue"] == 3 assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numFalse"] == 1 assert f"Node {selector.current_node_idx}" in selector.component_status_table @@ -118,7 +117,8 @@ def test_dec_left_op_right_succeeds(): assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numTrue"] == 0 assert f"Node {selector.current_node_idx}" not in selector.component_status_table - # Re-initializing selector so that it has components classificated as "provisional accept" again + # Re-initializing selector so that it has components classificated as + # "provisional accept" again selector = sample_selector(options="provclass") # Test when left is a component_table_metric, & right is a cross_component_metric selector = selection_nodes.dec_left_op_right( @@ -446,7 +446,8 @@ def test_calc_kappa_rho_elbows_kundu(): assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["rho_elbow_kundu"] > 0 assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["varex_upper_p"] > 0 - # Run warning logging code for if any of the cross_component_metrics already existed and would be over-written + # Run warning logging code for if any of the cross_component_metrics + # already existed and would be over-written selector = sample_selector(options="unclass") selector.cross_component_metrics["kappa_elbow_kundu"] = 1 selector.cross_component_metrics["rho_elbow_kundu"] = 1 @@ -511,10 +512,10 @@ def test_calc_kappa_rho_elbows_kundu(): selector = selection_nodes.calc_kappa_rho_elbows_kundu(selector, "NotAClassification") calc_cross_comp_metrics = {"kappa_elbow_kundu", "rho_elbow_kundu", "varex_upper_p"} assert ( - selector.tree["nodes"][selector.current_node_idx]["outputs"]["kappa_elbow_kundu"] == None + selector.tree["nodes"][selector.current_node_idx]["outputs"]["kappa_elbow_kundu"] is None ) - assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["rho_elbow_kundu"] == None - assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["varex_upper_p"] == None + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["rho_elbow_kundu"] is None + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["varex_upper_p"] is None def test_dec_classification_doesnt_exist_smoke(): @@ -634,7 +635,8 @@ def test_calc_varex_thresh_smoke(): assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["varex_thresh"] > 0 assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["perc"] == 90 - # Run warning logging code to see if any of the cross_component_metrics already existed and would be over-written + # Run warning logging code to see if any of the cross_component_metrics + # already existed and would be over-written selector = sample_selector(options="provclass") selector.cross_component_metrics["varex_upper_thresh"] = 1 selector.cross_component_metrics["upper_perc"] = 1 @@ -672,7 +674,7 @@ def test_calc_varex_thresh_smoke(): selector, decide_comps="NotAClassification", thresh_label="upper", percentile_thresh=90 ) assert ( - selector.tree["nodes"][selector.current_node_idx]["outputs"]["varex_upper_thresh"] == None + selector.tree["nodes"][selector.current_node_idx]["outputs"]["varex_upper_thresh"] is None ) # percentile_thresh doesn't depend on components and is assigned assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["upper_perc"] == 90 @@ -702,7 +704,8 @@ def test_calc_extend_factor_smoke(): assert len(output_calc_cross_comp_metrics - 
calc_cross_comp_metrics) == 0 assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["extend_factor"] > 0 - # Run warning logging code for if any of the cross_component_metrics already existed and would be over-written + # Run warning logging code for when any of the cross_component_metrics + # already existed and would be over-written selector = sample_selector() selector.cross_component_metrics["extend_factor"] = 1.0 selector = selection_nodes.calc_extend_factor(selector) @@ -750,7 +753,8 @@ def test_max_good_meanmetricrank_smoke(): selector.tree["nodes"][selector.current_node_idx]["outputs"]["max_good_meanmetricrank"] > 0 ) - # Run warning logging code for if any of the cross_component_metrics already existed and would be over-written + # Run warning logging code for when any of the cross_component_metrics + # already existed and would be over-written selector = sample_selector("provclass") selector.cross_component_metrics["max_good_meanmetricrank"] = 10 selector.cross_component_metrics["extend_factor"] = 2.0 @@ -774,7 +778,7 @@ def test_max_good_meanmetricrank_smoke(): selector = selection_nodes.calc_max_good_meanmetricrank(selector, "NotAClassification") assert ( selector.tree["nodes"][selector.current_node_idx]["outputs"]["max_good_meanmetricrank"] - == None + is None ) @@ -806,7 +810,8 @@ def test_calc_varex_kappa_ratio_smoke(): assert len(output_calc_cross_comp_metrics - calc_cross_comp_metrics) == 0 assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["kappa_rate"] > 0 - # Run warning logging code for if any of the cross_component_metrics already existed and would be over-written + # Run warning logging code for when any of the cross_component_metrics + # already existed and would be over-written selector = sample_selector("provclass") selector.cross_component_metrics["kappa_rate"] = 10 selector = selection_nodes.calc_varex_kappa_ratio(selector, "provisional accept") @@ -817,7 +822,7 @@ def test_calc_varex_kappa_ratio_smoke(): # Log without running if no components of decide_comps are in the component table selector = sample_selector() selector = selection_nodes.calc_varex_kappa_ratio(selector, "NotAClassification") - assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["kappa_rate"] == None + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["kappa_rate"] is None # Raise error if "varex kappa ratio" is already in component_table selector = sample_selector("provclass") @@ -867,7 +872,8 @@ def test_calc_revised_meanmetricrank_guesses_smoke(): assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["conservative_guess"] > 0 assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["restrict_factor"] == 2 - # Run warning logging code for if any of the cross_component_metrics already existed and would be over-written + # Run warning logging code for when any of the cross_component_metrics + # already existed and would be over-written selector = sample_selector("provclass") selector.cross_component_metrics["kappa_elbow_kundu"] = 19.1 selector.cross_component_metrics["rho_elbow_kundu"] = 15.2 @@ -888,9 +894,9 @@ def test_calc_revised_meanmetricrank_guesses_smoke(): selector.cross_component_metrics["kappa_elbow_kundu"] = 19.1 selector.cross_component_metrics["rho_elbow_kundu"] = 15.2 selector = selection_nodes.calc_revised_meanmetricrank_guesses(selector, "NotAClassification") - assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["num_acc_guess"] == None + assert 
selector.tree["nodes"][selector.current_node_idx]["outputs"]["num_acc_guess"] is None assert ( - selector.tree["nodes"][selector.current_node_idx]["outputs"]["conservative_guess"] == None + selector.tree["nodes"][selector.current_node_idx]["outputs"]["conservative_guess"] is None ) # Raise error if "d_table_score_node0" is already in component_table diff --git a/tedana/tests/test_selection_utils.py b/tedana/tests/test_selection_utils.py index 472fa054e..d8969b184 100644 --- a/tedana/tests/test_selection_utils.py +++ b/tedana/tests/test_selection_utils.py @@ -91,7 +91,10 @@ def test_selectcomps2use_succeeds(): comps2use = selection_utils.selectcomps2use(selector, decide_comps) assert ( len(comps2use) == decide_comps_lengths[idx] - ), f"selectcomps2use test should select {decide_comps_lengths[idx]} with decide_comps={decide_comps}, but it selected {len(comps2use)}" + ), ( + f"selectcomps2use test should select {decide_comps_lengths[idx]} with " + f"decide_comps={decide_comps}, but it selected {len(comps2use)}" + ) def test_selectcomps2use_fails(): @@ -104,7 +107,7 @@ def test_selectcomps2use_fails(): ["accepted", 4], # needs to be either int or string, not both [4, 3, -1, 9], # no index should be < 0 [2, 4, 6, 21], # no index should be > number of 0 indexed components - 22, ## no index should be > number of 0 indexed components + 22, # no index should be > number of 0 indexed components ] for decide_comps in decide_comps_options: with pytest.raises(ValueError): @@ -152,7 +155,8 @@ def validate_changes(expected_classification): ) validate_changes("rejected") - # Change from accepted to rejected, which should output a warning (test if the warning appears?) + # Change from accepted to rejected, which should output a warning + # (test if the warning appears?) 
selector = sample_selector(options="provclass") decision_boolean = selector.component_table["classification"] == "accepted" boolstate = True @@ -181,7 +185,8 @@ def test_change_comptable_classifications_succeeds(): selector = sample_selector(options="provclass") - # Given the rho values in the sample table, decision_boolean should have 2 True and 2 False values + # Given the rho values in the sample table, decision_boolean should have + # 2 True and 2 False values comps2use = selection_utils.selectcomps2use(selector, "provisional accept") rho = selector.component_table.loc[comps2use, "rho"] decision_boolean = rho < 13.5 @@ -198,7 +203,7 @@ def test_change_comptable_classifications_succeeds(): assert numTrue == 2 assert numFalse == 2 # check every element that was supposed to change, did change - changeidx = decision_boolean.index[np.asarray(decision_boolean) == True] + changeidx = decision_boolean.index[np.asarray(decision_boolean) is True] new_vals = selector.component_table.loc[changeidx, "classification"] for val in new_vals: assert val == "accepted" diff --git a/tedana/workflows/tedana_reclassify.py b/tedana/workflows/tedana_reclassify.py index dadbb7865..20eb11e53 100644 --- a/tedana/workflows/tedana_reclassify.py +++ b/tedana/workflows/tedana_reclassify.py @@ -3,23 +3,17 @@ """ import argparse import datetime -import json import logging import os import os.path as op -import shutil import sys from glob import glob import numpy as np import pandas as pd -from nilearn.masking import compute_epi_mask -from scipy import stats -from threadpoolctl import threadpool_limits import tedana.gscontrol as gsc -from tedana import __version__, io, reporting, selection, stats, utils -from tedana.workflows.parser_utils import is_valid_file +from tedana import __version__, io, reporting, selection, utils LGR = logging.getLogger("GENERAL") RepLGR = logging.getLogger("REPORT") @@ -207,7 +201,7 @@ def post_tedana( if a in rej: in_both.append(a) for r in rej: - if r in acc and not r in rej: + if r in acc and r not in rej: in_both.append(r) if len(in_both) != 0: raise ValueError("The following components were both accepted and rejected: " f"{in_both}") @@ -306,7 +300,7 @@ def post_tedana( betas = np.linalg.lstsq(acc_ts, rej_ts, rcond=None)[0] pred_rej_ts = np.dot(acc_ts, betas) resid = rej_ts - pred_rej_ts - # TODO rej_idx not here right now. 
Need to fix bug + rej_idx = comps_accepted[comps_accepted].index mmix[:, rej_idx] = resid comp_names = [ io.add_decomp_prefix(comp, prefix="ica", max_value=comptable.index.max()) @@ -417,16 +411,5 @@ def post_tedana( os.remove(refname) -def _main(argv=None): - """Tedana entry point""" - # TODO change this _main function to fix _get_parser and tedana_workflow - options = _get_parser().parse_args(argv) - kwargs = vars(options) - n_threads = kwargs.pop("n_threads") - n_threads = None if n_threads == -1 else n_threads - with threadpool_limits(limits=n_threads, user_api=None): - tedana_workflow(**kwargs) - - if __name__ == "__main__": - _main() + main() From f01a9a9e0f421904eb0822b580239aaf0d45c1bb Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Mon, 8 Aug 2022 18:08:55 -0400 Subject: [PATCH 005/177] Actually fix incorrect style checker issue --- tedana/selection/selection_nodes.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tedana/selection/selection_nodes.py b/tedana/selection/selection_nodes.py index 939df7751..eb9f6db8b 100644 --- a/tedana/selection/selection_nodes.py +++ b/tedana/selection/selection_nodes.py @@ -512,19 +512,19 @@ def parse_vals(val): ) else: - _ = parse_vals(left) - _ = parse_vals(right) + left1_val = parse_vals(left) # noqa: F841 + right1_val = parse_vals(right) # noqa: F841 decision_boolean = eval(f"(left_scale*left1_val) {op} (right_scale * right1_val)") if is_compound >= 2: - _ = parse_vals(left2) - _ = parse_vals(right2) + left2_val = parse_vals(left2) # noqa: F841 + right2_val = parse_vals(right2) # noqa: F841 statement1 = decision_boolean.copy() statement2 = eval(f"(left2_scale*left2_val) {op2} (right2_scale * right2_val)") # logical dot product for compound statement decision_boolean = statement1 * statement2 if is_compound == 3: - _ = parse_vals(left3) - _ = parse_vals(right3) + left3_val = parse_vals(left3) # noqa: F841 + right3_val = parse_vals(right3) # noqa: F841 # statement 1 is now the combination of the first two conditional statements statement1 = decision_boolean.copy() # statement 2 is now the third conditional statement From 29b6fbaf7d975899afb15d9947753ae148fcc5da Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Mon, 8 Aug 2022 18:13:34 -0400 Subject: [PATCH 006/177] Unfix another style checker error --- tedana/tests/test_selection_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tedana/tests/test_selection_utils.py b/tedana/tests/test_selection_utils.py index d8969b184..b1f387ac4 100644 --- a/tedana/tests/test_selection_utils.py +++ b/tedana/tests/test_selection_utils.py @@ -203,7 +203,7 @@ def test_change_comptable_classifications_succeeds(): assert numTrue == 2 assert numFalse == 2 # check every element that was supposed to change, did change - changeidx = decision_boolean.index[np.asarray(decision_boolean) is True] + changeidx = decision_boolean.index[np.asarray(decision_boolean) == True] # noqa: E712 new_vals = selector.component_table.loc[changeidx, "classification"] for val in new_vals: assert val == "accepted" From ac3488242dd8a83774c7b9e5a7ec3612bb4934a4 Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Mon, 8 Aug 2022 18:22:09 -0400 Subject: [PATCH 007/177] Attempt to make Black happy, even though it does not actually say what's wrong --- tedana/io.py | 1 + tedana/reporting/static_figures.py | 8 +++++++- tedana/selection/ComponentSelector.py | 4 +--- tedana/selection/selection_nodes.py | 6 +----- tedana/tests/test_selection_utils.py | 4 +--- 5 files changed, 11 insertions(+), 12 
deletions(-) diff --git a/tedana/io.py b/tedana/io.py index 6e489e123..fed2b08a2 100644 --- a/tedana/io.py +++ b/tedana/io.py @@ -32,6 +32,7 @@ class CustomEncoder(json.JSONEncoder): # noqa: E501 See here: https://stackoverflow.com/questions/50916422/python-typeerror-object-of-type-int64-is-not-json-serializable/50916741 """ + def default(self, obj): # int64 non-serializable but is a numpy output if isinstance(obj, np.integer): diff --git a/tedana/reporting/static_figures.py b/tedana/reporting/static_figures.py index 1afce5446..d90f914ba 100644 --- a/tedana/reporting/static_figures.py +++ b/tedana/reporting/static_figures.py @@ -43,7 +43,13 @@ def _trim_edge_zeros(arr): def carpet_plot( - optcom_ts, denoised_ts, hikts, lowkts, mask, io_generator, gscontrol=None + optcom_ts, + denoised_ts, + hikts, + lowkts, + mask, + io_generator, + gscontrol=None ): """Generate a set of carpet plots for the combined and denoised data. diff --git a/tedana/selection/ComponentSelector.py b/tedana/selection/ComponentSelector.py index 099014c93..ca806a674 100644 --- a/tedana/selection/ComponentSelector.py +++ b/tedana/selection/ComponentSelector.py @@ -211,9 +211,7 @@ def validate_tree(tree): compclass = compclass | set(tmp_comp) nonstandard_labels = compclass.difference(all_classifications) if nonstandard_labels: - LGR.warning( - f"{compclass} in node {i} of the decision tree includes a classification " - ) + LGR.warning(f"{compclass} in node {i} of the decision tree includes a classification") if "decide_comps" in node.get("parameters").keys(): tmp_comp = node["parameters"]["decide_comps"] if isinstance(tmp_comp, str): diff --git a/tedana/selection/selection_nodes.py b/tedana/selection/selection_nodes.py index eb9f6db8b..276114816 100644 --- a/tedana/selection/selection_nodes.py +++ b/tedana/selection/selection_nodes.py @@ -1260,11 +1260,7 @@ def calc_max_good_meanmetricrank( function_name_idx = f"Step {selector.current_node_idx}: calc_max_good_meanmetricrank" - if ( - (metric_suffix is not None) - and (metric_suffix != "") - and isinstance(metric_suffix, str) - ): + if ((metric_suffix is not None) and (metric_suffix != "") and isinstance(metric_suffix, str)): metric_name = f"max_good_meanmetricrank_{metric_suffix}" else: metric_name = "max_good_meanmetricrank" diff --git a/tedana/tests/test_selection_utils.py b/tedana/tests/test_selection_utils.py index b1f387ac4..e279fb3c6 100644 --- a/tedana/tests/test_selection_utils.py +++ b/tedana/tests/test_selection_utils.py @@ -89,9 +89,7 @@ def test_selectcomps2use_succeeds(): for idx, decide_comps in enumerate(decide_comps_options): comps2use = selection_utils.selectcomps2use(selector, decide_comps) - assert ( - len(comps2use) == decide_comps_lengths[idx] - ), ( + assert (len(comps2use) == decide_comps_lengths[idx]), ( f"selectcomps2use test should select {decide_comps_lengths[idx]} with " f"decide_comps={decide_comps}, but it selected {len(comps2use)}" ) From 9e8159a6cd9c73b3033843d3508bed77f9985d2f Mon Sep 17 00:00:00 2001 From: handwerkerd <7406227+handwerkerd@users.noreply.github.com> Date: Tue, 9 Aug 2022 10:28:54 -0400 Subject: [PATCH 008/177] ran black --- tedana/io.py | 3 ++- tedana/reporting/static_figures.py | 34 ++++++---------------------- tedana/selection/selection_nodes.py | 2 +- tedana/tests/test_selection_utils.py | 2 +- 4 files changed, 11 insertions(+), 30 deletions(-) diff --git a/tedana/io.py b/tedana/io.py index fed2b08a2..18dcad683 100644 --- a/tedana/io.py +++ b/tedana/io.py @@ -47,7 +47,7 @@ def default(self, obj): return 
super(CustomEncoder, self).default(obj) -class OutputGenerator(): +class OutputGenerator: """A class for managing tedana outputs. Parameters @@ -318,6 +318,7 @@ def save_self(self): class InputHarvester: """Turns a registry file into a lookup table to get previous data.""" + loaders = { "json": lambda f: load_json(f), "tsv": lambda f: pd.read_csv(f, delimiter="\t"), diff --git a/tedana/reporting/static_figures.py b/tedana/reporting/static_figures.py index d90f914ba..81c739983 100644 --- a/tedana/reporting/static_figures.py +++ b/tedana/reporting/static_figures.py @@ -42,15 +42,7 @@ def _trim_edge_zeros(arr): return arr[bounding_box] -def carpet_plot( - optcom_ts, - denoised_ts, - hikts, - lowkts, - mask, - io_generator, - gscontrol=None -): +def carpet_plot(optcom_ts, denoised_ts, hikts, lowkts, mask, io_generator, gscontrol=None): """Generate a set of carpet plots for the combined and denoised data. Parameters @@ -129,9 +121,7 @@ def carpet_plot( title="Optimally Combined Data (Pre-GSR)", ) fig.tight_layout() - fig.savefig( - os.path.join(io_generator.out_dir, "figures", "carpet_optcom_nogsr.svg") - ) + fig.savefig(os.path.join(io_generator.out_dir, "figures", "carpet_optcom_nogsr.svg")) if (gscontrol is not None) and ("mir" in gscontrol): mir_denoised_img = io_generator.get_name("mir denoised img") @@ -144,9 +134,7 @@ def carpet_plot( title="Denoised Data (Post-MIR)", ) fig.tight_layout() - fig.savefig( - os.path.join(io_generator.out_dir, "figures", "carpet_denoised_mir.svg") - ) + fig.savefig(os.path.join(io_generator.out_dir, "figures", "carpet_denoised_mir.svg")) mir_denoised_img = io_generator.get_name("ICA accepted mir denoised img") fig, ax = plt.subplots(figsize=(14, 7)) @@ -158,9 +146,7 @@ def carpet_plot( title="High-Kappa Data (Post-MIR)", ) fig.tight_layout() - fig.savefig( - os.path.join(io_generator.out_dir, "figures", "carpet_accepted_mir.svg") - ) + fig.savefig(os.path.join(io_generator.out_dir, "figures", "carpet_accepted_mir.svg")) def comp_figures(ts, mask, comptable, mmix, io_generator, png_cmap): @@ -211,20 +197,14 @@ def comp_figures(ts, mask, comptable, mmix, io_generator, png_cmap): for compnum in comptable.index.values: if comptable.loc[compnum, "classification"] == "accepted": line_color = "g" - expl_text = ( - "accepted reason(s): " + comptable.loc[compnum, "classification_tags"] - ) + expl_text = "accepted reason(s): " + comptable.loc[compnum, "classification_tags"] elif comptable.loc[compnum, "classification"] == "rejected": line_color = "r" - expl_text = ( - "rejected reason(s): " + comptable.loc[compnum, "classification_tags"] - ) + expl_text = "rejected reason(s): " + comptable.loc[compnum, "classification_tags"] elif comptable.loc[compnum, "classification"] == "ignored": line_color = "k" - expl_text = ( - "ignored reason(s): " + comptable.loc[compnum, "classification_tags"] - ) + expl_text = "ignored reason(s): " + comptable.loc[compnum, "classification_tags"] else: # Classification not added # If new, this will keep code running diff --git a/tedana/selection/selection_nodes.py b/tedana/selection/selection_nodes.py index 276114816..3724d339e 100644 --- a/tedana/selection/selection_nodes.py +++ b/tedana/selection/selection_nodes.py @@ -1260,7 +1260,7 @@ def calc_max_good_meanmetricrank( function_name_idx = f"Step {selector.current_node_idx}: calc_max_good_meanmetricrank" - if ((metric_suffix is not None) and (metric_suffix != "") and isinstance(metric_suffix, str)): + if (metric_suffix is not None) and (metric_suffix != "") and isinstance(metric_suffix, 
str): metric_name = f"max_good_meanmetricrank_{metric_suffix}" else: metric_name = "max_good_meanmetricrank" diff --git a/tedana/tests/test_selection_utils.py b/tedana/tests/test_selection_utils.py index e279fb3c6..a9eb1def6 100644 --- a/tedana/tests/test_selection_utils.py +++ b/tedana/tests/test_selection_utils.py @@ -89,7 +89,7 @@ def test_selectcomps2use_succeeds(): for idx, decide_comps in enumerate(decide_comps_options): comps2use = selection_utils.selectcomps2use(selector, decide_comps) - assert (len(comps2use) == decide_comps_lengths[idx]), ( + assert len(comps2use) == decide_comps_lengths[idx], ( f"selectcomps2use test should select {decide_comps_lengths[idx]} with " f"decide_comps={decide_comps}, but it selected {len(comps2use)}" ) From fd4abf29154e7b53ad8938908a80a028c56b9100 Mon Sep 17 00:00:00 2001 From: handwerkerd <7406227+handwerkerd@users.noreply.github.com> Date: Wed, 10 Aug 2022 14:37:24 -0400 Subject: [PATCH 009/177] Added elbows to reports --- tedana/reporting/dynamic_figures.py | 82 +++++++++++++++++++- tedana/reporting/html_report.py | 54 ++++++++++++- tedana/resources/decision_trees/kundu.json | 4 +- tedana/resources/decision_trees/minimal.json | 13 ++-- 4 files changed, 138 insertions(+), 15 deletions(-) diff --git a/tedana/reporting/dynamic_figures.py b/tedana/reporting/dynamic_figures.py index 87a987eab..60b16d04e 100644 --- a/tedana/reporting/dynamic_figures.py +++ b/tedana/reporting/dynamic_figures.py @@ -124,6 +124,7 @@ def _create_data_struct(comptable_path, color_mapping=color_mapping): color=df["color"], size=df["var_exp_size"], classif=df["classification"], + classtag=df["classification_tags"], angle=df["angle"], ) ) @@ -131,7 +132,7 @@ def _create_data_struct(comptable_path, color_mapping=color_mapping): return cds -def _create_kr_plt(comptable_cds): +def _create_kr_plt(comptable_cds, kappa_elbow=None, rho_elbow=None): """ Create Dymamic Kappa/Rho Scatter Plot @@ -140,6 +141,10 @@ def _create_kr_plt(comptable_cds): comptable_cds: bokeh.models.ColumnDataSource Data structure containing a limited set of columns from the comp_table + kappa_elbow, rho_elbow: :obj:`float` :obj:`int` + The elbow thresholds for kappa and rho to display on the plots + Defaults=None + Returns ------- fig: bokeh.plotting.figure.Figure @@ -152,6 +157,7 @@ def _create_kr_plt(comptable_cds): ("Kappa", "@kappa{0.00}"), ("Rho", "@rho{0.00}"), ("Var. 
Expl.", "@varexp{0.00}%"), + ("Tags", "@classtag"), ] ) fig = plotting.figure( @@ -171,6 +177,50 @@ def _create_kr_plt(comptable_cds): source=comptable_cds, legend_group="classif", ) + + if rho_elbow: + rho_elbow_line = models.Span( + location=rho_elbow, + dimension="width", + line_color="#000033", + line_width=1, + line_alpha=0.75, + line_dash="dashed", + name="rho elbow", + ) + rho_elbow_label = models.Label( + x=300, + y=rho_elbow * 1.02, + x_units="screen", + text="rho elbow", + text_color="#000033", + text_alpha=0.75, + text_font_size="10px", + ) + fig.add_layout(rho_elbow_line) + fig.add_layout(rho_elbow_label) + if kappa_elbow: + kappa_elbow_line = models.Span( + location=kappa_elbow, + dimension="height", + line_color="#000033", + line_width=1, + line_alpha=0.75, + line_dash="dashed", + name="kappa elbow", + ) + kappa_elbow_label = models.Label( + x=kappa_elbow * 1.02, + y=300, + y_units="screen", + text="kappa elbow", + text_color="#000033", + text_alpha=0.75, + text_font_size="10px", + ) + fig.add_layout(kappa_elbow_line) + fig.add_layout(kappa_elbow_label) + fig.xaxis.axis_label = "Kappa" fig.yaxis.axis_label = "Rho" fig.toolbar.logo = None @@ -181,7 +231,7 @@ def _create_kr_plt(comptable_cds): def _create_sorted_plt( - comptable_cds, n_comps, x_var, y_var, title=None, x_label=None, y_label=None + comptable_cds, n_comps, x_var, y_var, title=None, x_label=None, y_label=None, elbow=None ): """ Create dynamic sorted plots @@ -206,6 +256,10 @@ def _create_sorted_plt( y_label: str Y-axis label + elbow: :obj:`float` :obj:`int` + The elbow threshold for kappa or rho to display on the plot + Default=None + Returns ------- fig: bokeh.plotting.figure.Figure @@ -217,6 +271,7 @@ def _create_sorted_plt( ("Kappa", "@kappa{0.00}"), ("Rho", "@rho{0.00}"), ("Var. Expl.", "@varexp{0.00}%"), + ("Tags", "@classtag"), ] ) fig = plotting.figure( @@ -236,6 +291,28 @@ def _create_sorted_plt( fig.x_range = models.Range1d(-1, n_comps + 1) fig.toolbar.logo = None + if elbow: + elbow_line = models.Span( + location=elbow, + dimension="width", + line_color="#000033", + line_width=1, + line_alpha=0.75, + line_dash="dashed", + name="elbow", + ) + elbow_label = models.Label( + x=20, + y=elbow * 1.02, + x_units="screen", + text="elbow", + text_color="#000033", + text_alpha=0.75, + text_font_size="10px", + ) + fig.add_layout(elbow_line) + fig.add_layout(elbow_label) + return fig @@ -250,6 +327,7 @@ def _create_varexp_pie_plt(comptable_cds, n_comps): ("Kappa", "@kappa{0.00}"), ("Rho", "@rho{0.00}"), ("Var. 
Exp.", "@varexp{0.00}%"), + ("Tags", "@classtag"), ], ) fig.wedge( diff --git a/tedana/reporting/html_report.py b/tedana/reporting/html_report.py index 5477035b0..c754ab412 100644 --- a/tedana/reporting/html_report.py +++ b/tedana/reporting/html_report.py @@ -9,6 +9,11 @@ from tedana import __version__ from tedana.reporting import dynamic_figures as df +from tedana.io import load_json + +import logging + +LGR = logging.getLogger("GENERAL") def _generate_buttons(out_dir): @@ -119,15 +124,58 @@ def generate_report(io_generator, tr): comptable_path = io_generator.get_name("ICA metrics tsv") comptable_cds = df._create_data_struct(comptable_path) + # Load the cross component metrics, including the kappa & rho elbows + cross_component_metrics_path = io_generator.get_name("ICA cross component metrics json") + cross_comp_metrics_dict = load_json(cross_component_metrics_path) + + def get_elbow_val(elbow_prefix): + """ + Find cross component metrics that begin with elbow_prefix and output the value + Current prefixes are kappa_elbow_kundu and rho_elbow_kundu. This flexability + means anything that begins [kappa/rho]_elbow will be found and used regardless + of the suffix. If more than one metric has the prefix then the alphabetically + first one will be used and a warning will be logged + """ + elbow_val = [val for key, val in cross_comp_metrics_dict.items() if elbow_prefix in key] + if not elbow_val or len(elbow_val) == 0: + LGR.warning( + f"No {elbow_prefix} saved in cross_component_metrics so not displaying in report" + ) + return None + elif len(elbow_val) > 1: + LGR.warning( + f"More than one key saved in cross_component_metrics begins with {elbow_prefix}. Displaying the alphabetially first one in report" + ) + return elbow_val[0] + else: + return elbow_val[0] # Return a value, not a list with a single value + + kappa_elbow = get_elbow_val("kappa_elbow") + rho_elbow = get_elbow_val("rho_elbow") + # Create kappa rho plot - kappa_rho_plot = df._create_kr_plt(comptable_cds) + kappa_rho_plot = df._create_kr_plt(comptable_cds, kappa_elbow=kappa_elbow, rho_elbow=rho_elbow) # Create sorted plots kappa_sorted_plot = df._create_sorted_plt( - comptable_cds, n_comps, "kappa_rank", "kappa", "Kappa Rank", "Kappa" + comptable_cds, + n_comps, + "kappa_rank", + "kappa", + title="Kappa Rank", + x_label="Components sorted by Kappa", + y_label="Kappa", + elbow=kappa_elbow, ) rho_sorted_plot = df._create_sorted_plt( - comptable_cds, n_comps, "rho_rank", "rho", "Rho Rank", "Rho" + comptable_cds, + n_comps, + "rho_rank", + "rho", + title="Rho Rank", + x_label="Components sorted by Rho", + y_label="Rho", + elbow=rho_elbow, ) varexp_pie_plot = df._create_varexp_pie_plt(comptable_cds, n_comps) diff --git a/tedana/resources/decision_trees/kundu.json b/tedana/resources/decision_trees/kundu.json index f40fba90f..faaaffbe7 100644 --- a/tedana/resources/decision_trees/kundu.json +++ b/tedana/resources/decision_trees/kundu.json @@ -132,8 +132,8 @@ { "functionname": "dec_left_op_right", "parameters": { - "ifTrue": "nochange", - "ifFalse": "provisionalreject", + "ifTrue": "provisionalreject", + "ifFalse": "nochange", "decide_comps": [ "provisionalreject", "provisionalaccept" diff --git a/tedana/resources/decision_trees/minimal.json b/tedana/resources/decision_trees/minimal.json index 3f5827732..3ebe946a1 100644 --- a/tedana/resources/decision_trees/minimal.json +++ b/tedana/resources/decision_trees/minimal.json @@ -114,7 +114,7 @@ "functionname": "dec_left_op_right", "parameters": { "ifTrue": "provisionalaccept", - "ifFalse": 
"nochange", + "ifFalse": "provisionalreject", "decide_comps": "unclassified", "op": ">", "left": "kappa", @@ -145,10 +145,10 @@ { "functionname": "dec_left_op_right", "parameters": { - "ifTrue": "nochange", - "ifFalse": "provisionalreject", + "ifTrue": "provisionalreject", + "ifFalse": "nochange", "decide_comps": [ - "unclassified", + "provisionalreject", "provisionalaccept" ], "op": ">", @@ -165,10 +165,7 @@ "parameters": { "ifTrue": "accepted", "ifFalse": "nochange", - "decide_comps": [ - "provisionalreject", - "unclassified" - ] + "decide_comps": "provisionalreject" }, "kwargs": { "var_metric": "variance explained", From d3f0b6bc01ac382c5c9986a66364a0cd33c84cce Mon Sep 17 00:00:00 2001 From: handwerkerd <7406227+handwerkerd@users.noreply.github.com> Date: Tue, 16 Aug 2022 16:24:05 -0400 Subject: [PATCH 010/177] fixing kundu tree and added calc_median --- tedana/resources/decision_trees/kundu.json | 29 ++++-- tedana/selection/ComponentSelector.py | 60 +++++++----- tedana/selection/selection_nodes.py | 102 ++++++++++++++++++++- tedana/tests/test_selection_nodes.py | 77 ++++++++++++++++ 4 files changed, 237 insertions(+), 31 deletions(-) diff --git a/tedana/resources/decision_trees/kundu.json b/tedana/resources/decision_trees/kundu.json index faaaffbe7..28946af56 100644 --- a/tedana/resources/decision_trees/kundu.json +++ b/tedana/resources/decision_trees/kundu.json @@ -67,11 +67,22 @@ "right": "countsigFT2" }, "kwargs": { - "log_extra_info": "Reject if countsig_in S0clusters > T2clusters", + "left2": "countsigFT2", + "op2": ">", + "right2": 0, + "log_extra_info": "Reject if countsig_in S0clusters > T2clusters & countsig_in_T2clusters>0", "log_extra_report": "", "tag_ifTrue": "Unlikely BOLD" } }, + { + "functionname": "calc_median", + "parameters": { + "decide_comps": "all", + "metric_name": "variance explained", + "median_label": "varex" + } + }, { "functionname": "dec_left_op_right", "parameters": { @@ -83,7 +94,10 @@ "right": "dice_FT2" }, "kwargs": { - "log_extra_info": "Reject if DICE S0>T2", + "left2": "variance explained", + "op2": ">", + "right2": "median_varex", + "log_extra_info": "Reject if DICE S0>T2 & varex>median", "log_extra_report": "", "tag_ifTrue": "Unlikely BOLD" } @@ -99,7 +113,10 @@ "right": "signal-noise_t" }, "kwargs": { - "log_extra_info": "Reject if T2fitdiff_invsout_ICAmap_Tstat<0", + "left2": "variance explained", + "op2": ">", + "right2": "median_varex", + "log_extra_info": "Reject if T2fitdiff_invsout_ICAmap_Tstat<0 & varex>median", "log_extra_report": "", "tag_ifTrue": "Unlikely BOLD" } @@ -295,7 +312,7 @@ "provisionalreject" ], "op": ">", - "left": "d_table_score_node17", + "left": "d_table_score_node18", "right": "conservative_guess" }, "kwargs": { @@ -322,7 +339,7 @@ "provisionalreject" ], "op": ">", - "left": "d_table_score_node17", + "left": "d_table_score_node18", "right": "num_acc_guess" }, "kwargs": { @@ -361,7 +378,7 @@ "provisionalreject" ], "op": ">", - "left": "d_table_score_node17", + "left": "d_table_score_node18", "right": "num_acc_guess" }, "kwargs": { diff --git a/tedana/selection/ComponentSelector.py b/tedana/selection/ComponentSelector.py index ca806a674..2b8f377c8 100644 --- a/tedana/selection/ComponentSelector.py +++ b/tedana/selection/ComponentSelector.py @@ -180,13 +180,15 @@ def validate_tree(tree): i, invalid_params ) - invalid_kwargs = set(node.get("kwargs").keys()) - kwargs - if len(invalid_kwargs) > 0: - err_msg += ( - "Node {} has additional, undefined optional parameters (kwargs): {}\n".format( - i, invalid_kwargs + # Only if 
kwargs are inputted, make sure they are all valid + if node.get("kwargs") is not None: + invalid_kwargs = set(node.get("kwargs").keys()) - kwargs + if len(invalid_kwargs) > 0: + err_msg += ( + "Node {} has additional, undefined optional parameters (kwargs): {}\n".format( + i, invalid_kwargs + ) ) - ) # Gather all the classification labels used in each tree both for # changing classifications and for decide_comps which defines which @@ -224,19 +226,20 @@ def validate_tree(tree): "label that was not predefined" ) - tagset = set() - if "tag_ifTrue" in node.get("kwargs").keys(): - tagset.update(set([node["kwargs"]["tag_ifTrue"]])) - if "tag_ifFalse" in node.get("kwargs").keys(): - tagset.update(set([node["kwargs"]["tag_ifFalse"]])) - if "tag" in node.get("kwargs").keys(): - tagset.update(set([node["kwargs"]["tag"]])) - undefined_classification_tags = tagset.difference(set(tree.get("classification_tags"))) - if undefined_classification_tags: - LGR.warning( - f"{tagset} in node {i} of the decision tree includes a classification " - "tag that was not predefined" - ) + if node.get("kwargs") is not None: + tagset = set() + if "tag_ifTrue" in node.get("kwargs").keys(): + tagset.update(set([node["kwargs"]["tag_ifTrue"]])) + if "tag_ifFalse" in node.get("kwargs").keys(): + tagset.update(set([node["kwargs"]["tag_ifFalse"]])) + if "tag" in node.get("kwargs").keys(): + tagset.update(set([node["kwargs"]["tag"]])) + undefined_classification_tags = tagset.difference(set(tree.get("classification_tags"))) + if undefined_classification_tags: + LGR.warning( + f"{tagset} in node {i} of the decision tree includes a classification " + "tag that was not predefined" + ) if err_msg: raise TreeError("\n" + err_msg) @@ -429,17 +432,28 @@ def select(self): # parse the variables to use with the function fcn = getattr(selection_nodes, node["functionname"]) - params, kwargs = node["parameters"], node["kwargs"] + params = node["parameters"] + params = self.check_null(params, node["functionname"]) - kwargs = self.check_null(kwargs, node["functionname"]) + + if "kwargs" in node: + kwargs = node["kwargs"] + kwargs = self.check_null(kwargs, node["functionname"]) + all_params = {**params, **kwargs} + else: + kwargs = None + all_params = {**params} # log the function name and parameters used LGR.info( "Step {}: Running function {} with parameters: {}".format( - self.current_node_idx, node["functionname"], {**params, **kwargs} + self.current_node_idx, node["functionname"], all_params ) ) # run the decision node function - self = fcn(self, **params, **kwargs) + if kwargs is not None: + self = fcn(self, **params, **kwargs) + else: + self = fcn(self, **params) self.tree["used_metrics"].update( self.tree["nodes"][self.current_node_idx]["outputs"]["used_metrics"] ) diff --git a/tedana/selection/selection_nodes.py b/tedana/selection/selection_nodes.py index 3724d339e..b8da33d4d 100644 --- a/tedana/selection/selection_nodes.py +++ b/tedana/selection/selection_nodes.py @@ -423,9 +423,9 @@ def operator_scale_descript(val_scale, val): is_compound = 0 # If any of the values for the second boolean statement are set - if left2 or right2 or op2: + if left2 is not None or right2 is not None or op2 is not None: # Check if they're all set & use them all or raise an error - if left2 and right2 and op2: + if left2 is not None and right2 is not None and op2 is not None: is_compound = 2 left2_scale, left2, right2_scale, right2 = confirm_valid_conditional( left2_scale, left2, right2_scale, right2, op2 @@ -685,6 +685,104 @@ def 
dec_variance_lessthan_thresholds( ) +def calc_median( + selector, + decide_comps, + metric_name, + median_label, + log_extra_report="", + log_extra_info="", + custom_node_label="", + only_used_metrics=False, +): + """ + Calculates the median across components for the metric defined by metric_name + + Parameters + ---------- + {selector} + {decide_comps} + metric_name: :obj:`str` + The name of a column in selector.component_table. The median of + the values in this column will be calculated + median_label: :obj:`str` + The median will be saved in "median_(median_label)" + {log_extra} + {custom_node_label} + {only_used_metrics} + + Returns + ------- + {basicreturns} + + """ + + function_name_idx = f"Step {selector.current_node_idx}: calc_median" + if not isinstance(median_label, str): + raise ValueError( + f"{function_name_idx}: median_label must be a string. It is: {median_label}" + ) + else: + label_name = f"median_{median_label}" + + if not isinstance(metric_name, str): + raise ValueError( + f"{function_name_idx}: metric_name must be a string. It is: {metric_name}" + ) + + outputs = { + "decision_node_idx": selector.current_node_idx, + "node_label": None, + label_name: None, + "used_metrics": set([metric_name]), + "calc_cross_comp_metrics": [label_name], + } + + if only_used_metrics: + return outputs["used_metrics"] + + if label_name in selector.cross_component_metrics: + LGR.warning( + f"{label_name} already calculated. Overwriting previous value in {function_name_idx}" + ) + + if custom_node_label: + outputs["node_label"] = custom_node_label + else: + outputs["node_label"] = f"Calc {label_name}" + + if log_extra_info: + LGR.info(log_extra_info) + if log_extra_report: + RepLGR.info(log_extra_report) + + comps2use = selectcomps2use(selector, decide_comps) + confirm_metrics_exist( + selector.component_table, outputs["used_metrics"], function_name=function_name_idx + ) + + if not comps2use: + log_decision_tree_step( + function_name_idx, + comps2use, + decide_comps=decide_comps, + ) + else: + + outputs[label_name] = np.median(selector.component_table.loc[comps2use, metric_name]) + + selector.cross_component_metrics[label_name] = outputs[label_name] + + log_decision_tree_step(function_name_idx, comps2use, calc_outputs=outputs) + + selector.tree["nodes"][selector.current_node_idx]["outputs"] = outputs + + return selector + + +calc_median.__doc__ = calc_median.__doc__.format(**decision_docs) + + def calc_kappa_rho_elbows_kundu( selector, decide_comps, diff --git a/tedana/tests/test_selection_nodes.py b/tedana/tests/test_selection_nodes.py index 89a90ddda..05a3c5e7c 100644 --- a/tedana/tests/test_selection_nodes.py +++ b/tedana/tests/test_selection_nodes.py @@ -518,6 +518,83 @@ def test_calc_kappa_rho_elbows_kundu(): assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["varex_upper_p"] is None +def test_calc_median_smoke(): + """Smoke tests for calc_median""" + + selector = sample_selector() + decide_comps = "all" + + # Outputs just the metrics used in this function {"variance explained"} + used_metrics = selection_nodes.calc_median( + selector, + decide_comps, + metric_name="variance explained", + median_label="varex", + only_used_metrics=True, + ) + assert len(used_metrics - set(["variance explained"])) == 0 + + # Standard call to this function. 
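    # (A toy illustration, with invented numbers, of what calc_median stores: the
    # median of the selected components' "variance explained" column, e.g.
    # np.median([0.1, 2.3, 4.5, 9.9]) -> 3.4, saved to
    # selector.cross_component_metrics["median_varex"].)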
+    selector = selection_nodes.calc_median( + selector, + decide_comps, + metric_name="variance explained", + median_label="varex", + log_extra_report="report log", + log_extra_info="info log", + custom_node_label="custom label", + ) + calc_cross_comp_metrics = {"median_varex"} + output_calc_cross_comp_metrics = set( + selector.tree["nodes"][selector.current_node_idx]["outputs"]["calc_cross_comp_metrics"] + ) + # Confirming the intended metrics are added to outputs and they have non-zero values + assert len(output_calc_cross_comp_metrics - calc_cross_comp_metrics) == 0 + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["median_varex"] > 0 + + # repeating standard call and should make a warning because median_varex already exists + selector = selection_nodes.calc_median( + selector, decide_comps, metric_name="variance explained", median_label="varex" + ) + # Confirming the intended metrics are added to outputs and they have non-zero values + assert len(output_calc_cross_comp_metrics - calc_cross_comp_metrics) == 0 + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["median_varex"] > 0 + + # Log without running if no components of decide_comps are in the component table + selector = sample_selector() + selector = selection_nodes.calc_median( + selector, + decide_comps="NotAClassification", + metric_name="variance explained", + median_label="varex", + ) + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["median_varex"] is None + + # Crashes because median_label is not a string + with pytest.raises(ValueError): + selector = selection_nodes.calc_median( + selector, + decide_comps, + metric_name="variance explained", + median_label=5, + log_extra_report="report log", + log_extra_info="info log", + custom_node_label="custom label", + ) + + # Crashes because metric_name is not a string + with pytest.raises(ValueError): + selector = selection_nodes.calc_median( + selector, + decide_comps, + metric_name=5, + median_label="varex", + log_extra_report="report log", + log_extra_info="info log", + custom_node_label="custom label", + ) + + def test_dec_classification_doesnt_exist_smoke(): """Smoke tests for dec_classification_doesnt_exist""" From e72c1e0872306ddddaa42aec30874a46831329fb Mon Sep 17 00:00:00 2001 From: handwerkerd <7406227+handwerkerd@users.noreply.github.com> Date: Wed, 17 Aug 2022 11:28:55 -0400 Subject: [PATCH 011/177] kundu.json added comment --- tedana/resources/decision_trees/kundu.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tedana/resources/decision_trees/kundu.json b/tedana/resources/decision_trees/kundu.json index 28946af56..3e24c219a 100644 --- a/tedana/resources/decision_trees/kundu.json +++ b/tedana/resources/decision_trees/kundu.json @@ -129,7 +129,8 @@ "kwargs": { "log_extra_info": "", "log_extra_report": "" - } + }, + "_comment": "In original code, this was followed by a step that turned everything not rejected to ignored (accepted) if there are no unclassified components left. 
At this point, components are either rejected or unclassified so it is not clear how that could ever happen" }, { "functionname": "dec_left_op_right", From f34f1b8d10d56ca7e7f5f77522e081977f5abd38 Mon Sep 17 00:00:00 2001 From: handwerkerd <7406227+handwerkerd@users.noreply.github.com> Date: Wed, 17 Aug 2022 12:30:42 -0400 Subject: [PATCH 012/177] kundu kappa_elbow is GTE not GT --- tedana/resources/decision_trees/kundu.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tedana/resources/decision_trees/kundu.json b/tedana/resources/decision_trees/kundu.json index 3e24c219a..d57a99797 100644 --- a/tedana/resources/decision_trees/kundu.json +++ b/tedana/resources/decision_trees/kundu.json @@ -138,7 +138,7 @@ "ifTrue": "provisionalaccept", "ifFalse": "provisionalreject", "decide_comps": "unclassified", - "op": ">", + "op": ">=", "left": "kappa", "right": "kappa_elbow_kundu" }, From de87de33d47f1adad0e2b0c860c9173d12e59ab5 Mon Sep 17 00:00:00 2001 From: handwerkerd <7406227+handwerkerd@users.noreply.github.com> Date: Wed, 17 Aug 2022 16:14:00 -0400 Subject: [PATCH 013/177] kundu dtm matches main and minimal updated --- tedana/resources/decision_trees/kundu.json | 39 ++++------- tedana/resources/decision_trees/minimal.json | 25 +++++-- tedana/selection/selection_nodes.py | 42 +++++++++-- tedana/tests/test_selection_nodes.py | 73 +++++++++++++++++++- 4 files changed, 142 insertions(+), 37 deletions(-) diff --git a/tedana/resources/decision_trees/kundu.json b/tedana/resources/decision_trees/kundu.json index d57a99797..6eddcf6d6 100644 --- a/tedana/resources/decision_trees/kundu.json +++ b/tedana/resources/decision_trees/kundu.json @@ -261,19 +261,19 @@ "provisionalaccept", "provisionalreject" ], - "op": "<", + "op": ">", "left": "d_table_score", "right": "max_good_meanmetricrank" }, "kwargs": { "tag_ifTrue": "Low variance", - "op2": "<", + "op2": "<=", "left2": "variance explained", "right2": "varex_lower_thresh", - "op3": ">", + "op3": "<=", "left3": "kappa", "right3": "kappa_elbow_kundu", - "log_extra_info": "If low variance and good kappa & d_table_scores accept even if rho or other metrics are bad" + "log_extra_info": "If low variance, accept even if bad kappa & d_table_scores" } }, { @@ -365,14 +365,14 @@ "percentile_thresh": 25 }, "kwargs": { + "num_lowest_var_comps": "num_acc_guess", "log_extra_info": "Calculuate a low variance threshold based on the 25th percentile variance component" - }, - "_comment": "In the original kundu code, this is run only on the first num_acc_guess remaining component... probably sorted for the lowest variance. 
Adding that functionality here would be messy and unlikely to significantly alter results" + } }, { "functionname": "dec_left_op_right", "parameters": { - "ifTrue": "rejected", + "ifTrue": "accepted", "ifFalse": "nochange", "decide_comps": [ "provisionalaccept", @@ -394,7 +394,7 @@ { "functionname": "dec_left_op_right", "parameters": { - "ifTrue": "rejected", + "ifTrue": "accepted", "ifFalse": "nochange", "decide_comps": [ "provisionalaccept", @@ -417,29 +417,16 @@ "functionname": "manual_classify", "parameters": { "new_classification": "accepted", - "decide_comps": "provisionalaccept" - }, - "kwargs": { - "log_extra_info": "Anything that is still provisionalaccept should be accepted", - "log_extra_report": "", - "tag": "Likely BOLD" - } - }, - { - "functionname": "manual_classify", - "parameters": { - "new_classification": "rejected", "decide_comps": [ - "provisionalreject", - "unclassified" + "provisionalaccept", + "provisionalreject" ] }, "kwargs": { - "log_extra_info": "Anything that is still provisionalreject should be rejected", + "log_extra_info": "Anything still provisional (accepted or rejected) should be accepted", "log_extra_report": "", - "tag": "Unlikely BOLD" - }, - "_comment": "According to a comment in the meica 2.7 code, nothing should be provisionalreject by this point." + "tag": "Likely BOLD" + } } ] } \ No newline at end of file diff --git a/tedana/resources/decision_trees/minimal.json b/tedana/resources/decision_trees/minimal.json index 3ebe946a1..0b4f3ceaa 100644 --- a/tedana/resources/decision_trees/minimal.json +++ b/tedana/resources/decision_trees/minimal.json @@ -63,11 +63,22 @@ "right": "countsigFT2" }, "kwargs": { - "log_extra_info": "Reject if countsig_in S0clusters > T2clusters", + "left2": "countsigFT2", + "op2": ">", + "right2": 0, + "log_extra_info": "Reject if countsig_in S0clusters > T2clusters & countsig_in_T2clusters>0", "log_extra_report": "", "tag_ifTrue": "Unlikely BOLD" } }, + { + "functionname": "calc_median", + "parameters": { + "decide_comps": "all", + "metric_name": "variance explained", + "median_label": "varex" + } + }, { "functionname": "dec_left_op_right", "parameters": { @@ -79,7 +90,10 @@ "right": "dice_FT2" }, "kwargs": { - "log_extra_info": "Reject if dice S0>T2", + "left2": "variance explained", + "op2": ">", + "right2": "median_varex", + "log_extra_info": "Reject if DICE S0>T2 & varex>median", "log_extra_report": "", "tag_ifTrue": "Unlikely BOLD" } @@ -95,7 +109,10 @@ "right": "signal-noise_t" }, "kwargs": { - "log_extra_info": "Reject if T2fitdiff_invsout_ICAmap_Tstat<0", + "left2": "variance explained", + "op2": ">", + "right2": "median_varex", + "log_extra_info": "Reject if T2fitdiff_invsout_ICAmap_Tstat<0 & varex>median", "log_extra_report": "", "tag_ifTrue": "Unlikely BOLD" } @@ -116,7 +133,7 @@ "ifTrue": "provisionalaccept", "ifFalse": "provisionalreject", "decide_comps": "unclassified", - "op": ">", + "op": ">=", "left": "kappa", "right": "kappa_elbow_kundu" }, diff --git a/tedana/selection/selection_nodes.py b/tedana/selection/selection_nodes.py index b8da33d4d..8c7de5172 100644 --- a/tedana/selection/selection_nodes.py +++ b/tedana/selection/selection_nodes.py @@ -1133,6 +1133,7 @@ def calc_varex_thresh( decide_comps, thresh_label, percentile_thresh, + num_lowest_var_comps=None, log_extra_report="", log_extra_info="", custom_node_label="", @@ -1154,6 +1155,11 @@ def calc_varex_thresh( A percentile threshold to apply to components to set the variance threshold. 
In the original kundu decision tree this was 90 for varex_upper_thresh and 25 for varex_lower_thresh + num_lowest_var_comps: :obj:`str` :obj:`int` + percentile can be calculated on the num_lowest_var_comps components with the + lowest variance. Either input an integer directly or input a string that is + a parameter stored in selector.cross_component_metrics ("num_acc_guess" in + original decision tree). Default is None {log_extra} {custom_node_label} {only_used_metrics} @@ -1177,6 +1183,7 @@ "decision_node_idx": selector.current_node_idx, "node_label": None, varex_name: None, + "num_lowest_var_comps": num_lowest_var_comps, "used_metrics": set(["variance explained"]), } if ( @@ -1204,6 +1211,19 @@ f"{perc_name} already calculated. Overwriting previous value in {function_name_idx}" ) + if num_lowest_var_comps is not None: + if isinstance(num_lowest_var_comps, str): + if num_lowest_var_comps in selector.cross_component_metrics: + num_lowest_var_comps = selector.cross_component_metrics[num_lowest_var_comps] + else: + raise ValueError( + f"{function_name_idx}: num_lowest_var_comps ( {num_lowest_var_comps}) is not in selector.cross_component_metrics" + ) + if not isinstance(num_lowest_var_comps, int): + raise ValueError( + f"{function_name_idx}: num_lowest_var_comps ( {num_lowest_var_comps}) is used as an array index and should be an integer" + ) + if custom_node_label: outputs["node_label"] = custom_node_label else: @@ -1226,11 +1246,23 @@ decide_comps=decide_comps, ) else: - - outputs[varex_name] = scoreatpercentile( - selector.component_table.loc[comps2use, "variance explained"], percentile_thresh - ) - + if num_lowest_var_comps is None: + outputs[varex_name] = scoreatpercentile( + selector.component_table.loc[comps2use, "variance explained"], percentile_thresh + ) + else: + # Using only the first num_lowest_var_comps components sorted to include lowest variance + if num_lowest_var_comps <= len(comps2use): + sorted_varex = np.sort( + (selector.component_table.loc[comps2use, "variance explained"]).to_numpy() + ) + outputs[varex_name] = scoreatpercentile( + sorted_varex[:num_lowest_var_comps], percentile_thresh + ) + else: + raise ValueError( + f"{function_name_idx}: num_lowest_var_comps ({num_lowest_var_comps}) needs to be <= len(comps2use) ({len(comps2use)})" + ) selector.cross_component_metrics[varex_name] = outputs[varex_name] log_decision_tree_step(function_name_idx, comps2use, calc_outputs=outputs) diff --git a/tedana/tests/test_selection_nodes.py b/tedana/tests/test_selection_nodes.py index 05a3c5e7c..8342a9cf4 100644 --- a/tedana/tests/test_selection_nodes.py +++ b/tedana/tests/test_selection_nodes.py @@ -665,8 +665,8 @@ def test_calc_varex_thresh_smoke(): """Smoke tests for calc_varex_thresh""" # Standard use of this function requires some components to be "provisional accept" - selector = sample_selector(options="provclass") - decide_comps = "provisional accept" + selector = sample_selector() + decide_comps = "all" # Outputs just the metrics used in this function {"variance explained"} used_metrics = selection_nodes.calc_varex_thresh( @@ -712,6 +712,75 @@ def test_calc_varex_thresh_smoke(): assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["varex_thresh"] > 0 assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["perc"] == 90 + # Standard call using num_lowest_var_comps as an integer + selector = selection_nodes.calc_varex_thresh( + selector, + decide_comps, 
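        # (A toy walk-through, with invented numbers, of the lowest-variance branch
        # exercised here: np.sort([9., 1., 4., 2., 8., 3.])[:4] keeps [1., 2., 3., 4.]
        # and scoreatpercentile([1., 2., 3., 4.], 25) returns 1.75.)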
thresh_label="new_lower", + percentile_thresh=25, + num_lowest_var_comps=8, + ) + calc_cross_comp_metrics = {"varex_new_lower_thresh", "new_lower_perc"} + output_calc_cross_comp_metrics = set( + selector.tree["nodes"][selector.current_node_idx]["outputs"]["calc_cross_comp_metrics"] + ) + # Confirming the intended metrics are added to outputs and they have non-zero values + assert len(output_calc_cross_comp_metrics - calc_cross_comp_metrics) == 0 + assert ( + selector.tree["nodes"][selector.current_node_idx]["outputs"]["varex_new_lower_thresh"] > 0 + ) + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["new_lower_perc"] == 25 + + # Standard call using num_lowest_var_comps as a value in cross_component_metrics + selector.cross_component_metrics["num_acc_guess"] = 10 + selector = selection_nodes.calc_varex_thresh( + selector, + decide_comps, + thresh_label="new_lower", + percentile_thresh=25, + num_lowest_var_comps="num_acc_guess", + ) + calc_cross_comp_metrics = {"varex_new_lower_thresh", "new_lower_perc"} + output_calc_cross_comp_metrics = set( + selector.tree["nodes"][selector.current_node_idx]["outputs"]["calc_cross_comp_metrics"] + ) + # Confirming the intended metrics are added to outputs and they have non-zero values + assert len(output_calc_cross_comp_metrics - calc_cross_comp_metrics) == 0 + assert ( + selector.tree["nodes"][selector.current_node_idx]["outputs"]["varex_new_lower_thresh"] > 0 + ) + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["new_lower_perc"] == 25 + + # Raise error if num_lowest_var_comps is a string, but not in cross_component_metrics + with pytest.raises(ValueError): + selector = selection_nodes.calc_varex_thresh( + selector, + decide_comps, + thresh_label="new_lower", + percentile_thresh=25, + num_lowest_var_comps="NotACrossCompMetric", + ) + + # Raise error if num_lowest_var_comps is not an integer + with pytest.raises(ValueError): + selector = selection_nodes.calc_varex_thresh( + selector, + decide_comps, + thresh_label="new_lower", + percentile_thresh=25, + num_lowest_var_comps=9.5, + ) + + # Raise error if num_lowest_var_comps is larger than the number of selected components + with pytest.raises(ValueError): + selector = selection_nodes.calc_varex_thresh( + selector, + decide_comps, + thresh_label="new_lower", + percentile_thresh=25, + num_lowest_var_comps=55, + ) + # Run warning logging code to see if any of the cross_component_metrics # already existed and would be over-written selector = sample_selector(options="provclass") From b7219fd2fd5615c334cc6895b3fed90fb035425f Mon Sep 17 00:00:00 2001 From: handwerkerd <7406227+handwerkerd@users.noreply.github.com> Date: Wed, 17 Aug 2022 16:18:19 -0400 Subject: [PATCH 014/177] flake8 style fixes --- tedana/reporting/html_report.py | 3 ++- tedana/selection/selection_nodes.py | 12 ++++++++---- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/tedana/reporting/html_report.py b/tedana/reporting/html_report.py index c754ab412..5f2d1223a 100644 --- a/tedana/reporting/html_report.py +++ b/tedana/reporting/html_report.py @@ -144,7 +144,8 @@ def get_elbow_val(elbow_prefix): return None elif len(elbow_val) > 1: LGR.warning( - f"More than one key saved in cross_component_metrics begins with {elbow_prefix}. Displaying the alphabetially first one in report" + "More than one key saved in cross_component_metrics begins with " + f"{elbow_prefix}. 
Displaying the alphabetially first one in report" ) return elbow_val[0] else: diff --git a/tedana/selection/selection_nodes.py b/tedana/selection/selection_nodes.py index 8c7de5172..ce44c0df1 100644 --- a/tedana/selection/selection_nodes.py +++ b/tedana/selection/selection_nodes.py @@ -1217,11 +1217,13 @@ def calc_varex_thresh( num_lowest_var_comps = selector.cross_component_metrics[num_lowest_var_comps] else: raise ValueError( - f"{function_name_idx}: num_lowest_var_comps ( {num_lowest_var_comps}) is not in selector.cross_component_metrics" + f"{function_name_idx}: num_lowest_var_comps ( {num_lowest_var_comps}) " + "is not in selector.cross_component_metrics" ) if not isinstance(num_lowest_var_comps, int): raise ValueError( - f"{function_name_idx}: num_lowest_var_comps ( {num_lowest_var_comps}) is used as an array index and should be an integer" + f"{function_name_idx}: num_lowest_var_comps ( {num_lowest_var_comps}) " + "is used as an array index and should be an integer" ) if custom_node_label: @@ -1251,7 +1253,8 @@ def calc_varex_thresh( selector.component_table.loc[comps2use, "variance explained"], percentile_thresh ) else: - # Using only the first num_lowest_var_comps components sorted to include lowest variance + # Using only the first num_lowest_var_comps components sorted to include + # lowest variance if num_lowest_var_comps <= len(comps2use): sorted_varex = np.sort( (selector.component_table.loc[comps2use, "variance explained"]).to_numpy() @@ -1261,7 +1264,8 @@ def calc_varex_thresh( ) else: raise ValueError( - f"{function_name_idx}: num_lowest_var_comps ({num_lowest_var_comps}) needs to be <= len(comps2use) ({len(comps2use)})" + f"{function_name_idx}: num_lowest_var_comps ({num_lowest_var_comps})" + f"needs to be <= len(comps2use) ({len(comps2use)})" ) selector.cross_component_metrics[varex_name] = outputs[varex_name] From f81dac24004f346ade0677f2577c43cd69d9f8c7 Mon Sep 17 00:00:00 2001 From: handwerkerd <7406227+handwerkerd@users.noreply.github.com> Date: Thu, 18 Aug 2022 10:08:24 -0400 Subject: [PATCH 015/177] fixed linting --- tedana/reporting/html_report.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tedana/reporting/html_report.py b/tedana/reporting/html_report.py index 5f2d1223a..217699545 100644 --- a/tedana/reporting/html_report.py +++ b/tedana/reporting/html_report.py @@ -1,3 +1,4 @@ +import logging import os from os.path import join as opj from pathlib import Path @@ -8,10 +9,8 @@ from bokeh import embed, layouts, models from tedana import __version__ -from tedana.reporting import dynamic_figures as df from tedana.io import load_json - -import logging +from tedana.reporting import dynamic_figures as df LGR = logging.getLogger("GENERAL") @@ -145,7 +144,8 @@ def get_elbow_val(elbow_prefix): elif len(elbow_val) > 1: LGR.warning( "More than one key saved in cross_component_metrics begins with " - f"{elbow_prefix}. Displaying the alphabetially first one in report" + f"{elbow_prefix}. 
The lines on the plots will be for {elbow_val[0]} " + f"NOT {elbow_val[1:]}" ) return elbow_val[0] else: From 20548f4912d6007ee4063b5b18e437d832dedbe7 Mon Sep 17 00:00:00 2001 From: handwerkerd <7406227+handwerkerd@users.noreply.github.com> Date: Thu, 18 Aug 2022 10:34:52 -0400 Subject: [PATCH 016/177] fixed report elbow warning --- tedana/reporting/html_report.py | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/tedana/reporting/html_report.py b/tedana/reporting/html_report.py index 217699545..50b8bbad0 100644 --- a/tedana/reporting/html_report.py +++ b/tedana/reporting/html_report.py @@ -135,21 +135,26 @@ def get_elbow_val(elbow_prefix): of the suffix. If more than one metric has the prefix then the alphabetically first one will be used and a warning will be logged """ - elbow_val = [val for key, val in cross_comp_metrics_dict.items() if elbow_prefix in key] - if not elbow_val or len(elbow_val) == 0: + + elbow_keys = [k for k in cross_comp_metrics_dict.keys() if elbow_prefix in k] + elbow_keys.sort() + if len(elbow_keys) == 0: LGR.warning( f"No {elbow_prefix} saved in cross_component_metrics so not displaying in report" ) return None - elif len(elbow_val) > 1: + elif len(elbow_keys) == 1: + return cross_comp_metrics_dict[elbow_keys[0]] + else: + printed_key = elbow_keys[0] + unprinted_keys = elbow_keys[1:] + LGR.warning( "More than one key saved in cross_component_metrics begins with " - f"{elbow_prefix}. The lines on the plots will be for {elbow_val[0]} " - f"NOT {elbow_val[1:]}" + f"{elbow_prefix}. The lines on the plots will be for {printed_key} " + f"NOT {unprinted_keys}" ) - return elbow_val[0] - else: - return elbow_val[0] # Return a value, not a list with a single value + return cross_comp_metrics_dict[printed_key] kappa_elbow = get_elbow_val("kappa_elbow") rho_elbow = get_elbow_val("rho_elbow") From 2802e38a7804622e87ba16aebb037b535ca4c377 Mon Sep 17 00:00:00 2001 From: handwerkerd <7406227+handwerkerd@users.noreply.github.com> Date: Thu, 18 Aug 2022 10:45:36 -0400 Subject: [PATCH 017/177] removed unneeded second d_table calc function --- tedana/selection/selection_nodes.py | 2 +- tedana/selection/selection_utils.py | 106 ---------------------------- 2 files changed, 1 insertion(+), 107 deletions(-) diff --git a/tedana/selection/selection_nodes.py b/tedana/selection/selection_nodes.py index ce44c0df1..1a3db99fd 100644 --- a/tedana/selection/selection_nodes.py +++ b/tedana/selection/selection_nodes.py @@ -8,7 +8,7 @@ from scipy.stats import scoreatpercentile from tedana.metrics.dependence import generate_decision_table_score -from tedana.selection.selection_utils import ( # get_new_meanmetricrank, +from tedana.selection.selection_utils import ( change_comptable_classifications, confirm_metrics_exist, get_extend_factor, diff --git a/tedana/selection/selection_utils.py b/tedana/selection/selection_utils.py index 459e89795..35493a372 100644 --- a/tedana/selection/selection_utils.py +++ b/tedana/selection/selection_utils.py @@ -6,7 +6,6 @@ import numpy as np -from tedana.metrics.dependence import generate_decision_table_score from tedana.stats import getfbounds LGR = logging.getLogger("GENERAL") @@ -653,108 +652,3 @@ def get_extend_factor(n_vols=None, extend_factor=None): LGR.error(error_msg) raise ValueError(error_msg) return extend_factor - - -def get_new_meanmetricrank(component_table, comps2use, decision_node_idx, calc_new_rank=False): - """ - If a revised d_table_score was already calculated, use that. 
- If not, calculate a new d_table_score based on the components - identified in comps2use - - Parameters - ---------- - component_table - comps2use - decision_node_idx: :obj:`int` - The index for the current decision node - calc_new_rank: :obj:`bool` - calculate a new d_table_score even if revised scores with the same - labels were already calculated - - Return - ------ - meanmetricrank - comptable - """ - rank_label = f"d_table_score_node{decision_node_idx}" - if not calc_new_rank and (rank_label in component_table.columns): - # return existing - LGR.info( - f"{rank_label} already calculated so not recalculating in node {decision_node_idx}" - ) - return component_table[rank_label], component_table - - # get the array of ranks - ranks = generate_decision_table_score( - component_table.loc[comps2use, "kappa"], - component_table.loc[comps2use, "dice_FT2"], - component_table.loc[comps2use, "signal-noise_t"], - component_table.loc[comps2use, "countnoise"], - component_table.loc[comps2use, "countsigFT2"], - ) - # see if we need to make a new column - if rank_label not in component_table.columns: - component_table[rank_label] = np.zeros(component_table.shape[0]) * np.nan - - # fill in the column with the components of interest - for c, rank in zip(comps2use, ranks): - component_table.loc[c, rank_label] = rank - - return component_table[rank_label].copy(), component_table.copy() - - -# Not currently being used and hopefully will never again be used -# def prev_classified_comps(comptable, decision_node_idx, classification_label, prev_X_steps=0): -# """ -# Output a list of components with a specific label during the current or -# previous X steps of the decision tree. For example, if -# classification_label = ['provisionalaccept'] and prev_X_steps = 0 -# then this outputs the indices of components that are currenlty -# classsified as provisionalaccept. If prev_X_steps=2, then this will -# output components that are classified as provisionalaccept or were -# classified as such any time before the previous two decision tree steps - -# Parameters -# ---------- -# comptable -# n_echos: :obj:`int` -# The number of echos in the multi-echo data set -# decision_node_idx: :obj:`int` -# The index of the node in the decision tree that called this function -# classification_label: :obj:`list[str]` -# A list of strings containing classification labels to identify in components -# For example: ['provisionalaccept'] -# prev_X_steps: :obj:`int` -# If 0, then just count the number of provisionally accepted or rejected -# or unclassified components in the current node. If this is a positive -# integer, then also check if a component was a in one of those three -# categories in ignore_prev_X_steps previous nodes. 
default=0 - -# Returns -# ------- -# full_comps2use: :obj:`list[int]` -# A list of indices of components that have or add classification_lable -# """ - -# full_comps2use = selectcomps2use(comptable, classification_label) -# rationales = comptable["rationale"] - -# if prev_X_steps > 0: # if checking classifications in prevision nodes -# for compidx in range(len(comptable)): -# tmp_rationale = rationales.values[compidx] -# tmp_list = re.split(":|;| ", tmp_rationale) -# while "" in tmp_list: # remove blank strings after splitting rationale -# tmp_list.remove("") -# # Check the previous nodes -# # This is inefficient, but it should work -# for didx in range(max(0, decision_node_idx - prev_X_steps), decision_node_idx): -# if str(didx) in tmp_list: -# didx_loc = tmp_list.index(str(didx)) -# if didx_loc > 1: -# tmp_classifier = tmp_list[didx_loc - 1] -# if tmp_classifier in classification_label: -# full_comps2use.append(compidx) - -# full_comps2use = list(set(full_comps2use)) - -# return full_comps2use From 92ae3eb0d3819c370d5eaae7d88508270d5fa32d Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Thu, 18 Aug 2022 21:16:55 -0400 Subject: [PATCH 018/177] Links building decision trees to index --- .../{building_decision_trees.rst => building decision trees.rst} | 0 docs/index.rst | 1 + 2 files changed, 1 insertion(+) rename docs/{building_decision_trees.rst => building decision trees.rst} (100%) diff --git a/docs/building_decision_trees.rst b/docs/building decision trees.rst similarity index 100% rename from docs/building_decision_trees.rst rename to docs/building decision trees.rst diff --git a/docs/index.rst b/docs/index.rst index 491d8d874..28bb4c5a3 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -182,6 +182,7 @@ tedana is licensed under GNU Lesser General Public License version 2.1. multi-echo usage approach + building decision trees outputs faq support From f8d479b303f113172337ef46ac79513087a500de Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Thu, 18 Aug 2022 21:28:59 -0400 Subject: [PATCH 019/177] Adds ComponentSelector to API docs --- docs/api.rst | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/docs/api.rst b/docs/api.rst index 1cc3bf7eb..7d627189c 100644 --- a/docs/api.rst +++ b/docs/api.rst @@ -116,6 +116,8 @@ API :no-members: :no-inherited-members: + + .. currentmodule:: tedana.selection .. autosummary:: @@ -123,9 +125,15 @@ API :template: function.rst tedana.selection.manual_selection - tedana.selection.kundu_selection_v2 tedana.selection.kundu_tedpca +.. autosummary:: tedana.selection.ComponentSelector + :toctree: generated/ + + tedana.selection.ComponentSelector + + + .. _api_gscontrol_ref: From e544bf9dab3f90184127e5e8324c3a1cd18c820b Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Thu, 18 Aug 2022 21:30:07 -0400 Subject: [PATCH 020/177] Set language to English --- docs/conf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/conf.py b/docs/conf.py index 856a557b6..6ae340ca7 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -92,7 +92,7 @@ # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. -language = None +language = 'en' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. 
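The kappa and rho "elbow" values that the report code above surfaces, and that the selection patches below refactor (kappa_elbow_kundu, rho_elbow_kundu_liberal), are all built on tedana's getelbow utility, which picks the bend in a descending curve of sorted metric values. As a rough, self-contained sketch of that idea — assuming the common farthest-point-from-the-chord heuristic, with simple_elbow as an illustrative name rather than tedana's exact implementation:

import numpy as np


def simple_elbow(values):
    # Sort descending and find the point farthest from the straight line
    # joining the first and last points of the curve.
    curve = np.sort(np.asarray(values, dtype=float))[::-1]
    coords = np.column_stack([np.arange(curve.size), curve])
    line_vec = coords[-1] - coords[0]
    line_vec = line_vec / np.linalg.norm(line_vec)
    rel = coords - coords[0]
    # Subtract each point's projection onto that line; the residual is the
    # perpendicular distance from the point to the line.
    proj = np.outer(rel @ line_vec, line_vec)
    return int(np.argmax(np.linalg.norm(rel - proj, axis=1)))

Something like simple_elbow(component_table["kappa"]) would return the index separating high-kappa from low-kappa components; the real getelbow can also return the metric value at the elbow via return_val=True, which is how it is called in the selection code below.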
From 617fa9a45a98c734393414f715d5aea674b44b10 Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Thu, 18 Aug 2022 21:32:23 -0400 Subject: [PATCH 021/177] Fix dead nilearn link --- docs/conf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/conf.py b/docs/conf.py index 6ae340ca7..5daf2e996 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -172,5 +172,5 @@ def setup(app): "matplotlib": ("https://matplotlib.org/", None), "nibabel": ("https://nipy.org/nibabel/", None), "pandas": ("https://pandas.pydata.org/pandas-docs/stable/", None), - "nilearn": ("http://nilearn.github.io/", None), + "nilearn": ("http://nilearn.github.io/stable", None), } From ed083a52a3dedccd6faf2f28e1cda9278dc48eb3 Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Wed, 31 Aug 2022 10:35:01 -0400 Subject: [PATCH 022/177] Add load_config and ComponentSelector to API docs --- docs/api.rst | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/docs/api.rst b/docs/api.rst index 7d627189c..3ccf4b63f 100644 --- a/docs/api.rst +++ b/docs/api.rst @@ -130,9 +130,8 @@ API .. autosummary:: tedana.selection.ComponentSelector :toctree: generated/ - tedana.selection.ComponentSelector - - + tedana.selection.ComponentSelector.ComponentSelector + tedana.selection.ComponentSelector.load_config .. _api_gscontrol_ref: From e60f87ae29bb2ba40cc0eb654ed33f740b5441cc Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Wed, 7 Sep 2022 17:47:00 -0400 Subject: [PATCH 023/177] Fix mixing matrix over-save bug --- tedana/workflows/tedana.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tedana/workflows/tedana.py b/tedana/workflows/tedana.py index fb5df83f0..71f2e2972 100644 --- a/tedana/workflows/tedana.py +++ b/tedana/workflows/tedana.py @@ -715,7 +715,8 @@ def tedana_workflow( # and rewritten if an existing mixing matrix is given as an input comp_names = comptable["Component"].values mixing_df = pd.DataFrame(data=mmix, columns=comp_names) - io_generator.save_file(mixing_df, "ICA mixing tsv") + if not op.exists(io_generator.get_name("ICA mixing tsv")): + io_generator.save_file(mixing_df, "ICA mixing tsv") betas_oc = utils.unmask(computefeats2(data_oc, mmix, mask_denoise), mask_denoise) io_generator.save_file(betas_oc, "z-scored ICA components img") From 800f1815500cdd304e98bdbfc987c6595c3caeb3 Mon Sep 17 00:00:00 2001 From: Dan Handwerker <7406227+handwerkerd@users.noreply.github.com> Date: Tue, 13 Sep 2022 10:51:38 -0400 Subject: [PATCH 024/177] Separately modularized kappa & rho elbow calcs and created liberal rho elbow (#15) * kundu tree provisionalreject to unclassified * calc_rho_elbow progress * calc_rho_elbow done * Removed calc_varex_upper_p * Removed kappa_rho_elbow tests * both decision trees running * linting fixes --- tedana/resources/decision_trees/kundu.json | 62 ++-- tedana/resources/decision_trees/minimal.json | 22 +- tedana/selection/selection_nodes.py | 310 ++++++++++--------- tedana/selection/selection_utils.py | 205 ++++++++++-- tedana/tests/test_selection_nodes.py | 155 ++++++---- tedana/tests/test_selection_utils.py | 110 ++++++- 6 files changed, 622 insertions(+), 242 deletions(-) diff --git a/tedana/resources/decision_trees/kundu.json b/tedana/resources/decision_trees/kundu.json index 6eddcf6d6..839b11851 100644 --- a/tedana/resources/decision_trees/kundu.json +++ b/tedana/resources/decision_trees/kundu.json @@ -15,8 +15,7 @@ "d_table_score" ], "intermediate_classifications": [ - "provisionalaccept", - "provisionalreject" + "provisionalaccept" ], "classification_tags": [ 
"Likely BOLD", @@ -122,38 +121,50 @@ } }, { - "functionname": "calc_kappa_rho_elbows_kundu", + "functionname": "calc_kappa_elbow", "parameters": { - "decide_comps": "unclassified" + "decide_comps": "all" }, "kwargs": { "log_extra_info": "", "log_extra_report": "" }, - "_comment": "In original code, this was followed by a step that turned everything not rejected to ignored (accepted) if there are no unclassified components left. At this point, components are either rejected or unclassified so it is not clear how that could ever happen" + "_comment": "" + }, + { + "functionname": "calc_rho_elbow", + "parameters": { + "decide_comps": "all" + }, + "kwargs": { + "subset_decide_comps": "unclassified", + "rho_elbow_type": "kundu", + "log_extra_info": "", + "log_extra_report": "" + }, + "_comment": "" }, { "functionname": "dec_left_op_right", "parameters": { "ifTrue": "provisionalaccept", - "ifFalse": "provisionalreject", + "ifFalse": "nochange", "decide_comps": "unclassified", "op": ">=", "left": "kappa", "right": "kappa_elbow_kundu" }, "kwargs": { - "log_extra_info": "Provisionally accept if kappa>elbow and provisionally reject if kappaelbow", "log_extra_report": "" } }, { "functionname": "dec_left_op_right", "parameters": { - "ifTrue": "provisionalreject", + "ifTrue": "unclassified", "ifFalse": "nochange", "decide_comps": [ - "provisionalreject", "provisionalaccept" ], "op": ">", @@ -161,7 +172,7 @@ "right": "rho_elbow_kundu" }, "kwargs": { - "log_extra_info": "Provisionally reject if rho>elbow", + "log_extra_info": "Move any provisionally accepted components back to unclassified if rho>elbow", "log_extra_report": "" } }, @@ -171,7 +182,6 @@ "new_classification": "accepted", "decide_comps": [ "provisionalaccept", - "provisionalreject", "unclassified" ], "class_comp_exists": "provisionalaccept" @@ -238,7 +248,7 @@ "ifFalse": "nochange", "decide_comps": [ "provisionalaccept", - "provisionalreject" + "unclassified" ], "op": ">", "left": "d_table_score", @@ -259,7 +269,7 @@ "ifFalse": "nochange", "decide_comps": [ "provisionalaccept", - "provisionalreject" + "unclassified" ], "op": ">", "left": "d_table_score", @@ -282,13 +292,13 @@ "new_classification": "accepted", "decide_comps": [ "provisionalaccept", - "provisionalreject" + "unclassified" ], - "class_comp_exists": "provisionalreject" + "class_comp_exists": "unclassified" }, "kwargs": { "tag_ifTrue": "Likely BOLD", - "log_extra_info": "If nothing left is provisionalreject, then accept all", + "log_extra_info": "If nothing left is unclassified, then accept all", "log_extra_report": "" } }, @@ -297,7 +307,7 @@ "parameters": { "decide_comps": [ "provisionalaccept", - "provisionalreject" + "unclassified" ] }, "kwargs": {}, @@ -310,10 +320,10 @@ "ifFalse": "nochange", "decide_comps": [ "provisionalaccept", - "provisionalreject" + "unclassified" ], "op": ">", - "left": "d_table_score_node18", + "left": "d_table_score_node19", "right": "conservative_guess" }, "kwargs": { @@ -337,10 +347,10 @@ "ifFalse": "nochange", "decide_comps": [ "provisionalaccept", - "provisionalreject" + "unclassified" ], "op": ">", - "left": "d_table_score_node18", + "left": "d_table_score_node19", "right": "num_acc_guess" }, "kwargs": { @@ -359,7 +369,7 @@ "parameters": { "decide_comps": [ "provisionalaccept", - "provisionalreject" + "unclassified" ], "thresh_label": "new_lower", "percentile_thresh": 25 @@ -376,10 +386,10 @@ "ifFalse": "nochange", "decide_comps": [ "provisionalaccept", - "provisionalreject" + "unclassified" ], "op": ">", - "left": "d_table_score_node18", + 
"left": "d_table_score_node19", "right": "num_acc_guess" }, "kwargs": { @@ -398,7 +408,7 @@ "ifFalse": "nochange", "decide_comps": [ "provisionalaccept", - "provisionalreject" + "unclassified" ], "op": "<=", "left": "kappa", @@ -419,7 +429,7 @@ "new_classification": "accepted", "decide_comps": [ "provisionalaccept", - "provisionalreject" + "unclassified" ] }, "kwargs": { diff --git a/tedana/resources/decision_trees/minimal.json b/tedana/resources/decision_trees/minimal.json index 0b4f3ceaa..5aa50ec11 100644 --- a/tedana/resources/decision_trees/minimal.json +++ b/tedana/resources/decision_trees/minimal.json @@ -118,14 +118,28 @@ } }, { - "functionname": "calc_kappa_rho_elbows_kundu", + "functionname": "calc_kappa_elbow", "parameters": { - "decide_comps": "unclassified" + "decide_comps": "all" }, "kwargs": { "log_extra_info": "", "log_extra_report": "" - } + }, + "_comment": "" + }, + { + "functionname": "calc_rho_elbow", + "parameters": { + "decide_comps": "all" + }, + "kwargs": { + "subset_decide_comps": "unclassified", + "rho_elbow_type": "liberal", + "log_extra_info": "", + "log_extra_report": "" + }, + "_comment": "" }, { "functionname": "dec_left_op_right", @@ -170,7 +184,7 @@ ], "op": ">", "left": "rho", - "right": "rho_elbow_kundu" + "right": "rho_elbow_liberal" }, "kwargs": { "log_extra_info": "rho>elbow", diff --git a/tedana/selection/selection_nodes.py b/tedana/selection/selection_nodes.py index 1a3db99fd..6bfdf18fb 100644 --- a/tedana/selection/selection_nodes.py +++ b/tedana/selection/selection_nodes.py @@ -12,12 +12,12 @@ change_comptable_classifications, confirm_metrics_exist, get_extend_factor, - getelbow, kappa_elbow_kundu, + rho_elbow_kundu_liberal, log_decision_tree_step, selectcomps2use, ) -from tedana.stats import getfbounds + # from scipy import stats @@ -783,19 +783,16 @@ def calc_median( calc_median.__doc__ = calc_median.__doc__.format(**decision_docs) -def calc_kappa_rho_elbows_kundu( +def calc_kappa_elbow( selector, decide_comps, log_extra_report="", log_extra_info="", custom_node_label="", only_used_metrics=False, - kappa_only=False, - rho_only=False, ): """ - Calculates 'elbows' for kappa and rho values across compnents and thresholds - on kappa>kappa_elbow & rhokappa_elbow threshold. default=False - rho_only: :obj:`bool`, optional - Only use the rho>rho_elbow threshold. default=False - Returns ------- @@ -816,59 +808,34 @@ def calc_kappa_rho_elbows_kundu( Note ---- - This script is currently hard coded for a specific way to calculate kappa and rho elbows - based on the method by Kundu in the MEICA v2.7 code. Another elbow calculation would - require a distinct function. Ideally, there can be one elbow function can allows for - some more flexible options + This script is currently hard coded for a specific way to calculate the kappa elbow + based on the method by Kundu in the MEICA v2.7 code. This uses the minimum of + a kappa elbow calculation on all components and on a subset of nonsignificant + components. To get the same funcationality in MEICA v2.7, decide_comps must be 'all' + Additional options could be added to this function or distinct functions + for some more flexible options - This also uses all unclassified components as part of the elbow calculation, irregardless - of what is in decide_comps. 
""" - # If kappa_only or rho_only is true kappa or rho might not actually be - # used, but, as of now, both are required to run this function - outputs = { "decision_node_idx": selector.current_node_idx, "node_label": None, "n_echos": selector.n_echos, - "varex_upper_p": None, - } - if not (kappa_only ^ rho_only): - # if neither or both kappa and rho_only are set - outputs["used_metrics"] = set(["kappa", "rho"]) - outputs["calc_cross_comp_metrics"] = [ - "kappa_elbow_kundu", - "rho_elbow_kundu", - "varex_upper_p", - ] - outputs["kappa_elbow_kundu"] = None - outputs["rho_elbow_kundu"] = None - calc_kappa = True - calc_rho = True - elif kappa_only: - outputs["used_metrics"] = set(["kappa"]) - outputs["calc_cross_comp_metrics"] = [ + "used_metrics": set(["kappa"]), + "calc_cross_comp_metrics": [ "kappa_elbow_kundu", - "varex_upper_p", - ] - outputs["kappa_elbow_kundu"] = None - calc_kappa = True - calc_rho = False - elif rho_only: - outputs["used_metrics"] = set(["rho"]) - outputs["calc_cross_comp_metrics"] = [ - "rho_elbow_kundu", - "varex_upper_p", - ] - outputs["rho_elbow_kundu"] = None - calc_kappa = False - calc_rho = True + "kappa_allcomps_elbow", + "kappa_nonsig_elbow", + ], + "kappa_elbow_kundu": None, + "kappa_allcomps_elbow": None, + "kappa_nonsig_elbow": None, + } if only_used_metrics: return outputs["used_metrics"] - function_name_idx = f"Step {selector.current_node_idx}: calc_kappa_rho_elbows_kundu" + function_name_idx = f"Step {selector.current_node_idx}: calc_kappa_elbow" if ("kappa_elbow_kundu" in selector.cross_component_metrics) and ( "kappa_elbow_kundu" in outputs["calc_cross_comp_metrics"] @@ -877,13 +844,134 @@ def calc_kappa_rho_elbows_kundu( "kappa_elbow_kundu already calculated." f"Overwriting previous value in {function_name_idx}" ) - if ("rho_elbow_kundu" in selector.cross_component_metrics) and ( - "rho_elbow_kundu" in outputs["calc_cross_comp_metrics"] + + if custom_node_label: + outputs["node_label"] = custom_node_label + else: + outputs["node_label"] = "Calc Kappa Elbow" + + if log_extra_info: + LGR.info(log_extra_info) + if log_extra_report: + RepLGR.info(log_extra_report) + + comps2use = selectcomps2use(selector, decide_comps) + confirm_metrics_exist( + selector.component_table, outputs["used_metrics"], function_name=function_name_idx + ) + + if not comps2use: + log_decision_tree_step( + function_name_idx, + comps2use, + decide_comps=decide_comps, + ) + else: + ( + outputs["kappa_elbow_kundu"], + outputs["kappa_allcomps_elbow"], + outputs["kappa_nonsig_elbow"], + ) = kappa_elbow_kundu(selector.component_table, selector.n_echos, comps2use=comps2use) + selector.cross_component_metrics["kappa_elbow_kundu"] = outputs["kappa_elbow_kundu"] + selector.cross_component_metrics["kappa_allcomps_elbow"] = outputs["kappa_allcomps_elbow"] + selector.cross_component_metrics["kappa_nonsig_elbow"] = outputs["kappa_nonsig_elbow"] + + log_decision_tree_step(function_name_idx, comps2use, calc_outputs=outputs) + + selector.tree["nodes"][selector.current_node_idx]["outputs"] = outputs + + return selector + + +calc_kappa_elbow.__doc__ = calc_kappa_elbow.__doc__.format(**decision_docs) + + +def calc_rho_elbow( + selector, + decide_comps, + subset_decide_comps="unclassified", + rho_elbow_type="kundu", + log_extra_report="", + log_extra_info="", + custom_node_label="", + only_used_metrics=False, +): + """ + Calculates elbow for rho across components + + Parameters + ---------- + {selector} + {decide_comps} + subset_decide_comps: :obj:`str` + This is a string with a single component 
classification label. For the + elbow calculation used by Kundu in MEICA v.27 thresholds are based + on all components and on unclassified components. default='unclassified' + rho_elbow_type: :obj:`str` + The algorithm used to calculate the rho elbow. Current options are: + kundu (default): Method used by Kundu in MEICA v2.7. It is the mean between + the rho elbow calculated on all components and a subset of unclassificated + components with some extra quirks + liberal: Same as kundu but is the maximum of the two elbows, which will minimize + the number of components rejected by having values greater than the rho elbow + {log_extra} + {custom_node_label} + {only_used_metrics} + + Returns + ------- + {basicreturns} + + Note + ---- + This script is currently hard coded for a specific way to calculate the rho elbow + based on the method by Kundu in the MEICA v2.7 code. To get the same funcationality + in MEICA v2.7, decide_comps must be 'all' and subset_decide_comps must be 'unclassified' + + """ + + function_name_idx = f"Step {selector.current_node_idx}: calc_rho_elbow" + + if rho_elbow_type == "kundu".lower(): + elbow_name = "rho_elbow_kundu" + elif rho_elbow_type == "liberal".lower(): + elbow_name = "rho_elbow_liberal" + else: + raise ValueError( + f"{function_name_idx}: rho_elbow_type must be 'kundu' or 'liberal' " + f"It is {rho_elbow_type} " + ) + + outputs = { + "decision_node_idx": selector.current_node_idx, + "node_label": None, + "n_echos": selector.n_echos, + "calc_cross_comp_metrics": [ + elbow_name, + "varex_upper_p", + "rho_allcomps_elbow", + "rho_unclassified_elbow", + "elbow_f05", + ], + "used_metrics": set(["kappa", "rho", "variance explained"]), + elbow_name: None, + "varex_upper_p": None, + "rho_allcomps_elbow": None, + "rho_unclassified_elbow": None, + "elbow_f05": None, + } + + if only_used_metrics: + return outputs["used_metrics"] + + if (elbow_name in selector.cross_component_metrics) and ( + elbow_name in outputs["calc_cross_comp_metrics"] ): LGR.warning( - "rho_elbow_kundu already calculated." + f"{elbow_name} already calculated." f"Overwriting previous value in {function_name_idx}" ) + if "varex_upper_p" in selector.cross_component_metrics: LGR.warning( f"varex_upper_p already calculated. Overwriting previous value in {function_name_idx}" @@ -892,18 +980,7 @@ def calc_kappa_rho_elbows_kundu( if custom_node_label: outputs["node_label"] = custom_node_label else: - if not (kappa_only ^ rho_only): - outputs["node_label"] = "Calc Kappa & Rho Elbows" - elif kappa_only: - outputs["node_label"] = "Calc Kappa Elbow" - elif rho_only: - outputs["node_label"] = "Calc Rho Elbow" - - LGR.info( - "Note: This matches the elbow selecton criteria in Kundu's MEICA v2.7" - " except there is a variance threshold that is used for the rho criteria that " - "really didn't make sense and is being excluded." 
- ) + outputs["node_label"] = "Calc Rho Elbow" if log_extra_info: LGR.info(log_extra_info) @@ -915,79 +992,36 @@ def calc_kappa_rho_elbows_kundu( selector.component_table, outputs["used_metrics"], function_name=function_name_idx ) - unclassified_comps2use = selectcomps2use(selector, "unclassified") - - if (not comps2use) or (not unclassified_comps2use): - if not comps2use: - # outputs["numTrue"] = 0 - # outputs["numFalse"] = 0 - log_decision_tree_step( - function_name_idx, - comps2use, - decide_comps=decide_comps, - # ifTrue=outputs["numTrue"], - # ifFalse=outputs["numFalse"], - ) - if not unclassified_comps2use: - # outputs["numTrue"] = 0 - # outputs["numFalse"] = 0 - log_decision_tree_step( - function_name_idx, - comps2use, - decide_comps="unclassified", - # ifTrue=outputs["numTrue"], - # ifFalse=outputs["numFalse"], - ) + subset_comps2use = selectcomps2use(selector, subset_decide_comps) + + if not comps2use: + log_decision_tree_step( + function_name_idx, + comps2use, + decide_comps=decide_comps, + ) else: - if calc_kappa: - outputs["kappa_elbow_kundu"] = kappa_elbow_kundu( - selector.component_table, selector.n_echos - ) - selector.cross_component_metrics["kappa_elbow_kundu"] = outputs["kappa_elbow_kundu"] - - # The first elbow used to be for rho values of the unclassified components - # excluding a few based on differences of variance. Now it's all unclassified - # components - # Upper limit for variance explained is median across components with high - # Kappa values. High Kappa is defined as Kappa above Kappa elbow. - f05, _, f01 = getfbounds(selector.n_echos) - outputs["varex_upper_p"] = np.median( - selector.component_table.loc[ - selector.component_table["kappa"] - > getelbow(selector.component_table["kappa"], return_val=True), - "variance explained", - ] + ( + outputs[elbow_name], + outputs["varex_upper_p"], + outputs["rho_allcomps_elbow"], + outputs["rho_unclassified_elbow"], + outputs["elbow_f05"], + ) = rho_elbow_kundu_liberal( + selector.component_table, + selector.n_echos, + rho_elbow_type=rho_elbow_type, + comps2use=comps2use, + subset_comps2use=subset_comps2use, ) + selector.cross_component_metrics[elbow_name] = outputs[elbow_name] selector.cross_component_metrics["varex_upper_p"] = outputs["varex_upper_p"] + selector.cross_component_metrics["rho_allcomps_elbow"] = outputs["rho_allcomps_elbow"] + selector.cross_component_metrics["rho_unclassified_elbow"] = outputs[ + "rho_unclassified_elbow" + ] + selector.cross_component_metrics["elbow_f05"] = outputs["elbow_f05"] - ncls = unclassified_comps2use.copy() - for i_loop in range(3): - temp_comptable = selector.component_table.loc[ncls].sort_values( - by=["variance explained"], ascending=False - ) - diff_vals = temp_comptable["variance explained"].diff(-1) - diff_vals = diff_vals.fillna(0) - ncls = temp_comptable.loc[diff_vals < outputs["varex_upper_p"]].index.values - # kappa_elbow was already calculated in kappa_elbow_kundu above - # kappas_nonsig = comptable.loc[comptable["kappa"] < f01, "kappa"] - # kappa_elbow = np.min( - # ( - # getelbow(kappas_nonsig, return_val=True), - # getelbow(comptable["kappa"], return_val=True), - # ) - # ) - if calc_rho: - outputs["rho_elbow_kundu"] = np.mean( - ( - getelbow(selector.component_table.loc[ncls, "rho"], return_val=True), - getelbow(selector.component_table["rho"], return_val=True), - f05, - ) - ) - selector.cross_component_metrics["rho_elbow_kundu"] = outputs["rho_elbow_kundu"] - - # print(('numTrue={}, numFalse={}, numcomps2use={}'.format( - # numTrue, numFalse, len(comps2use)))) 
log_decision_tree_step(function_name_idx, comps2use, calc_outputs=outputs) selector.tree["nodes"][selector.current_node_idx]["outputs"] = outputs @@ -995,7 +1029,7 @@ def calc_kappa_rho_elbows_kundu( return selector -calc_kappa_rho_elbows_kundu.__doc__ = calc_kappa_rho_elbows_kundu.__doc__.format(**decision_docs) +calc_rho_elbow.__doc__ = calc_rho_elbow.__doc__.format(**decision_docs) def dec_classification_doesnt_exist( diff --git a/tedana/selection/selection_utils.py b/tedana/selection/selection_utils.py index 35493a372..8fb4eeb66 100644 --- a/tedana/selection/selection_utils.py +++ b/tedana/selection/selection_utils.py @@ -332,7 +332,7 @@ def confirm_metrics_exist(component_table, necessary_metrics, function_name=None If metrics_exist is False then raise an error and end the program - Notes + Note ----- This doesn't check if there are data in each metric's column, just that the columns exist. Also, this requires identical strings for the names @@ -562,50 +562,219 @@ def getelbow(arr, return_val=False): return k_min_ind -def kappa_elbow_kundu(comptable, n_echos): +def kappa_elbow_kundu(component_table, n_echos, comps2use=None): """ Calculate an elbow for kappa using the approach originally in Prantik Kundu's MEICA v2.7 code Parameters ---------- - comptable : (C x M) :obj:`pandas.DataFrame` + component_table : (C x M) :obj:`pandas.DataFrame` Component metric table. One row for each component, with a column for each metric. The index should be the component number. Only the 'kappa' column is used in this function n_echos: :obj:`int` The number of echos in the multi-echo data + comps2use: :obj:`list[int]` + A list of component indices used to calculate the elbow + default=None which means use all components Returns ------- kappa_elbow: :obj:`float` The 'elbow' value for kappa values, above which components are considered - more likely to contain T2* weighted signals + more likely to contain T2* weighted signals. + minimum of kappa_allcomps_elbow and kappa_nonsig_elbow + kappa_allcomps_elbow: :obj:`float` + The elbow for kappa values using all components in comps2use + kappa_nonsig_elbow: :obj:`float` + The elbow for kappa values excluding kappa values above a threshold + None if there are fewer than 6 values remaining after thresholding + + Note + ---- + The kappa elbow calculation in Kundu's original meica code calculates + one elbow using all components' kappa values, one elbow excluding kappa + values above a threshold, and then selects the lower of the two thresholds. + This is replicated by setting comps2use to None or by giving a list that + includes all component numbers. If comps2use includes indices for only a + subset of components then the kappa values from just those components + will be used for both elbow calculations. """ + + # If comps2use is None then set to a list of all component numbers + if not comps2use: + comps2use = list(range(component_table.shape[0])) + kappas2use = component_table.loc[comps2use, "kappa"].to_numpy() + # low kappa threshold - f05, _, f01 = getfbounds(n_echos) + _, _, f01 = getfbounds(n_echos) # get kappa values for components below a significance threshold - kappas_nonsig = comptable.loc[comptable["kappa"] < f01, "kappa"] + kappas_nonsig = kappas2use[kappas2use < f01] - # Would an elbow from all Kappa values *ever* be lower than one from + kappa_allcomps_elbow = getelbow(kappas2use, return_val=True) + # How often would an elbow from all Kappa values ever be lower than one from # a subset of lower values? 
-    # Note: Only use the subset of values if it includes at least 5 data point
+    # Note: Only use the subset of values if it includes at least 6 data points
     # That is enough to calculate an elbow of a curve
-    # This is an arbitrary threshold not from the original meica as is
+    # This is an arbitrary threshold not from the original meica and is
     # worth reconsidering at some point
-    if kappas_nonsig.size > 5:
-        kappa_elbow = np.min(
-            (
-                getelbow(kappas_nonsig, return_val=True),
-                getelbow(comptable["kappa"], return_val=True),
-            )
-        )
+    if kappas_nonsig.size >= 6:
+        kappa_nonsig_elbow = getelbow(kappas_nonsig, return_val=True)
+
+        kappa_elbow = np.min((kappa_nonsig_elbow, kappa_allcomps_elbow))
         LGR.info(("Calculating kappa elbow based on min of all and nonsig components."))
     else:
-        kappa_elbow = getelbow(comptable["kappa"], return_val=True)
+        kappa_elbow = kappa_allcomps_elbow
+        kappa_nonsig_elbow = None
         LGR.info(("Calculating kappa elbow based on all components."))
 
-    return kappa_elbow
+    return kappa_elbow, kappa_allcomps_elbow, kappa_nonsig_elbow
+
+
+def rho_elbow_kundu_liberal(
+    component_table, n_echos, rho_elbow_type="kundu", comps2use=None, subset_comps2use=-1
+):
+    """
+    Calculate an elbow for rho using the approach originally in
+    Prantik Kundu's MEICA v2.7 code and with a slightly more
+    liberal threshold
+
+    Parameters
+    ----------
+    component_table : (C x M) :obj:`pandas.DataFrame`
+        Component metric table. One row for each component, with a column for
+        each metric. The index should be the component number.
+        The 'kappa', 'rho', 'variance explained', and 'classification' columns
+        are used in this function
+    n_echos: :obj:`int`
+        The number of echos in the multi-echo data
+    rho_elbow_type: :obj:`str`
+        The algorithm used to calculate the rho elbow. Current options are:
+        kundu (default): Method used by Kundu in MEICA v2.7. It is the mean between
+        the rho elbow calculated on all components and a subset of unclassified
+        components with some extra quirks
+        liberal: Same as kundu but is the maximum of the two elbows, which will minimize
+        the number of components rejected by having values greater than the rho elbow
+    comps2use: :obj:`list[int]`
+        A list of component indices used to calculate the elbow
+        default=None which means use all components
+    subset_comps2use: :obj:`list[int]`
+        A list of component indices used to calculate the elbow
+        If None then only calculate a threshold using all components
+        default=-1 which means use only 'unclassified' components
+
+
+    Returns
+    -------
+    rho_elbow: :obj:`float`
+        The 'elbow' value for rho values, above which components are considered
+        more likely to contain S0 weighted signals
+    varex_upper_p: :obj:`float`
+        This is the median "variance explained" across components with kappa values
+        greater than the kappa_elbow calculated using all components
+        None if subset_comps2use is None
+    rho_allcomps_elbow: :obj:`float`
+        rho elbow calculated using all components in comps2use
+    rho_unclassified_elbow: :obj:`float`
+        rho elbow calculated using all components in subset_comps2use
+        None if subset_comps2use is None
+    elbow_f05: :obj:`float`
+        A significance threshold based on the number of echoes. Used
+        as part of the mean for rho_elbow_type=='kundu'
+
+    Note
+    ----
+    The rho elbow calculation in Kundu's original meica code calculates
+    one elbow using all components' rho values, one elbow using only
+    unclassified components (plus some quirky stuff with high variance components),
+    one threshold based on the number of echos, and takes the mean of those 3 values.
+    To replicate the original code, comps2use should include indices for all components
+    and subset_comps2use should include indices for unclassified components
+
+    Also, in practice, one of these elbows is sometimes extremely low and the
+    mean creates an overly aggressive rho threshold (values >rho_elbow are more
+    likely rejected). The liberal threshold option takes the max of the two
+    elbows based on rho values. The assumption is that the threshold on
+    unclassified components is always lower and can likely be excluded. Both
+    rho elbows are now logged so that it will be possible to confirm this with
+    data & make additional adjustments to this threshold
+    """
+
+    if rho_elbow_type not in ["kundu", "liberal"]:
+        raise ValueError(
+            f"rho_elbow_kundu_liberal: rho_elbow_type must be 'kundu' or 'liberal'. "
+            f"It is {rho_elbow_type} "
+        )
+
+    # If comps2use is None then set to a list of all component numbers
+    if not comps2use:
+        comps2use = list(range(component_table.shape[0]))
+
+    # If subset_comps2use is -1 then set to a list of all unclassified components
+    if subset_comps2use == -1:
+        subset_comps2use = component_table.index[
+            component_table["classification"] == "unclassified"
+        ].tolist()
+
+    # One rho elbow threshold set just on the number of echoes
+    elbow_f05, _, _ = getfbounds(n_echos)
+
+    # One rho elbow threshold set using all components in comps2use
+    rhos_comps2use = component_table.loc[comps2use, "rho"].to_numpy()
+    rho_allcomps_elbow = getelbow(rhos_comps2use, return_val=True)
+
+    # low kappa threshold
+    # get kappa values for components below a significance threshold
+    # kappas_nonsig = kappas2use[kappas2use < f01]
+
+    # Only calculate the unclassified elbow if subset_comps2use is not empty
+    if not subset_comps2use:
+        LGR.warning(
+            "No unclassified components for rho elbow calculation so only the elbow "
+            "based on all components is used"
+        )
+        varex_upper_p = None
+        rho_unclassified_elbow = None
+        rho_elbow = rho_allcomps_elbow
+
+    else:
+        # Calculating varex_upper_p
+        # Upper limit for variance explained is median across components with high
+        # Kappa values. High Kappa is defined as Kappa above Kappa elbow.
+ kappa_comps2use = component_table.loc[comps2use, "kappa"] + high_kappa_idx = list( + kappa_comps2use.index[ + kappa_comps2use + > getelbow(component_table.loc[comps2use, "kappa"], return_val=True) + ] + ) + varex_upper_p = np.median( + component_table.loc[ + high_kappa_idx, + "variance explained", + ] + ) + + # Removing large gaps in variance in the subset_comps2use before + # calculating this subset elbow threshold + for i_loop in range(3): + temp_comptable = component_table.loc[subset_comps2use].sort_values( + by=["variance explained"], ascending=False + ) + diff_vals = temp_comptable["variance explained"].diff(-1) + diff_vals = diff_vals.fillna(0) + subset_comps2use = temp_comptable.loc[diff_vals < varex_upper_p].index.values + + rho_unclassified_elbow = getelbow( + component_table.loc[subset_comps2use, "rho"], return_val=True + ) + + if rho_elbow_type == "kundu": + rho_elbow = np.mean((rho_allcomps_elbow, rho_unclassified_elbow, elbow_f05)) + else: # rho_elbow_type == 'liberal' + rho_elbow = np.maximum(rho_allcomps_elbow, rho_unclassified_elbow) + + return rho_elbow, varex_upper_p, rho_allcomps_elbow, rho_unclassified_elbow, elbow_f05 def get_extend_factor(n_vols=None, extend_factor=None): diff --git a/tedana/tests/test_selection_nodes.py b/tedana/tests/test_selection_nodes.py index 8342a9cf4..723fc9b14 100644 --- a/tedana/tests/test_selection_nodes.py +++ b/tedana/tests/test_selection_nodes.py @@ -415,107 +415,158 @@ def test_dec_variance_lessthan_thresholds_smoke(): assert f"Node {selector.current_node_idx}" in selector.component_status_table -def test_calc_kappa_rho_elbows_kundu(): - """Smoke tests for calc_kappa_rho_elbows_kundu""" +def test_calc_kappa_elbow(): + """Smoke tests for calc_kappa_elbow""" - # Standard use of this function requires some components to be "unclassified" - selector = sample_selector(options="unclass") + selector = sample_selector() decide_comps = "all" - # Outputs just the metrics used in this function {"variance explained"} - used_metrics = selection_nodes.calc_kappa_rho_elbows_kundu( - selector, decide_comps, only_used_metrics=True - ) - assert len(used_metrics - {"kappa", "rho"}) == 0 + # Outputs just the metrics used in this function + used_metrics = selection_nodes.calc_kappa_elbow(selector, decide_comps, only_used_metrics=True) + assert len(used_metrics - {"kappa"}) == 0 # Standard call to this function. 
- selector = selection_nodes.calc_kappa_rho_elbows_kundu( + selector = selection_nodes.calc_kappa_elbow( selector, decide_comps, log_extra_report="report log", log_extra_info="info log", custom_node_label="custom label", ) - calc_cross_comp_metrics = {"kappa_elbow_kundu", "rho_elbow_kundu", "varex_upper_p"} + calc_cross_comp_metrics = { + "kappa_elbow_kundu", + "kappa_allcomps_elbow", + "kappa_nonsig_elbow", + } output_calc_cross_comp_metrics = set( selector.tree["nodes"][selector.current_node_idx]["outputs"]["calc_cross_comp_metrics"] ) # Confirming the intended metrics are added to outputs and they have non-zero values assert len(output_calc_cross_comp_metrics - calc_cross_comp_metrics) == 0 assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["kappa_elbow_kundu"] > 0 - assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["rho_elbow_kundu"] > 0 - assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["varex_upper_p"] > 0 + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["kappa_allcomps_elbow"] > 0 + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["kappa_nonsig_elbow"] > 0 - # Run warning logging code for if any of the cross_component_metrics - # already existed and would be over-written - selector = sample_selector(options="unclass") - selector.cross_component_metrics["kappa_elbow_kundu"] = 1 - selector.cross_component_metrics["rho_elbow_kundu"] = 1 - selector.cross_component_metrics["varex_upper_p"] = 1 - decide_comps = "all" - selector = selection_nodes.calc_kappa_rho_elbows_kundu( + # Using a subset of components for decide_comps. + selector = selection_nodes.calc_kappa_elbow( selector, - decide_comps, + decide_comps="accepted", log_extra_report="report log", log_extra_info="info log", custom_node_label="custom label", ) + calc_cross_comp_metrics = { + "kappa_elbow_kundu", + "kappa_allcomps_elbow", + "kappa_nonsig_elbow", + } + output_calc_cross_comp_metrics = set( + selector.tree["nodes"][selector.current_node_idx]["outputs"]["calc_cross_comp_metrics"] + ) + # Confirming the intended metrics are added to outputs and they have non-zero values assert len(output_calc_cross_comp_metrics - calc_cross_comp_metrics) == 0 - assert selector.cross_component_metrics["kappa_elbow_kundu"] > 2 - assert selector.cross_component_metrics["rho_elbow_kundu"] > 2 - assert selector.cross_component_metrics["varex_upper_p"] > 2 + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["kappa_elbow_kundu"] > 0 + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["kappa_allcomps_elbow"] > 0 + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["kappa_nonsig_elbow"] > 0 + + +def test_calc_rho_elbow(): + """Smoke tests for calc_rho_elbow""" - # Run with kappa_only==True selector = sample_selector(options="unclass") - selector = selection_nodes.calc_kappa_rho_elbows_kundu(selector, decide_comps, kappa_only=True) - calc_cross_comp_metrics = {"kappa_elbow_kundu", "varex_upper_p"} + decide_comps = "all" + + # Outputs just the metrics used in this function + used_metrics = selection_nodes.calc_rho_elbow(selector, decide_comps, only_used_metrics=True) + assert len(used_metrics - {"kappa", "rho", "variance explained"}) == 0 + + # Standard call to this function. 
+ selector = selection_nodes.calc_rho_elbow( + selector, + decide_comps, + log_extra_report="report log", + log_extra_info="info log", + custom_node_label="custom label", + ) + calc_cross_comp_metrics = { + "rho_elbow_kundu", + "varex_upper_p", + "rho_allcomps_elbow", + "rho_unclassified_elbow", + "elbow_f05", + } output_calc_cross_comp_metrics = set( selector.tree["nodes"][selector.current_node_idx]["outputs"]["calc_cross_comp_metrics"] ) # Confirming the intended metrics are added to outputs and they have non-zero values assert len(output_calc_cross_comp_metrics - calc_cross_comp_metrics) == 0 - assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["kappa_elbow_kundu"] > 0 - assert "rho_elbow_kundu" not in selector.tree["nodes"][selector.current_node_idx]["outputs"] assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["varex_upper_p"] > 0 + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["rho_elbow_kundu"] > 0 + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["rho_allcomps_elbow"] > 0 + assert ( + selector.tree["nodes"][selector.current_node_idx]["outputs"]["rho_unclassified_elbow"] > 0 + ) + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["elbow_f05"] > 0 - # Run with rho_only==True - selector = sample_selector(options="unclass") - selector = selection_nodes.calc_kappa_rho_elbows_kundu(selector, decide_comps, rho_only=True) - calc_cross_comp_metrics = {"rho_elbow_kundu", "varex_upper_p"} + # Standard call to this function using rho_elbow_type="liberal" + selector = selection_nodes.calc_rho_elbow( + selector, + decide_comps, + rho_elbow_type="liberal", + log_extra_report="report log", + log_extra_info="info log", + custom_node_label="custom label", + ) + calc_cross_comp_metrics = { + "rho_elbow_liberal", + "varex_upper_p", + "rho_allcomps_elbow", + "rho_unclassified_elbow", + "elbow_f05", + } output_calc_cross_comp_metrics = set( selector.tree["nodes"][selector.current_node_idx]["outputs"]["calc_cross_comp_metrics"] ) # Confirming the intended metrics are added to outputs and they have non-zero values assert len(output_calc_cross_comp_metrics - calc_cross_comp_metrics) == 0 - assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["rho_elbow_kundu"] > 0 - assert "kappa_elbow_kundu" not in selector.tree["nodes"][selector.current_node_idx]["outputs"] assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["varex_upper_p"] > 0 + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["rho_elbow_liberal"] > 0 + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["rho_allcomps_elbow"] > 0 + assert ( + selector.tree["nodes"][selector.current_node_idx]["outputs"]["rho_unclassified_elbow"] > 0 + ) + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["elbow_f05"] > 0 - # Should run normally with both kappa_only and rho_only==True - selector = sample_selector(options="unclass") - selector = selection_nodes.calc_kappa_rho_elbows_kundu( - selector, decide_comps, kappa_only=True, rho_only=True + # Using a subset of components for decide_comps. 
+ selector = selection_nodes.calc_rho_elbow( + selector, + decide_comps=["accepted", "unclassified"], + log_extra_report="report log", + log_extra_info="info log", + custom_node_label="custom label", ) - calc_cross_comp_metrics = {"kappa_elbow_kundu", "rho_elbow_kundu", "varex_upper_p"} + calc_cross_comp_metrics = { + "rho_elbow_kundu", + "varex_upper_p", + "rho_allcomps_elbow", + "rho_unclassified_elbow", + "elbow_f05", + } output_calc_cross_comp_metrics = set( selector.tree["nodes"][selector.current_node_idx]["outputs"]["calc_cross_comp_metrics"] ) # Confirming the intended metrics are added to outputs and they have non-zero values assert len(output_calc_cross_comp_metrics - calc_cross_comp_metrics) == 0 - assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["kappa_elbow_kundu"] > 0 - assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["rho_elbow_kundu"] > 0 assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["varex_upper_p"] > 0 - - # Log without running if no components of class decide_comps or no components - # classified as "unclassified" are in the component table - selector = sample_selector() - selector = selection_nodes.calc_kappa_rho_elbows_kundu(selector, "NotAClassification") - calc_cross_comp_metrics = {"kappa_elbow_kundu", "rho_elbow_kundu", "varex_upper_p"} + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["rho_elbow_kundu"] > 0 + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["rho_allcomps_elbow"] > 0 assert ( - selector.tree["nodes"][selector.current_node_idx]["outputs"]["kappa_elbow_kundu"] is None + selector.tree["nodes"][selector.current_node_idx]["outputs"]["rho_unclassified_elbow"] > 0 ) - assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["rho_elbow_kundu"] is None - assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["varex_upper_p"] is None + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["elbow_f05"] > 0 + + with pytest.raises(ValueError): + selection_nodes.calc_rho_elbow(selector, decide_comps, rho_elbow_type="perfect") def test_calc_median_smoke(): diff --git a/tedana/tests/test_selection_utils.py b/tedana/tests/test_selection_utils.py index a9eb1def6..e4d6f3c1a 100644 --- a/tedana/tests/test_selection_utils.py +++ b/tedana/tests/test_selection_utils.py @@ -352,13 +352,115 @@ def test_kappa_elbow_kundu_smoke(): component_table = sample_component_table() - kappa_elbow = selection_utils.kappa_elbow_kundu(component_table, n_echos=3) - assert isinstance(kappa_elbow, float) + # Normal execution. 
With n_echoes==5 a few components will be excluded for the nonsig threshold + ( + kappa_elbow_kundu, + kappa_allcomps_elbow, + kappa_nonsig_elbow, + ) = selection_utils.kappa_elbow_kundu(component_table, n_echos=5) + assert isinstance(kappa_elbow_kundu, float) + assert isinstance(kappa_allcomps_elbow, float) + assert isinstance(kappa_nonsig_elbow, float) # For the sample component_table, when n_echos=6, there are fewer than 5 components # that are greater than an f01 threshold and a different condition in kappa_elbow_kundu is run - kappa_elbow = selection_utils.kappa_elbow_kundu(component_table, n_echos=6) - assert isinstance(kappa_elbow, float) + ( + kappa_elbow_kundu, + kappa_allcomps_elbow, + kappa_nonsig_elbow, + ) = selection_utils.kappa_elbow_kundu(component_table, n_echos=6) + assert isinstance(kappa_elbow_kundu, float) + assert isinstance(kappa_allcomps_elbow, float) + assert isinstance(kappa_nonsig_elbow, type(None)) + + # Run using only a subset of components + ( + kappa_elbow_kundu, + kappa_allcomps_elbow, + kappa_nonsig_elbow, + ) = selection_utils.kappa_elbow_kundu( + component_table, + n_echos=5, + comps2use=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 17, 18, 20], + ) + assert isinstance(kappa_elbow_kundu, float) + assert isinstance(kappa_allcomps_elbow, float) + assert isinstance(kappa_nonsig_elbow, float) + + +def test_rho_elbow_kundu_liberal_smoke(): + """A smoke test for the rho_elbow_kundu_liberal function""" + + component_table = sample_component_table(options="unclass") + # Normal execution with default kundu threshold + ( + rho_elbow_kundu, + varex_upper_p, + rho_allcomps_elbow, + rho_unclassified_elbow, + elbow_f05, + ) = selection_utils.rho_elbow_kundu_liberal(component_table, n_echos=3) + assert isinstance(rho_elbow_kundu, float) + assert isinstance(varex_upper_p, float) + assert isinstance(rho_allcomps_elbow, float) + assert isinstance(rho_unclassified_elbow, float) + assert isinstance(elbow_f05, float) + + # Normal execution with liberal threshold + ( + rho_elbow_kundu, + varex_upper_p, + rho_allcomps_elbow, + rho_unclassified_elbow, + elbow_f05, + ) = selection_utils.rho_elbow_kundu_liberal( + component_table, n_echos=3, rho_elbow_type="liberal" + ) + assert isinstance(rho_elbow_kundu, float) + assert isinstance(varex_upper_p, float) + assert isinstance(rho_allcomps_elbow, float) + assert isinstance(rho_unclassified_elbow, float) + assert isinstance(elbow_f05, float) + + # Run using only a subset of components + ( + rho_elbow_kundu, + varex_upper_p, + rho_allcomps_elbow, + rho_unclassified_elbow, + elbow_f05, + ) = selection_utils.rho_elbow_kundu_liberal( + component_table, + n_echos=3, + rho_elbow_type="kundu", + comps2use=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 17, 18, 20], + subset_comps2use=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 18, 20], + ) + assert isinstance(rho_elbow_kundu, float) + assert isinstance(varex_upper_p, float) + assert isinstance(rho_allcomps_elbow, float) + assert isinstance(rho_unclassified_elbow, float) + assert isinstance(elbow_f05, float) + + # Run with no unclassified components and thus subset_comps2use is empty + component_table = sample_component_table() + ( + rho_elbow_kundu, + varex_upper_p, + rho_allcomps_elbow, + rho_unclassified_elbow, + elbow_f05, + ) = selection_utils.rho_elbow_kundu_liberal(component_table, n_echos=3) + assert isinstance(rho_elbow_kundu, float) + assert isinstance(varex_upper_p, type(None)) + assert isinstance(rho_allcomps_elbow, float) + assert isinstance(rho_unclassified_elbow, 
type(None)) + assert isinstance(elbow_f05, float) + + with pytest.raises(ValueError): + selection_utils.rho_elbow_kundu_liberal( + component_table, n_echos=3, rho_elbow_type="perfect" + ) def test_get_extend_factor_smoke(): From d1366ad83e801e225484da01cf8f59b6cde27060 Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Thu, 15 Sep 2022 16:57:35 -0400 Subject: [PATCH 025/177] Enable tedana_reclassify as console script --- pyproject.toml | 5 +++++ setup.cfg | 1 + tedana/workflows/tedana_reclassify.py | 3 +-- 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index d71606108..68e0c7b1c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -26,3 +26,8 @@ exclude = ''' [tool.isort] profile = "black" multi_line_output = 3 + +[project.scripts] +tedana = "tedana.workflows.tedana:_main" +tedana_reclassify = "tedana.workflows.tedana_reclassify:main" + diff --git a/setup.cfg b/setup.cfg index 8a817bab7..516b0ed5c 100644 --- a/setup.cfg +++ b/setup.cfg @@ -61,6 +61,7 @@ all = console_scripts = t2smap = tedana.workflows.t2smap:_main tedana = tedana.workflows.tedana:_main + tedana_reclassify = tedana.workflows.tedana_reclassify:main [options.package_data] * = diff --git a/tedana/workflows/tedana_reclassify.py b/tedana/workflows/tedana_reclassify.py index 20eb11e53..0b595f664 100644 --- a/tedana/workflows/tedana_reclassify.py +++ b/tedana/workflows/tedana_reclassify.py @@ -21,13 +21,12 @@ def main(): - from ..info import __version__ + from tedana import __version__ verstr = "tedana v{}".format(__version__) parser = argparse.ArgumentParser() parser.add_argument( "registry", - dest="registry", help="File registry from a previous tedana run", ) parser.add_argument( From b65b13322db44d226778256252eed5f683b7f41d Mon Sep 17 00:00:00 2001 From: Dan Handwerker <7406227+handwerkerd@users.noreply.github.com> Date: Thu, 22 Sep 2022 15:23:57 -0400 Subject: [PATCH 026/177] No errors if no xcomp but also no decide_comps (#16) --- tedana/selection/selection_nodes.py | 47 ++++++++++++++++++++++------ tedana/tests/test_selection_nodes.py | 31 +++++++++++++++++- 2 files changed, 67 insertions(+), 11 deletions(-) diff --git a/tedana/selection/selection_nodes.py b/tedana/selection/selection_nodes.py index 6bfdf18fb..54886188a 100644 --- a/tedana/selection/selection_nodes.py +++ b/tedana/selection/selection_nodes.py @@ -1245,16 +1245,31 @@ def calc_varex_thresh( f"{perc_name} already calculated. Overwriting previous value in {function_name_idx}" ) + comps2use = selectcomps2use(selector, decide_comps) + confirm_metrics_exist( + selector.component_table, outputs["used_metrics"], function_name=function_name_idx + ) + if num_lowest_var_comps is not None: if isinstance(num_lowest_var_comps, str): if num_lowest_var_comps in selector.cross_component_metrics: num_lowest_var_comps = selector.cross_component_metrics[num_lowest_var_comps] + elif not comps2use: + # Note: It is possible the comps2use requested for this function + # is not empty, but the comps2use requested to calcualte + # {num_lowest_var_comps} was empty. Given the way this node is + # used, that's unlikely, but worth a comment. 
+ LGR.info( + f"{function_name_idx}: num_lowest_var_comps ( {num_lowest_var_comps}) " + "is not in selector.cross_component_metrics, but no components with " + f"{decide_comps} remain by this node so nothing happens" + ) else: raise ValueError( f"{function_name_idx}: num_lowest_var_comps ( {num_lowest_var_comps}) " "is not in selector.cross_component_metrics" ) - if not isinstance(num_lowest_var_comps, int): + if not isinstance(num_lowest_var_comps, int) and comps2use: raise ValueError( f"{function_name_idx}: num_lowest_var_comps ( {num_lowest_var_comps}) " "is used as an array index and should be an integer" @@ -1270,11 +1285,6 @@ def calc_varex_thresh( if log_extra_report: RepLGR.info(log_extra_report) - comps2use = selectcomps2use(selector, decide_comps) - confirm_metrics_exist( - selector.component_table, outputs["used_metrics"], function_name=function_name_idx - ) - if not comps2use: log_decision_tree_step( function_name_idx, @@ -1650,6 +1660,11 @@ def calc_revised_meanmetricrank_guesses( unclear how much the relative magnitudes will change and when the recalculation will affect results, but this was in the original kundu tree and will be replicated here to allow for comparisions + + This also hard-codes for kappa_elbow_kundu and rho_elbow_kundu in + the cross component metrics. If someone decides to keep using + this function with other elbow thresholds, the code would need to + be altered to account for that """ function_name_idx = f"Step {selector.current_node_idx}: calc_revised_meanmetricrank_guesses" @@ -1702,12 +1717,24 @@ def calc_revised_meanmetricrank_guesses( "cause problems since these are only calculated on a subset of components" ) + comps2use = selectcomps2use(selector, decide_comps) + confirm_metrics_exist( + selector.component_table, outputs["used_metrics"], function_name=function_name_idx + ) + for xcompmetric in outputs["used_cross_component_metrics"]: if xcompmetric not in selector.cross_component_metrics: - raise ValueError( - f"{xcompmetric} not in cross_component_metrics. " - f"It needs to be calculated before {function_name_idx}" - ) + if not comps2use: + LGR.info( + f"{function_name_idx}: {xcompmetric} is not in " + "selector.cross_component_metrics, but no components with " + f"{decide_comps} remain by this node so nothing happens" + ) + else: + raise ValueError( + f"{xcompmetric} not in cross_component_metrics. 
" + f"It needs to be calculated before {function_name_idx}" + ) if custom_node_label: outputs["node_label"] = custom_node_label diff --git a/tedana/tests/test_selection_nodes.py b/tedana/tests/test_selection_nodes.py index 723fc9b14..bea20ea62 100644 --- a/tedana/tests/test_selection_nodes.py +++ b/tedana/tests/test_selection_nodes.py @@ -812,6 +812,22 @@ def test_calc_varex_thresh_smoke(): num_lowest_var_comps="NotACrossCompMetric", ) + # Do not raise error if num_lowest_var_comps is a string & not in cross_component_metrics, + # but decide_comps doesn't select any components + selector = selection_nodes.calc_varex_thresh( + selector, + decide_comps="NoComponents", + thresh_label="new_lower", + percentile_thresh=25, + num_lowest_var_comps="NotACrossCompMetric", + ) + assert ( + selector.tree["nodes"][selector.current_node_idx]["outputs"]["varex_new_lower_thresh"] + is None + ) + # percentile_thresh doesn't depend on components and is assigned + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["new_lower_perc"] == 25 + # Raise error if num_lowest_var_comps is not an integer with pytest.raises(ValueError): selector = selection_nodes.calc_varex_thresh( @@ -833,7 +849,7 @@ def test_calc_varex_thresh_smoke(): ) # Run warning logging code to see if any of the cross_component_metrics - # already existed and would be over-written + # already exists and would be over-written selector = sample_selector(options="provclass") selector.cross_component_metrics["varex_upper_thresh"] = 1 selector.cross_component_metrics["upper_perc"] = 1 @@ -1124,3 +1140,16 @@ def test_calc_revised_meanmetricrank_guesses_smoke(): selector = selection_nodes.calc_revised_meanmetricrank_guesses( selector, ["provisional accept", "provisional reject", "unclassified"] ) + + # Do not raise error if kappa_elbow_kundu isn't in cross_component_metrics + # and there are no components in decide_comps + selector = sample_selector("provclass") + selector.cross_component_metrics["rho_elbow_kundu"] = 15.2 + + selector = selection_nodes.calc_revised_meanmetricrank_guesses( + selector, decide_comps="NoComponents" + ) + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["num_acc_guess"] is None + assert ( + selector.tree["nodes"][selector.current_node_idx]["outputs"]["conservative_guess"] is None + ) From df9afbe5235ebda9a76b98afe139c2db949aa5bc Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Wed, 16 Nov 2022 12:52:02 -0500 Subject: [PATCH 027/177] Update tedana/io.py Co-authored-by: Taylor Salo --- tedana/io.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tedana/io.py b/tedana/io.py index 4691dd726..8eba1da76 100644 --- a/tedana/io.py +++ b/tedana/io.py @@ -28,8 +28,7 @@ class CustomEncoder(json.JSONEncoder): """Convert some types because of JSON serialization and numpy incompatibilities - # noqa: E501 - See here: https://stackoverflow.com/questions/50916422/python-typeerror-object-of-type-int64-is-not-json-serializable/50916741 + See here: https://stackoverflow.com/q/50916422/2589328 """ def default(self, obj): From 2c9dd197563d548d732ab44af699d6b45ce91fee Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Wed, 16 Nov 2022 17:00:33 -0500 Subject: [PATCH 028/177] Appease style checker --- tedana/workflows/tedana_reclassify.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tedana/workflows/tedana_reclassify.py b/tedana/workflows/tedana_reclassify.py index 895d66744..b7c68f1bc 100644 --- a/tedana/workflows/tedana_reclassify.py +++ 
b/tedana/workflows/tedana_reclassify.py @@ -365,9 +365,9 @@ def post_tedana( # Collect BibTeX entries for cited papers references = get_description_references(report) - + with open(bibtex_file, "w") as fo: - fo.write(references) + fo.write(references) if not no_reports: LGR.info("Making figures folder with static component maps and timecourse plots.") From 9ae1b90f21c522fcf9fc677f0a5156447f9d0434 Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Wed, 16 Nov 2022 17:04:10 -0500 Subject: [PATCH 029/177] Appease the style checker? --- tedana/selection/selection_nodes.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tedana/selection/selection_nodes.py b/tedana/selection/selection_nodes.py index 54886188a..d9d7ada39 100644 --- a/tedana/selection/selection_nodes.py +++ b/tedana/selection/selection_nodes.py @@ -13,12 +13,11 @@ confirm_metrics_exist, get_extend_factor, kappa_elbow_kundu, - rho_elbow_kundu_liberal, log_decision_tree_step, + rho_elbow_kundu_liberal, selectcomps2use, ) - # from scipy import stats From df7048afba603dba3e4c994d58f958e1af39ff15 Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Wed, 16 Nov 2022 18:00:35 -0500 Subject: [PATCH 030/177] Force to use up to date setuptools; installation bug otherwise --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 4d4a2d768..2c751976d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -7,7 +7,7 @@ tedana = "tedana.workflows.tedana:_main" tedana_reclassify = "tedana.workflows.tedana_reclassify:main" [build-system] -requires = ["setuptools==58.2.0", "wheel"] +requires = ["setuptools>=64", "wheel"] [tool.black] line-length = 99 From e60bb8162148a274d18ace1d3e90a3bb0b662513 Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Wed, 16 Nov 2022 18:14:32 -0500 Subject: [PATCH 031/177] Remove out of date make entry --- Makefile | 3 --- 1 file changed, 3 deletions(-) diff --git a/Makefile b/Makefile index 2c0e026ce..ba8b71886 100644 --- a/Makefile +++ b/Makefile @@ -23,9 +23,6 @@ three-echo: four-echo: @py.test --log-cli-level=INFO --cov-append --cov-report term-missing --cov=tedana -k test_integration_four_echo tedana/tests/test_integration.py -four-echo: - @py.test --cov-append --cov-report term-missing --cov=tedana -k test_integration_four_echo tedana/tests/test_integration.py - five-echo: @py.test --log-cli-level=INFO --cov-append --cov-report term-missing --cov=tedana -k test_integration_five_echo tedana/tests/test_integration.py From 7073604d3e0b620e6e9e4040bdf77adb9909d80f Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Thu, 17 Nov 2022 10:21:33 -0500 Subject: [PATCH 032/177] Create functional reclassify CLI --- tedana/workflows/tedana_reclassify.py | 42 +++++++++++++++++++++++---- 1 file changed, 36 insertions(+), 6 deletions(-) diff --git a/tedana/workflows/tedana_reclassify.py b/tedana/workflows/tedana_reclassify.py index b7c68f1bc..bd6012152 100644 --- a/tedana/workflows/tedana_reclassify.py +++ b/tedana/workflows/tedana_reclassify.py @@ -23,7 +23,7 @@ def main(): from tedana import __version__ - verstr = "tedana v{}".format(__version__) + verstr = "tedana_reclassify v{}".format(__version__) parser = argparse.ArgumentParser() parser.add_argument( "registry", @@ -47,6 +47,7 @@ def main(): "--config", dest="config", help="File naming configuration. 
Default auto (prepackaged).", + default="auto", ) parser.add_argument( "--out-dir", @@ -118,9 +119,24 @@ def main(): ) parser.add_argument("-v", "--version", action="version", version=verstr) - parser.parse_args() + args = parser.parse_args() # Run post-tedana + post_tedana( + args.registry, + accept=args.manual_accept, + reject=args.manual_reject, + out_dir=args.out_dir, + config=args.config, + convention=args.convention, + tedort=args.tedort, + mir=args.mir, + no_reports=args.no_reports, + png_cmap=args.png_cmap, + force=args.force, + debug=args.debug, + quiet=args.quiet, + ) def post_tedana( @@ -193,8 +209,20 @@ def post_tedana( os.mkdir(out_dir) # Check that there is no overlap in accepted/rejected components - acc = set(accept) - rej = set(reject) + if accept: + acc = set(accept) + else: + acc = () + if reject: + rej = set(reject) + else: + rej = () + + if (not accept) and (not reject): + raise ValueError( + 'Must manually accept or reject at least one component' + ) + in_both = [] for a in acc: if a in rej: @@ -259,8 +287,10 @@ def post_tedana( previous_tree_fname, comptable, cross_component_metrics=xcomp, status_table=status_table ) - selector.add_manual(accept, "accepted") - selector.add_manual(reject, "rejected") + if accept: + selector.add_manual(accept, "accepted") + if reject: + selector.add_manual(reject, "rejected") selector.select() comptable = selector.component_table From 37456d22563e34b05206a05e0d38de85ff352039 Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Thu, 17 Nov 2022 10:54:36 -0500 Subject: [PATCH 033/177] Replace blanks with n/a --- tedana/io.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tedana/io.py b/tedana/io.py index 8eba1da76..15aa0b7d6 100644 --- a/tedana/io.py +++ b/tedana/io.py @@ -307,7 +307,9 @@ def save_tsv(self, data, name): data_type = type(data) if not isinstance(data, pd.DataFrame): raise TypeError(f"data must be pd.Data, not type {data_type}.") - data.to_csv(name, sep="\t", line_terminator="\n", na_rep="n/a", index=False) + # Replace blanks with numpy NaN + deblanked = data.replace('', np.nan) + deblanked.to_csv(name, sep="\t", line_terminator="\n", na_rep="n/a", index=False) def save_self(self): fname = self.save_file(self.registry, "registry json") From ae150757a5933c10f9b350d88384ec784d851a7d Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Thu, 17 Nov 2022 11:02:28 -0500 Subject: [PATCH 034/177] Maybe appease black --- tedana/workflows/tedana_reclassify.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/tedana/workflows/tedana_reclassify.py b/tedana/workflows/tedana_reclassify.py index bd6012152..8e547d365 100644 --- a/tedana/workflows/tedana_reclassify.py +++ b/tedana/workflows/tedana_reclassify.py @@ -219,9 +219,7 @@ def post_tedana( rej = () if (not accept) and (not reject): - raise ValueError( - 'Must manually accept or reject at least one component' - ) + raise ValueError('Must manually accept or reject at least one component') in_both = [] for a in acc: From cce4d3753ba789ef8e2cc1594df7d1e604487ff0 Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Fri, 18 Nov 2022 17:57:19 -0500 Subject: [PATCH 035/177] Fix typo MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Eneko Uruñuela --- docs/building decision trees.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/building decision trees.rst b/docs/building decision trees.rst index ce71d3ad0..b3586c6a0 100644 --- a/docs/building decision trees.rst +++ 
b/docs/building decision trees.rst
@@ -193,7 +193,7 @@ In addition to the intermediate and default ("accepted" "rejected" "unclassified
 for functions that should be applied to all components regardless of their classifications
 
 Most decision functions also include "ifTrue" and "ifFalse" which specify how to change the classification of each component
-based on whether a the decision criterion is true or also. In addition to the default and intermediate classification options,
+based on whether the decision criterion is true or false. In addition to the default and intermediate classification options,
 this can also be "nochange" (i.e. For components where a>b is true, "reject". For components where a>b is false, "nochange").
 The optional parameters "tag_ifTrue" and "tag_ifFalse" define the classification tags to be assigned to components.
 Currently, the only exception is "manual_classify" which uses "new_classification" to designate the new component classification
@@ -218,7 +218,7 @@ examples for how to meet these expectations.
 Create a dictionary called "outputs" that includes key fields that should be recorded.
 The following line should be at the end of each function
 ``selector.nodes[selector.current_node_idx]["outputs"] = outputs``
-Additional fields can be used to log funciton-specific information, but the following fields are common and may be used by other parts of the code:
+Additional fields can be used to log function-specific information, but the following fields are common and may be used by other parts of the code:
 
 - "decision_node_idx" (required): the ordered index for the current function in the decision tree.
 - "node_label" (required): A decriptive label for what happens in this node.
@@ -233,7 +233,7 @@ This will be useful to gather all metrics a tree will use without requiring a sp
 Existing functions define ``function_name_idx = f"Step {selector.current_node_idx}: [text of function_name]``
 This is used in logging and is cleaner to initialize near the top of each function.
 
-Each function has code that creates a default node label in ``outputs["node_label"]``. The default node lable
+Each function has code that creates a default node label in ``outputs["node_label"]``. The default node label
 may be used in decision tree visualization so it should be relatively short.
 
 Within this section, if there is a user-provided custom_node_label, that should be used instead. 
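
Concretely, a node following the conventions documented above is specified in the tree
json roughly as below. This is a sketch, written as the Python dict that ``json.load``
would return for one entry of the "nodes" list; the values are illustrative, modeled on
the kundu tree's first rejection node later in this series:

    # One decision node: reject any unclassified component whose rho exceeds
    # its kappa, and tag it so the reason is visible in classification_tags.
    node = {
        "functionname": "dec_left_op_right",
        "parameters": {
            "ifTrue": "rejected",
            "ifFalse": "nochange",
            "decide_comps": "unclassified",
            "op": ">",
            "left": "rho",
            "right": "kappa",
        },
        "kwargs": {
            "log_extra_info": "Reject if Kappa>Rho",
            "tag_ifTrue": "Unlikely BOLD",
        },
    }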
From 2622ba57b44a773e1c8953daa68812848cb72ba3 Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Fri, 18 Nov 2022 18:06:19 -0500 Subject: [PATCH 036/177] BIDSify some outputs --- tedana/resources/config/outputs.json | 4 ++-- tedana/tests/data/cornell_three_echo_outputs.txt | 4 ++-- tedana/tests/data/fiu_four_echo_outputs.txt | 2 +- tedana/tests/data/nih_five_echo_outputs_verbose.txt | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/tedana/resources/config/outputs.json b/tedana/resources/config/outputs.json index 6cd65f6f0..3e185b887 100644 --- a/tedana/resources/config/outputs.json +++ b/tedana/resources/config/outputs.json @@ -165,7 +165,7 @@ }, "PCA cross component metrics json": { "orig": "pca_cross_component_metrics", - "bidsv1.5.0": "desc-PCA_cross_component_metrics" + "bidsv1.5.0": "desc-PCACrossComponent_metrics" }, "ICA decomposition json": { "orig": "ica_decomposition", @@ -193,7 +193,7 @@ }, "ICA cross component metrics json": { "orig": "ica_cross_component_metrics", - "bidsv1.5.0": "desc-ICA_cross_component_metrics" + "bidsv1.5.0": "desc-ICACrossComponent_metrics" }, "ICA status table tsv": { "orig": "ica_status_table", diff --git a/tedana/tests/data/cornell_three_echo_outputs.txt b/tedana/tests/data/cornell_three_echo_outputs.txt index ab56a9cb3..821f07b8e 100644 --- a/tedana/tests/data/cornell_three_echo_outputs.txt +++ b/tedana/tests/data/cornell_three_echo_outputs.txt @@ -8,12 +8,12 @@ desc-ICA_decomposition.json desc-tedana_metrics.json desc-tedana_metrics.tsv desc-tedana_registry.json -desc-ICA_cross_component_metrics.json +desc-ICACrossComponent_metrics.json desc-ICA_status_table.tsv desc-ICA_decision_tree.json desc-ICA_mixing.tsv desc-ICA_stat-z_components.nii.gz -desc-PCA_cross_component_metrics.json +desc-PCACrossComponent_metrics.json desc-PCA_decomposition.json desc-PCA_metrics.json desc-PCA_metrics.tsv diff --git a/tedana/tests/data/fiu_four_echo_outputs.txt b/tedana/tests/data/fiu_four_echo_outputs.txt index 829f910db..77dc5869e 100644 --- a/tedana/tests/data/fiu_four_echo_outputs.txt +++ b/tedana/tests/data/fiu_four_echo_outputs.txt @@ -11,7 +11,7 @@ desc-ICA_decomposition.json desc-tedana_metrics.json desc-tedana_metrics.tsv desc-tedana_registry.json -desc-ICA_cross_component_metrics.json +desc-ICACrossComponent_metrics.json desc-ICA_status_table.tsv desc-ICA_decision_tree.json desc-ICAS0_stat-F_statmap.nii.gz diff --git a/tedana/tests/data/nih_five_echo_outputs_verbose.txt b/tedana/tests/data/nih_five_echo_outputs_verbose.txt index 217f4aac0..948487065 100644 --- a/tedana/tests/data/nih_five_echo_outputs_verbose.txt +++ b/tedana/tests/data/nih_five_echo_outputs_verbose.txt @@ -10,7 +10,7 @@ desc-ICA_decomposition.json desc-tedana_metrics.json desc-tedana_metrics.tsv desc-tedana_registry.json -desc-ICA_cross_component_metrics.json +desc-ICACrossComponent_metrics.json desc-ICA_status_table.tsv desc-ICA_decision_tree.json desc-ICAS0_stat-F_statmap.nii.gz From 12debd3642982180760a1be933e547fe819d9093 Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Fri, 18 Nov 2022 18:09:35 -0500 Subject: [PATCH 037/177] Appease black --- tedana/io.py | 2 +- tedana/workflows/tedana_reclassify.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tedana/io.py b/tedana/io.py index 15aa0b7d6..878378193 100644 --- a/tedana/io.py +++ b/tedana/io.py @@ -308,7 +308,7 @@ def save_tsv(self, data, name): if not isinstance(data, pd.DataFrame): raise TypeError(f"data must be pd.Data, not type {data_type}.") # Replace blanks with numpy NaN - deblanked = 
data.replace('', np.nan) + deblanked = data.replace("", np.nan) deblanked.to_csv(name, sep="\t", line_terminator="\n", na_rep="n/a", index=False) def save_self(self): diff --git a/tedana/workflows/tedana_reclassify.py b/tedana/workflows/tedana_reclassify.py index 8e547d365..af777f669 100644 --- a/tedana/workflows/tedana_reclassify.py +++ b/tedana/workflows/tedana_reclassify.py @@ -219,7 +219,7 @@ def post_tedana( rej = () if (not accept) and (not reject): - raise ValueError('Must manually accept or reject at least one component') + raise ValueError("Must manually accept or reject at least one component") in_both = [] for a in acc: From 9f5982414faab0ba500758bfbba9e944c837f3fa Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Mon, 21 Nov 2022 11:57:33 -0500 Subject: [PATCH 038/177] Heavily revise ComponentSelector module docs --- docs/conf.py | 2 +- tedana/selection/ComponentSelector.py | 154 ++++++++++++-------------- 2 files changed, 69 insertions(+), 87 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index 4fd0d016d..434d50964 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -42,6 +42,7 @@ # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ + "sphinx.ext.napoleon", "matplotlib.sphinxext.plot_directive", "sphinx.ext.autodoc", "sphinx.ext.autosummary", @@ -49,7 +50,6 @@ "sphinx.ext.ifconfig", "sphinx.ext.intersphinx", "sphinx.ext.linkcode", - "sphinx.ext.napoleon", "sphinx.ext.todo", "sphinxarg.ext", "sphinxcontrib.bibtex", # for foot-citations diff --git a/tedana/selection/ComponentSelector.py b/tedana/selection/ComponentSelector.py index 2b8f377c8..90a0bf469 100644 --- a/tedana/selection/ComponentSelector.py +++ b/tedana/selection/ComponentSelector.py @@ -47,36 +47,40 @@ def load_config(tree): ------- tree : :obj:`dict` A validated decision tree for the component selection process. - The `dict` has several required fields to describe the entire tree - `tree_id`: :obj:`str` The name of the tree - `info`: :obj:`str` A brief description of the tree for info logging - `report`: :obj:`str` - A narrative description of the tree that could be used in report logging - `refs`: :obj:`str` Publications that should be referenced, when this tree is used - `necessary_metrics`: :obj:`list[str]` - The metrics in `component_table` that will be used by this tree - `intermediate_classifications`: :obj:`list[str]` - User specified component classification labels. 'accepted', 'rejected', and - 'unclassified' are defaults that don't need to be included here - `classification_tags`: :obj:`list[str]` - Descriptive labels that can be used to explain why a component was accepted or rejected. - For example, ["Likely BOLD","Low variance"] - `nodes`: :obj:`list[dict]` Each dictionary includes the information - to run one node in the decision tree. Each node should either be able - to change component classifications (function names starting with dec_) - or calculate values using information from multiple components - (function names starting with calc_) - nodes includes: - `functionname`: :obj:`str` The name of the function to be called - `parameters`: :obj:`dict` Required parameters for the function - The only parameter that is used in all functions is `decidecomps`, - which are the component classifications the function should run on. - Most dec_ functions also include `ifTrue` and `ifFalse` which - define how to to change the classification of a component if the - criteria in the function is true or false. 
- `kwargs`: :obj:`dict` Optional parameters for the function """ +# Formerly used text +# The `dict` has several required fields to describe the entire tree +# - `tree_id`: :obj:`str` The name of the tree +# - `info`: :obj:`str` A brief description of the tree for info logging +# - `report`: :obj:`str` +# - A narrative description of the tree that could be used in report logging +# - `refs`: :obj:`str` Publications that should be referenced, when this tree is used +# - `necessary_metrics`: :obj:`list[str]` +# - The metrics in `component_table` that will be used by this tree +# - `intermediate_classifications`: :obj:`list[str]` +# - User specified component classification labels. 'accepted', 'rejected', and +# - 'unclassified' are defaults that don't need to be included here +# - `classification_tags`: :obj:`list[str]` +# - Descriptive labels that can be used to explain why a component was accepted or rejected. +# - For example, ["Likely BOLD","Low variance"] +# - `nodes`: :obj:`list[dict]` Each dictionary includes the information +# +# to run one node in the decision tree. Each node should either be able +# to change component classifications (function names starting with ``dec_``) +# or calculate values using information from multiple components +# (function names starting with ``calc_``) +# nodes includes: +# - `functionname`: :obj:`str` The name of the function to be called +# - `parameters`: :obj:`dict` Required parameters for the function +# The only parameter that is used in all functions is `decidecomps`, +# which are the component classifications the function should run on. +# Most ``dec_`` functions also include `ifTrue` and `ifFalse` which +# define how to to change the classification of a component if the +# criteria in the function is true or false. +# +# - `kwargs`: :obj:`dict` Optional parameters for the function + if tree in DEFAULT_TREES: fname = op.join(get_resource_path(), "decision_trees", tree + ".json") else: @@ -273,8 +277,22 @@ class ComponentSelector: If None, then look for `tree` within ./selection/data in the tedana code directory. default=None - Additional Parameters - --------------------- + + Returns + ------- + component_table : :obj:`pandas.DataFrame` + Updated component table with two extra columns. + cross_component_metrics : :obj:`Dict` + Metrics that are each a single value calculated across components. + component_status_table : :obj:`pandas.DataFrame` + A table tracking the status of each component at each step. + nodes : :obj:`list[dict]` + Nodes used in decision tree. + current_node_idx : :obj:`int` + The index for the current node, which should be the last node in the decision tree. + + Notes + ----- Any parameter that is used by a decision tree node function can be passed as a parameter of ComponentSelector class initialization function or can be included in the json file that defines the decision tree. If a parameter @@ -288,53 +306,6 @@ class ComponentSelector: Number of echos in multi-echo fMRI data n_vols: :obj:`int` Number of volumes (time points) in the fMRI data - - - Returns - ------- - component_table : :obj:`pandas.DataFrame` - Updated component table with two extra columns: - classifications : :obj:`str` (i.e., accepted, rejected) for each component - classification_tags : :obj:`list[str]` descriptions explaining reasons for classification - cross_component_metrics : :obj:`Dict` - Metrics that are each a single value calculated across - components. For example, kappa and rho. 
- component_status_table : :obj:`pandas.DataFrame` - A dataframe where each column lists the classification status of - each component after each node was run - Information that was stored in the tree json file. This includes: - tree, classification_tags, intermediate_classifications, necessary_metrics - nodes : :obj:`list[dict]` - Nodes used in decision tree. This includes the decision tree dict - from the json file in the `tree` input. For every element in the list - there is an added dict key `outputs` which includes key information from - when the function was run. Some of this information is function-specific, - but there are common elements across most or all: - decison_node_idx : :obj:`int` - The decision tree functions are run as part of an ordered list. - This is the positional index for when this function was run - as part of this list. - used_metrics : :obj:`list[str]` - A list of the metrics used in a node of the decision tree - used_cross_component_metrics : :obj:`list[str]` - A list of cross component metrics used in the node of a decision tree - node_label : :obj:`str` - A brief label for what happens in this node that can be used in a decision - tree summary table or flow chart. - numTrue, numFalse : :obj:`int` - For decision (dec_) functions, the number of components that were classified - as true or false respectively in this decision tree step. - calc_cross_comp_metrics : :obj:`list[str]` - For calculation (calc_) functions, cross component metrics that were - calculated in this function. When this is included, each of those - metrics and the calculated values are also distinct keys in 'outputs'. - While cross_component_metrics does not include where each component - was calculated, that information is stored here. - current_node_idx : :obj:`int` - The index for the current node, which should be the last node in the decision tree - - Notes - ----- """ def __init__(self, tree, component_table, cross_component_metrics={}, status_table=None): @@ -348,22 +319,27 @@ def __init__(self, tree, component_table, cross_component_metrics={}, status_tab selector = ComponentSelector(tree, comptable, n_echos=n_echos, n_vols=n_vols) - Returns - ------- - The class structure with the following fields loaded from tree: - nodes, necessary_metrics, intermediate_classificaitons, - classification_tags, + Notes + ----- + The structure has the following fields loaded from tree: + + - nodes + - necessary_metrics + - intermediate_classifications + - classification_tags + Adds to the class structure: - component_status_table: empty dataframe - cross_component_metrics: empty dict - used_metrics: empty set + + - component_status_table: empty dataframe + - cross_component_metrics: empty dict + - used_metrics: empty set """ self.tree_name = tree self.__dict__.update(cross_component_metrics) self.cross_component_metrics = cross_component_metrics - """Construct an un-executed selector""" + # Construct an un-executed selector self.component_table = component_table.copy() # To run a decision tree, each component needs to have an initial classification @@ -557,19 +533,23 @@ def are_all_components_accepted_or_rejected(self): @property def n_comps(self): + """The number of components in the component table.""" return len(self.component_table) @property def n_bold_comps(self): + """The number of components that are considered bold-weighted.""" ct = self.component_table return len(ct[ct.classification == "accepted"]) @property def accepted_comps(self): + """The number of components that are accepted.""" return 
self.component_table["classification"] == "accepted" @property def rejected_comps(self): + """The number of components that are rejected.""" return self.component_table["classification"] == "rejected" @property @@ -579,10 +559,12 @@ def is_final(self): @property def mixing(self): + """The mixing matrix used to generate the components being decided upon.""" return self.mixing_matrix @property def oc_data(self): + """The optimally combined data being used for this tree.""" return self.oc_data def to_files(self, io_generator): From 7a5ffff72592d40583cbe559c94e6b030f15c786 Mon Sep 17 00:00:00 2001 From: Dan Handwerker <7406227+handwerkerd@users.noreply.github.com> Date: Wed, 23 Nov 2022 08:11:01 -0500 Subject: [PATCH 039/177] Fixing mid kappa A inconsistency (#17) * Output codes in kundu.json * fixed kappa ratio * Update tedana/selection/selection_nodes.py Co-authored-by: Joshua Teves * minimal tree keep kappa>2rho Co-authored-by: Joshua Teves --- tedana/resources/decision_trees/kundu.json | 34 ++++++++++++-------- tedana/resources/decision_trees/minimal.json | 4 +-- tedana/selection/selection_nodes.py | 12 ++++--- 3 files changed, 30 insertions(+), 20 deletions(-) diff --git a/tedana/resources/decision_trees/kundu.json b/tedana/resources/decision_trees/kundu.json index 839b11851..98244bb4d 100644 --- a/tedana/resources/decision_trees/kundu.json +++ b/tedana/resources/decision_trees/kundu.json @@ -53,7 +53,8 @@ "log_extra_info": "Reject if Kappa>Rho", "log_extra_report": "", "tag_ifTrue": "Unlikely BOLD" - } + }, + "_comment": "Code I002 in premodularized tedana" }, { "functionname": "dec_left_op_right", @@ -72,7 +73,8 @@ "log_extra_info": "Reject if countsig_in S0clusters > T2clusters & countsig_in_T2clusters>0", "log_extra_report": "", "tag_ifTrue": "Unlikely BOLD" - } + }, + "_comment": "Code I003 in premodularized tedana" }, { "functionname": "calc_median", @@ -99,7 +101,8 @@ "log_extra_info": "Reject if DICE S0>T2 & varex>median", "log_extra_report": "", "tag_ifTrue": "Unlikely BOLD" - } + }, + "_comment": "Code I004 in premodularized tedana" }, { "functionname": "dec_left_op_right", @@ -118,7 +121,8 @@ "log_extra_info": "Reject if T2fitdiff_invsout_ICAmap_Tstat<0 & varex>median", "log_extra_report": "", "tag_ifTrue": "Unlikely BOLD" - } + }, + "_comment": "Code I005 in premodularized tedana" }, { "functionname": "calc_kappa_elbow", @@ -190,7 +194,8 @@ "tag_ifTrue": "No provisional accept", "log_extra_info": "If nothing is provisionally accepted by this point, be conservative and accept everything", "log_extra_report": "" - } + }, + "_comment": "Code I006 in premodularized tedana" }, { "functionname": "calc_varex_thresh", @@ -260,7 +265,7 @@ "right2": "varex_upper_thresh", "log_extra_info": "If variance and d_table_scores are high, then reject" }, - "_comment": "One of several steps that makes it more likely to reject high variance components" + "_comment": "Code I007 in premodularized tedana. 
One of several steps that makes it more likely to reject high variance components" }, { "functionname": "dec_left_op_right", @@ -284,7 +289,8 @@ "left3": "kappa", "right3": "kappa_elbow_kundu", "log_extra_info": "If low variance, accept even if bad kappa & d_table_scores" - } + }, + "_comment": "Code I008 in premodularized tedana" }, { "functionname": "dec_classification_doesnt_exist", @@ -300,7 +306,8 @@ "tag_ifTrue": "Likely BOLD", "log_extra_info": "If nothing left is unclassified, then accept all", "log_extra_report": "" - } + }, + "_comment": "No code in premodularized tedana" }, { "functionname": "calc_revised_meanmetricrank_guesses", @@ -338,7 +345,7 @@ "right3_scale": "extend_factor", "log_extra_info": "Reject if a combination of kappa, variance, and other factors are ranked worse than others" }, - "_comment": "Quirky combination 1 of a bunch of metrics that deal with rejecting some edge cases" + "_comment": "Code I009 in premodularized tedana. Quirky combination 1 of a bunch of metrics that deal with rejecting some edge cases" }, { "functionname": "dec_left_op_right", @@ -362,7 +369,7 @@ "right2_scale": "extend_factor", "log_extra_info": "Reject if a combination of variance and ranks of other metrics are worse than others" }, - "_comment": "Quirky combination 2 of a bunch of metrics that deal with rejecting some edge cases" + "_comment": "Code I010 in premodularized tedana. Quirky combination 2 of a bunch of metrics that deal with rejecting some edge cases" }, { "functionname": "calc_varex_thresh", @@ -399,7 +406,7 @@ "right2": "varex_new_lower_thresh", "log_extra_info": "Accept components with a bad d_table_score, but are at the higher end of the remaining variance so more cautious to not remove" }, - "_comment": "Yet another quirky criterion, but this one to keep components. In the original tree, varex_new_lower_thresh would be lower than it is here. If there are differences in results, might be worth adding a scaling factor" + "_comment": "Code I011 in premodularized tedana. Yet another quirky criterion, but this one to keep components. In the original tree, varex_new_lower_thresh would be lower than it is here. If there are differences in results, might be worth adding a scaling factor" }, { "functionname": "dec_left_op_right", @@ -421,7 +428,7 @@ "right2": "varex_new_lower_thresh", "log_extra_info": "Accept components above the kappa elbow, but are at the higher end of the remaining variance so more cautious to not remove" }, - "_comment": "Yet another quirky criterion, but this one to keep components. In the original tree, varex_new_lower_thresh would be lower than it is here. If there are differences in results, might be worth adding a scaling factor" + "_comment": "Code I012 in premodularized tedana. Yet another quirky criterion, but this one to keep components. In the original tree, varex_new_lower_thresh would be lower than it is here. 
If there are differences in results, might be worth adding a scaling factor"
     },
     {
         "functionname": "manual_classify",
@@ -436,7 +443,8 @@
             "log_extra_info": "Anything still provisional (accepted or rejected) should be accepted",
             "log_extra_report": "",
             "tag": "Likely BOLD"
-        }
+        },
+        "_comment": "No code in the premodularized tedana"
     }
     ]
 }
\ No newline at end of file
diff --git a/tedana/resources/decision_trees/minimal.json b/tedana/resources/decision_trees/minimal.json
index 5aa50ec11..b596e9293 100644
--- a/tedana/resources/decision_trees/minimal.json
+++ b/tedana/resources/decision_trees/minimal.json
@@ -167,9 +167,9 @@
             "right": "rho"
         },
         "kwargs": {
-            "log_extra_info": "If kappa>elbow and kappa>3*rho accept even if rho>elbow",
+            "log_extra_info": "If kappa>elbow and kappa>2*rho accept even if rho>elbow",
             "log_extra_report": "",
-            "right_scale": 3,
+            "right_scale": 2,
             "tag_ifTrue": "Likely BOLD"
         }
     },
diff --git a/tedana/selection/selection_nodes.py b/tedana/selection/selection_nodes.py
index d9d7ada39..b602ce496 100644
--- a/tedana/selection/selection_nodes.py
+++ b/tedana/selection/selection_nodes.py
@@ -1512,9 +1512,9 @@ def calc_varex_kappa_ratio(
     only_used_metrics=False,
 ):
     """
-    Calculates the variance explained / kappa ratio for the componentse in decide_comps
-    and add those values to a new column in the component_table titled "varex kappa ratio".
-    Also calculated kappa_rate which is a cross_component_metric
+    Calculates the cross_component_metric ``kappa_rate`` for the components in decide_comps
+    and then calculates the variance explained / kappa ratio for ALL components
+    and adds those values to a new column in the component_table titled "varex kappa ratio".
 
     Parameters
     ----------
@@ -1596,10 +1596,12 @@ def calc_varex_kappa_ratio(
     )
     outputs["kappa_rate"] = kappa_rate
     LGR.info(f"Kappa rate found to be {kappa_rate} from components " f"{comps2use}")
+    # NOTE: kappa_rate is calculated on a subset of components while
+    # "varex kappa ratio" is calculated for all components
     selector.component_table["varex kappa ratio"] = (
         kappa_rate
-        * selector.component_table.loc[comps2use, "variance explained"]
-        / selector.component_table.loc[comps2use, "kappa"]
+        * selector.component_table["variance explained"]
+        / selector.component_table["kappa"]
     )
     # Unclear if necessary, but this may clean up a weird issue on passing
     # references in a data frame. 
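
As a concrete illustration of the two behaviors touched above, here is a hedged sketch
(not tedana's exact code) of the minimal tree's kappa > 2*rho acceptance check and of
the "varex kappa ratio" column, which is filled for ALL components even though
kappa_rate itself is derived only from the comps2use subset; all numbers are made up:

    import pandas as pd

    component_table = pd.DataFrame(
        {
            "kappa": [120.0, 45.0, 80.0],
            "rho": [30.0, 60.0, 20.0],
            "variance explained": [5.0, 2.0, 10.0],
        }
    )

    # Minimal-tree check: kappa above its elbow AND kappa more than twice rho.
    kappa_elbow_kundu = 50.0  # hypothetical cross-component metric value
    accept_anyway = (component_table["kappa"] > kappa_elbow_kundu) & (
        component_table["kappa"] > 2 * component_table["rho"]
    )

    # Ratio column computed for every row, scaled by the subset-derived rate.
    kappa_rate = 0.2  # hypothetical; tedana computes this from comps2use only
    component_table["varex kappa ratio"] = (
        kappa_rate
        * component_table["variance explained"]
        / component_table["kappa"]
    )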
From 5a768be95bc6eed3f699183b43567e363af60f28 Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Wed, 23 Nov 2022 08:17:47 -0500 Subject: [PATCH 040/177] Drops 3.6 support --- setup.cfg | 3 +-- tedana/selection/ComponentSelector.py | 4 ++-- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/setup.cfg b/setup.cfg index e3ebd823c..d74a85dce 100644 --- a/setup.cfg +++ b/setup.cfg @@ -14,14 +14,13 @@ classifiers = Intended Audience :: Science/Research Topic :: Scientific/Engineering :: Information Analysis License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL) - Programming Language :: Python :: 3.6 Programming Language :: Python :: 3.7 Programming Language :: Python :: 3.8 Programming Language :: Python :: 3.9 Programming Language :: Python :: 3.10 [options] -python_requires = >= 3.6 +python_requires = >= 3.7 install_requires = bokeh<2.3.0 mapca>=0.0.3 diff --git a/tedana/selection/ComponentSelector.py b/tedana/selection/ComponentSelector.py index 90a0bf469..3466a4cbf 100644 --- a/tedana/selection/ComponentSelector.py +++ b/tedana/selection/ComponentSelector.py @@ -277,7 +277,7 @@ class ComponentSelector: If None, then look for `tree` within ./selection/data in the tedana code directory. default=None - + Returns ------- component_table : :obj:`pandas.DataFrame` @@ -287,7 +287,7 @@ class ComponentSelector: component_status_table : :obj:`pandas.DataFrame` A table tracking the status of each component at each step. nodes : :obj:`list[dict]` - Nodes used in decision tree. + Nodes used in decision tree. current_node_idx : :obj:`int` The index for the current node, which should be the last node in the decision tree. From e40bd60e8dbe9077835f01274c178f045827ae5d Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Wed, 23 Nov 2022 08:19:27 -0500 Subject: [PATCH 041/177] Remove 3.6 support from CircleCI tests --- .circleci/config.yml | 36 ------------------------------------ 1 file changed, 36 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 650ff3788..40cde265c 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -28,40 +28,6 @@ jobs: paths: - /opt/conda/envs/tedana_py37 - unittest_36: - docker: - - image: continuumio/miniconda3 - working_directory: /tmp/src/tedana - steps: - - checkout - - restore_cache: - key: conda-py36-v2-{{ checksum "setup.cfg" }} - - run: - name: Generate environment - command: | - apt-get update - apt-get install -yqq make - if [ ! 
-d /opt/conda/envs/tedana_py36 ]; then - conda create -yq -n tedana_py36 python=3.6 - source activate tedana_py36 - pip install .[tests] - fi - - run: - name: Running unit tests - command: | - source activate tedana_py36 - make unittest - mkdir /tmp/src/coverage - mv /tmp/src/tedana/.coverage /tmp/src/coverage/.coverage.py36 - - save_cache: - key: conda-py36-v2-{{ checksum "setup.cfg" }} - paths: - - /opt/conda/envs/tedana_py36 - - persist_to_workspace: - root: /tmp - paths: - - src/coverage/.coverage.py36 - unittest_37: docker: - image: continuumio/miniconda3 @@ -332,7 +298,6 @@ workflows: build_test: jobs: - makeenv_37 - - unittest_36 - unittest_37: requires: - makeenv_37 @@ -356,7 +321,6 @@ workflows: - unittest_310 - merge_coverage: requires: - - unittest_36 - unittest_37 - unittest_38 - unittest_39 From 6caeecfa91a73c82c0a7c9b2faf28f291be55ceb Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Wed, 23 Nov 2022 08:27:38 -0500 Subject: [PATCH 042/177] Reformat comment --- tedana/selection/ComponentSelector.py | 62 +++++++++++++-------------- 1 file changed, 31 insertions(+), 31 deletions(-) diff --git a/tedana/selection/ComponentSelector.py b/tedana/selection/ComponentSelector.py index 3466a4cbf..eee95891d 100644 --- a/tedana/selection/ComponentSelector.py +++ b/tedana/selection/ComponentSelector.py @@ -49,37 +49,37 @@ def load_config(tree): A validated decision tree for the component selection process. """ -# Formerly used text -# The `dict` has several required fields to describe the entire tree -# - `tree_id`: :obj:`str` The name of the tree -# - `info`: :obj:`str` A brief description of the tree for info logging -# - `report`: :obj:`str` -# - A narrative description of the tree that could be used in report logging -# - `refs`: :obj:`str` Publications that should be referenced, when this tree is used -# - `necessary_metrics`: :obj:`list[str]` -# - The metrics in `component_table` that will be used by this tree -# - `intermediate_classifications`: :obj:`list[str]` -# - User specified component classification labels. 'accepted', 'rejected', and -# - 'unclassified' are defaults that don't need to be included here -# - `classification_tags`: :obj:`list[str]` -# - Descriptive labels that can be used to explain why a component was accepted or rejected. -# - For example, ["Likely BOLD","Low variance"] -# - `nodes`: :obj:`list[dict]` Each dictionary includes the information -# -# to run one node in the decision tree. Each node should either be able -# to change component classifications (function names starting with ``dec_``) -# or calculate values using information from multiple components -# (function names starting with ``calc_``) -# nodes includes: -# - `functionname`: :obj:`str` The name of the function to be called -# - `parameters`: :obj:`dict` Required parameters for the function -# The only parameter that is used in all functions is `decidecomps`, -# which are the component classifications the function should run on. -# Most ``dec_`` functions also include `ifTrue` and `ifFalse` which -# define how to to change the classification of a component if the -# criteria in the function is true or false. 
-# -# - `kwargs`: :obj:`dict` Optional parameters for the function + # Formerly used text + # The `dict` has several required fields to describe the entire tree + # - `tree_id`: :obj:`str` The name of the tree + # - `info`: :obj:`str` A brief description of the tree for info logging + # - `report`: :obj:`str` + # - A narrative description of the tree that could be used in report logging + # - `refs`: :obj:`str` Publications that should be referenced, when this tree is used + # - `necessary_metrics`: :obj:`list[str]` + # - The metrics in `component_table` that will be used by this tree + # - `intermediate_classifications`: :obj:`list[str]` + # - User specified component classification labels. 'accepted', 'rejected', and + # - 'unclassified' are defaults that don't need to be included here + # - `classification_tags`: :obj:`list[str]` + # - Descriptive labels that can be used to explain why a component was accepted or rejected. + # - For example, ["Likely BOLD","Low variance"] + # - `nodes`: :obj:`list[dict]` Each dictionary includes the information + # + # to run one node in the decision tree. Each node should either be able + # to change component classifications (function names starting with ``dec_``) + # or calculate values using information from multiple components + # (function names starting with ``calc_``) + # nodes includes: + # - `functionname`: :obj:`str` The name of the function to be called + # - `parameters`: :obj:`dict` Required parameters for the function + # The only parameter that is used in all functions is `decidecomps`, + # which are the component classifications the function should run on. + # Most ``dec_`` functions also include `ifTrue` and `ifFalse` which + # define how to to change the classification of a component if the + # criteria in the function is true or false. + # + # - `kwargs`: :obj:`dict` Optional parameters for the function if tree in DEFAULT_TREES: fname = op.join(get_resource_path(), "decision_trees", tree + ".json") From 723e6af8b51efaacb8fbf1ca9dfbfdc1ea39b09e Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Wed, 23 Nov 2022 08:29:33 -0500 Subject: [PATCH 043/177] Reduce line length --- tedana/selection/ComponentSelector.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tedana/selection/ComponentSelector.py b/tedana/selection/ComponentSelector.py index eee95891d..e58d50dc7 100644 --- a/tedana/selection/ComponentSelector.py +++ b/tedana/selection/ComponentSelector.py @@ -62,7 +62,7 @@ def load_config(tree): # - User specified component classification labels. 'accepted', 'rejected', and # - 'unclassified' are defaults that don't need to be included here # - `classification_tags`: :obj:`list[str]` - # - Descriptive labels that can be used to explain why a component was accepted or rejected. + # - Descriptive labels to be used to explain why a component was accepted or rejected. 
# - For example, ["Likely BOLD","Low variance"] # - `nodes`: :obj:`list[dict]` Each dictionary includes the information # From 48bfe5a8028fb5738490d9770b18abc612c50eb4 Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Wed, 23 Nov 2022 08:33:51 -0500 Subject: [PATCH 044/177] Update lint in Makefile --- Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/Makefile b/Makefile index ba8b71886..b26334242 100644 --- a/Makefile +++ b/Makefile @@ -13,6 +13,7 @@ help: lint: @flake8 tedana + @black --check --diff tedana unittest: @py.test --skipintegration --cov-append --cov-report term-missing --cov=tedana tedana/ From f403e38639480b11258432aff752ad09e6f19092 Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Wed, 23 Nov 2022 09:06:32 -0500 Subject: [PATCH 045/177] Correctly collect API submodule doc --- docs/api.rst | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/docs/api.rst b/docs/api.rst index d7d0d5a75..6e72d913a 100644 --- a/docs/api.rst +++ b/docs/api.rst @@ -116,23 +116,17 @@ API :no-members: :no-inherited-members: - - .. currentmodule:: tedana.selection .. autosummary:: :toctree: generated/ - :template: function.rst - - tedana.selection.manual_selection - tedana.selection.kundu_tedpca - -.. autosummary:: tedana.selection.ComponentSelector - :toctree: generated/ - - tedana.selection.ComponentSelector.ComponentSelector - tedana.selection.ComponentSelector.load_config + :template: module.rst + tedana.selection.ComponentSelector + tedana.selection.selection_nodes + tedana.selection.selection_utils + tedana.selection.tedica + tedana.selection.tedpca .. _api_gscontrol_ref: From e3db4415f792c2a654c9703aadda8be4dd81c745 Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Wed, 23 Nov 2022 10:59:32 -0500 Subject: [PATCH 046/177] Fix errors --- tedana/selection/selection_nodes.py | 144 ++++++++++++++++++++-------- 1 file changed, 106 insertions(+), 38 deletions(-) diff --git a/tedana/selection/selection_nodes.py b/tedana/selection/selection_nodes.py index b602ce496..58da8cc6b 100644 --- a/tedana/selection/selection_nodes.py +++ b/tedana/selection/selection_nodes.py @@ -27,6 +27,54 @@ decision_docs = { "selector": """\ +selector: :obj:`tedana.selection.ComponentSelector` + The selector to perform decision tree-based component selection with.""", + "ifTrueFalse": """\ +ifTrue: :obj:`str` + If the condition in this step is True, give the component this label. + Use 'nochange' if no label changes are desired. + + ifFalse: :obj`str` + If the condition in this step is False, give the component this label. + Use 'nochange' to indicate if no label changes are desired. +""", + # FIXME: missing default + "decide_comps": """\ +decide_comps: :obj:`str` or :obj:`list[str]` + What classification(s) to operate on. Use 'all' to include all components.""", + "log_extra_report": """\ +log_extra_report: :obj:`str` + Additional text to place in the report log. Default "".""", + "log_extra_info": """\ +log_extra_info: :obj:`str` + Additional text to place in the information log. Default "".""", + "only_used_metrics": """\ +only_used_metrics: :obj:`bool` + Whether to only report what metrics will be used when this is run. Default False.""", + "custom_node_label": """\ +custom_node_label: :obj:`str` + A short label to use in the table for this step. One is automatically + assigned by default. Default "".""", + "tag_ifTrueFalse": """\ +tag_ifTrue: :obj:`str` + The classification tag to apply if a component is classified True. Default "". 
+ tag_ifFalse: :obj`str` + The classification tag to apply if a component is classified False. Default "".""", + "basicreturns": """\ +:obj:`tedana.selection.ComponentSelector`: The updated selector.""", + "extend_factor": """\ +extend_factor: :obj:`float` + A scalar used to set the threshold for the mean rank metric.""", + "restrict_factor": """\ +restrict_factor: :obj:`float` + A scalar used to set the threshold for the mean rank metric.""", + "prev_X_steps": """\ +prev_X_steps: :obj:`int` + The number of previous steps to search for a label in.""", +} + +_old_decision_docs = { + "selector": """\ selector: :obj:`tedana.selection.ComponentSelector` This structure contains most of the information needed to execute each decision node function and to store the ouput of the function. The class @@ -148,7 +196,8 @@ def manual_classify( classifications. If this is True, that warning is suppressed. (Useful if manual_classify is used to reset all labels to unclassified). default=False - {log_extra} + {log_extra_info} + {log_extra_report} {custom_node_label} {only_used_metrics} @@ -267,20 +316,17 @@ def dec_left_op_right( tag_ifFalse=None, ): """ - Tests a relationship between (left_scale*)left and (right_scale*right) - using an operator, like >, defined with op - This can be used to directly compare any 2 metrics and use that info - to change component classification. If either metric is a number, - this can also compare a metric against a fixed threshold. + Performs a relational comparison. Parameters ---------- {selector} {ifTrueFalse} {decide_comps} - op: :ojb:`str` + op: :obj:`str` Must be one of: ">", ">=", "==", "<=", "<" Applied the user defined operator to left op right + left, right: :obj:`str` or :obj:`float` The labels for the two metrics to be used for comparision. for example: left='kappa', right='rho' and op='>' means this @@ -295,7 +341,8 @@ def dec_left_op_right( cross_component_metrics, since those will resolve to a single value. This cannot be a label for a component_table column since that would output a different value for each component. default=1 - op2: :ojb:`str`, optional + + op2: :obj:`str`, optional left2, right2, left3, right3: :obj:`str` or :obj:`float`, optional left2_scale, right2_scale, left3_scale, right3_scale: :obj:`float`, optional This function can also be used to calculate the intersection of two or three @@ -303,7 +350,9 @@ def dec_left_op_right( this function returns (left_scale*)left op (right_scale*right) AND (left2_scale*)left2 op2 (right2_scale*right2) if the "3" parameters are also defined then it's the intersection of all 3 statements - {log_extra} + + {log_extra_info} + {log_extra_report} {custom_node_label} {only_used_metrics} {tag_ifTrueFalse} @@ -595,7 +644,8 @@ def dec_variance_lessthan_thresholds( all_comp_threshold: :obj: `float` The threshold for which the sum of all components Date: Wed, 23 Nov 2022 11:06:06 -0500 Subject: [PATCH 047/177] Fix more sphinx --- tedana/selection/selection_utils.py | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/tedana/selection/selection_utils.py b/tedana/selection/selection_utils.py index 8fb4eeb66..1170d6dbe 100644 --- a/tedana/selection/selection_utils.py +++ b/tedana/selection/selection_utils.py @@ -374,15 +374,17 @@ def log_decision_tree_step( function_name_idx: :obj:`str` The name of the function that should be logged. 
By convention, this be "Step current_node_idx: function_name" + comps2use: :obj:`list[int]` or -1 A list of component indices that should be used by a function. Only used to report no components found if empty and report the number of components found if not empty. - Note: calc_ functions that don't use component metrics do not + Note: ``calc_`` functions that don't use component metrics do not need to use the component_table and may not require selecting components. For those functions, set comps2use==-1 to avoid logging a warning that no components were found. Currently, this is only used by calc_extend_factor + decide_comps: :obj:`str` or :obj:`list[str]` or :obj:`list[int]` This is string or a list of strings describing what classifications of components to operate on. Only used in this function to report @@ -392,6 +394,7 @@ def log_decision_tree_step( ifTrue, ifFalse: :obj:`str` If a component is true or false, the classification to assign that component + calc_outputs: :obj:`dict` A dictionary with output information from the function. If it contains a key "calc_cross_comp_metrics" then the value for that key is a list of @@ -646,24 +649,22 @@ def rho_elbow_kundu_liberal( Component metric table. One row for each component, with a column for each metric. The index should be the component number. Only the 'kappa' column is used in this function + n_echos: :obj:`int` The number of echos in the multi-echo data rho_elbow_type: :obj:`str` - The algorithm used to calculate the rho elbow. Current options are: - kundu (default): Method used by Kundu in MEICA v2.7. It is the mean between - the rho elbow calculated on all components and a subset of unclassificated - components with some extra quirks - liberal: Same as kundu but is the maximum of the two elbows, which will minimize - the number of components rejected by having values greater than the rho elbow + The algorithm used to calculate the rho elbow. Current options are + 'kundu' and 'liberal'. + comps2use: :obj:`list[int]` A list of component indices used to calculate the elbow default=None which means use all components + subset_comps2use: :obj:`list[int]` A list of component indices used to calculate the elbow If None then only calculate a threshold using all components default=-1 which means use only 'unclassified' components - Returns ------- rho_elbow: :obj:`float` From 509affd19cda10231a6f667d06860e18acf09ffd Mon Sep 17 00:00:00 2001 From: handwerkerd Date: Tue, 29 Nov 2022 09:55:36 -0500 Subject: [PATCH 048/177] working on selector init documentation --- tedana/selection/ComponentSelector.py | 106 ++++++++++++-------------- 1 file changed, 48 insertions(+), 58 deletions(-) diff --git a/tedana/selection/ComponentSelector.py b/tedana/selection/ComponentSelector.py index e58d50dc7..d36a36d6a 100644 --- a/tedana/selection/ComponentSelector.py +++ b/tedana/selection/ComponentSelector.py @@ -253,59 +253,8 @@ def validate_tree(tree): class ComponentSelector: """ - Classifies components based on specified `tree` when the class is initialized - and then the `select` function is called. - The expected output of running a decision tree is that every component - will be classified as 'accepted', or 'rejected'. - - The selection process uses previously calculated parameters listed in - `component_table` for each ICA component such as Kappa (a T2* weighting metric), - Rho (an S0 weighting metric), and variance explained. 
See tedana.metrics - for more detail on the calculated metrics - - Parameters - ---------- - tree : :obj:`str` - A json file name without the '.json' extension that contains the decision tree to use - component_table : (C x M) :obj:`pandas.DataFrame` - Component metric table. One row for each component, with a column for - each metric; the index should be the component number! - user_notes : :obj:`str, optional` - Additional user notes about decision tree - path : :obj:`str, optional` - The directory path where `tree` is located. - If None, then look for `tree` within ./selection/data - in the tedana code directory. default=None - - - Returns - ------- - component_table : :obj:`pandas.DataFrame` - Updated component table with two extra columns. - cross_component_metrics : :obj:`Dict` - Metrics that are each a single value calculated across components. - component_status_table : :obj:`pandas.DataFrame` - A table tracking the status of each component at each step. - nodes : :obj:`list[dict]` - Nodes used in decision tree. - current_node_idx : :obj:`int` - The index for the current node, which should be the last node in the decision tree. - - Notes - ----- - Any parameter that is used by a decision tree node function can be passed - as a parameter of ComponentSelector class initialization function or can be - included in the json file that defines the decision tree. If a parameter - is set in the json file, that will take precedence. As a style rule, a - parameter that is the same regardless of the inputted data should be - defined in the decision tree json file. A parameter that is dataset specific - should be passed through the initialization function. Parameters that may need - to be passed through the class include: - - n_echos : :obj:`int, optional` - Number of echos in multi-echo fMRI data - n_vols: :obj:`int` - Number of volumes (time points) in the fMRI data + Contains information and methods to load and classify components based on + a specificed `tree` """ def __init__(self, tree, component_table, cross_component_metrics={}, status_table=None): @@ -319,20 +268,50 @@ def __init__(self, tree, component_table, cross_component_metrics={}, status_tab selector = ComponentSelector(tree, comptable, n_echos=n_echos, n_vols=n_vols) + Parameters + ---------- + tree : :obj:`str` + The named tree or path to a JSON file that defines one + component_table : (C x M) :obj:`pandas.DataFrame` + Component metric table. One row for each component, with a column for + each metric; the index should be the component number + cross_component_metrics : :obj:`Dict` + Metrics that are each a single value calculated across components. + Default is empty + status_table : :obj:`pandas.DataFrame` + A table tracking the status of each component at each step. + Pass a status table if running additional steps on a decision + tree that was already executed. Default=None. 
+ + Notes ----- - The structure has the following fields loaded from tree: + Initializing the `ComponentSelector` loads following fields from tree: - nodes - necessary_metrics - intermediate_classifications - classification_tags - Adds to the class structure: + Adds to the `ComponentSelector`: - - component_status_table: empty dataframe - - cross_component_metrics: empty dict + - component_status_table: empty dataframe or contents of inputted status_table + - cross_component_metrics: empty dict or contents of inputed values - used_metrics: empty set + + Any parameter that is used by a decision tree node function can be passed + as a parameter to ComponentSelector class initialization function or can be + included in the json file that defines the decision tree. If a parameter + is set in the json file, that will take precedence. As a style rule, a + parameter that is the same regardless of the inputted data should be + defined in the decision tree json file. A parameter that is dataset specific + should be passed through the initialization function. Parameters that may need + to be passed during initialization include: + + n_echos : :obj:`int, optional` + Number of echos in multi-echo fMRI data + n_vols: :obj:`int` + Number of volumes (time points) in the fMRI data """ self.tree_name = tree @@ -386,7 +365,18 @@ def select(self): Parameters all defined in class initialization - Returns + Classifies components based on specified `tree` when the class is initialized + and then the `select` function is called. + The expected output of running a decision tree is that every component + will be classified as 'accepted', or 'rejected'. + + The selection process uses previously calculated parameters listed in + `component_table` for each ICA component such as Kappa (a T2* weighting metric), + Rho (an S0 weighting metric), and variance explained. See tedana.metrics + for more detail on the calculated metrics + + + Notes ------- The following attributes are altered in this function are descibed in the ComponentSelector class description: From 4c7abce44bc7228f5cc45a6fd05871ec7ded9f15 Mon Sep 17 00:00:00 2001 From: handwerkerd Date: Tue, 29 Nov 2022 20:52:42 -0500 Subject: [PATCH 049/177] Breaking up outputs.rst --- docs/component_table_descriptions.rst | 57 ++++++++ docs/index.rst | 2 + docs/output_file_descriptions.rst | 136 ++++++++++++++++++ docs/outputs.rst | 195 ++------------------------ tedana/selection/ComponentSelector.py | 3 + 5 files changed, 209 insertions(+), 184 deletions(-) create mode 100644 docs/component_table_descriptions.rst create mode 100644 docs/output_file_descriptions.rst diff --git a/docs/component_table_descriptions.rst b/docs/component_table_descriptions.rst new file mode 100644 index 000000000..b75169dd1 --- /dev/null +++ b/docs/component_table_descriptions.rst @@ -0,0 +1,57 @@ +############################# +Component table descriptions +############################# + + +In order to make sense of the rationale codes in the component tables, +consult the tables below. +TEDPCA rationale codes start with a "P", while TEDICA codes start with an "I". 
+ +=============== ============================================================= +Classification Description +=============== ============================================================= +accepted BOLD-like components included in denoised and high-Kappa data +rejected Non-BOLD components excluded from denoised and high-Kappa data +ignored Low-variance components included in denoised, but excluded + from high-Kappa data +=============== ============================================================= + + +TEDPCA codes +============ + +===== =============== ======================================================== +Code Classification Description +===== =============== ======================================================== +P001 rejected Low Rho, Kappa, and variance explained +P002 rejected Low variance explained +P003 rejected Kappa equals fmax +P004 rejected Rho equals fmax +P005 rejected Cumulative variance explained above 95% (only in + stabilized PCA decision tree) +P006 rejected Kappa below fmin (only in stabilized PCA decision tree) +P007 rejected Rho below fmin (only in stabilized PCA decision tree) +===== =============== ======================================================== + + +TEDICA codes +============ + +===== ================= ======================================================== +Code Classification Description +===== ================= ======================================================== +I001 rejected|accepted Manual classification +I002 rejected Rho greater than Kappa +I003 rejected More significant voxels in S0 model than R2 model +I004 rejected S0 Dice is higher than R2 Dice and high variance + explained +I005 rejected Noise F-value is higher than signal F-value and high + variance explained +I006 ignored No good components found +I007 rejected Mid-Kappa component +I008 ignored Low variance explained +I009 rejected Mid-Kappa artifact type A +I010 rejected Mid-Kappa artifact type B +I011 ignored ign_add0 +I012 ignored ign_add1 +===== ================= ======================================================== diff --git a/docs/index.rst b/docs/index.rst index 3f3e1c3ef..fa3f168ff 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -188,6 +188,8 @@ tedana is licensed under GNU Lesser General Public License version 2.1. :name: hiddentoc dependence_metrics + output_file_descriptions + component_table_descriptions ****************** diff --git a/docs/output_file_descriptions.rst b/docs/output_file_descriptions.rst new file mode 100644 index 000000000..8d4966053 --- /dev/null +++ b/docs/output_file_descriptions.rst @@ -0,0 +1,136 @@ +############################# +Output file name descriptions +############################# + +The file names listed here are the default "bids" names. If `tedana` is called with +the `--convention` option, then other file names will be generated. The file names used +for others options are stored in `outputs.json`_. + +.. _outputs.json: https://github.com/ME-ICA/tedana/blob/main/tedana/resources/config/outputs.json + +================================================ ===================================================== +Filename Content +================================================ ===================================================== +dataset_description.json Top-level metadata for the workflow. +T2starmap.nii.gz Full estimated T2* 3D map. + Values are in seconds. 
+ The difference between the limited and full maps + is that, for voxels affected by dropout where + only one echo contains good data, the full map uses + the T2* estimate from the first two echoes, while the + limited map has a NaN. +S0map.nii.gz Full S0 3D map. + The difference between the limited and full maps + is that, for voxels affected by dropout where + only one echo contains good data, the full map uses + the S0 estimate from the first two echoes, while the + limited map has a NaN. +desc-optcom_bold.nii.gz Optimally combined time series. +desc-optcomDenoised_bold.nii.gz Denoised optimally combined time series. Recommended + dataset for analysis. +desc-optcomRejected_bold.nii.gz Combined time series from rejected components. +desc-optcomAccepted_bold.nii.gz High-kappa time series. This dataset does not + include thermal noise or low variance components. + Not the recommended dataset for analysis. +desc-adaptiveGoodSignal_mask.nii.gz Integer-valued mask used in the workflow, where + each voxel's value corresponds to the number of good + echoes to be used for T2\*/S0 estimation. +desc-PCA_mixing.tsv Mixing matrix (component time series) from PCA + decomposition in a tab-delimited file. Each column is + a different component, and the column name is the + component number. +desc-PCA_decomposition.json Metadata for the PCA decomposition. +desc-PCA_stat-z_components.nii.gz Component weight maps from PCA decomposition. + Each map corresponds to the same component index in + the mixing matrix and component table. + Maps are in z-statistics. +desc-PCA_metrics.tsv TEDPCA component table. A BIDS Derivatives-compatible + TSV file with summary metrics and inclusion/exclusion + information for each component from the PCA + decomposition. +desc-PCA_metrics.json Metadata about the metrics in ``desc-PCA_metrics.tsv``. +desc-ICA_mixing.tsv Mixing matrix (component time series) from ICA + decomposition in a tab-delimited file. Each column is + a different component, and the column name is the + component number. +desc-ICA_components.nii.gz Full ICA coefficient feature set. +desc-ICA_stat-z_components.nii.gz Z-statistic component weight maps from ICA + decomposition. + Values are z-transformed standardized regression + coefficients. Each map corresponds to the same + component index in the mixing matrix and component table. +desc-ICA_decomposition.json Metadata for the ICA decomposition. +desc-tedana_metrics.tsv TEDICA component table. A BIDS Derivatives-compatible + TSV file with summary metrics and inclusion/exclusion + information for each component from the ICA + decomposition. +desc-tedana_metrics.json Metadata about the metrics in + ``desc-tedana_metrics.tsv``. +desc-ICAAccepted_components.nii.gz High-kappa ICA coefficient feature set +desc-ICAAcceptedZ_components.nii.gz Z-normalized spatial component maps +report.txt A summary report for the workflow with relevant + citations. +references.bib The BibTeX entries for references cited in + report.txt. +tedana_report.html The interactive HTML report. +================================================ ===================================================== + +If ``verbose`` is set to True: + +============================================================== ===================================================== +Filename Content +============================================================== ===================================================== +desc-limited_T2starmap.nii.gz Limited T2* map/time series. + Values are in seconds. 
+                                                               The difference between the limited and full maps
+                                                               is that, for voxels affected by dropout where
+                                                               only one echo contains good data, the full map uses
+                                                               the T2* estimate from the first two echoes, while the
+                                                               limited map has a NaN.
+desc-limited_S0map.nii.gz                                      Limited S0 map/time series.
+                                                               The difference between the limited and full maps
+                                                               is that, for voxels affected by dropout where
+                                                               only one echo contains good data, the full map uses
+                                                               the S0 estimate from the first two echoes, while the
+                                                               limited map has a NaN.
+echo-[echo]_desc-[PCA|ICA]_components.nii.gz                   Echo-wise PCA/ICA component weight maps.
+echo-[echo]_desc-[PCA|ICA]R2ModelPredictions_components.nii.gz Component- and voxel-wise R2-model predictions,
+                                                               separated by echo.
+echo-[echo]_desc-[PCA|ICA]S0ModelPredictions_components.nii.gz Component- and voxel-wise S0-model predictions,
+                                                               separated by echo.
+desc-[PCA|ICA]AveragingWeights_components.nii.gz               Component-wise averaging weights for metric
+                                                               calculation.
+desc-[PCA|ICA]S0_stat-F_statmap.nii.gz                         F-statistic map for each component, for the S0 model.
+desc-[PCA|ICA]T2_stat-F_statmap.nii.gz                         F-statistic map for each component, for the T2 model.
+desc-optcomPCAReduced_bold.nii.gz                              Optimally combined data after dimensionality
+                                                               reduction with PCA. This is the input to the ICA.
+echo-[echo]_desc-Accepted_bold.nii.gz                          High-Kappa time series for echo number ``echo``
+echo-[echo]_desc-Rejected_bold.nii.gz                          Low-Kappa time series for echo number ``echo``
+echo-[echo]_desc-Denoised_bold.nii.gz                          Denoised time series for echo number ``echo``
+============================================================== =====================================================
+
+If ``gscontrol`` includes 'gsr':
+
+================================================ =====================================================
+Filename                                         Content
+================================================ =====================================================
+desc-globalSignal_map.nii.gz                     Spatial global signal
+desc-globalSignal_timeseries.tsv                 Time series of global signal from optimally combined
+                                                 data.
+desc-optcomWithGlobalSignal_bold.nii.gz          Optimally combined time series with global signal
+                                                 retained.
+desc-optcomNoGlobalSignal_bold.nii.gz            Optimally combined time series with global signal
+                                                 removed.
+================================================ ===================================================== + +If ``gscontrol`` includes 't1c': + +================================================ ===================================================== +Filename Content +================================================ ===================================================== +desc-T1likeEffect_min.nii.gz T1-like effect +desc-optcomAcceptedT1cDenoised_bold.nii.gz T1-corrected high-kappa time series by regression +desc-optcomT1cDenoised_bold.nii.gz T1-corrected denoised time series +desc-TEDICAAcceptedT1cDenoised_components.nii.gz T1-GS corrected high-kappa components +desc-TEDICAT1cDenoised_mixing.tsv T1-GS corrected mixing matrix +================================================ ===================================================== \ No newline at end of file diff --git a/docs/outputs.rst b/docs/outputs.rst index e686bd100..c67031cd8 100644 --- a/docs/outputs.rst +++ b/docs/outputs.rst @@ -5,137 +5,15 @@ Outputs of tedana ################# -****************************** -Outputs of the tedana workflow -****************************** - -================================================ ===================================================== -Filename Content -================================================ ===================================================== -dataset_description.json Top-level metadata for the workflow. -T2starmap.nii.gz Full estimated T2* 3D map. - Values are in seconds. - The difference between the limited and full maps - is that, for voxels affected by dropout where - only one echo contains good data, the full map uses - the T2* estimate from the first two echoes, while the - limited map has a NaN. -S0map.nii.gz Full S0 3D map. - The difference between the limited and full maps - is that, for voxels affected by dropout where - only one echo contains good data, the full map uses - the S0 estimate from the first two echoes, while the - limited map has a NaN. -desc-optcom_bold.nii.gz Optimally combined time series. -desc-optcomDenoised_bold.nii.gz Denoised optimally combined time series. Recommended - dataset for analysis. -desc-optcomRejected_bold.nii.gz Combined time series from rejected components. -desc-optcomAccepted_bold.nii.gz High-kappa time series. This dataset does not - include thermal noise or low variance components. - Not the recommended dataset for analysis. -desc-adaptiveGoodSignal_mask.nii.gz Integer-valued mask used in the workflow, where - each voxel's value corresponds to the number of good - echoes to be used for T2\*/S0 estimation. -desc-PCA_mixing.tsv Mixing matrix (component time series) from PCA - decomposition in a tab-delimited file. Each column is - a different component, and the column name is the - component number. -desc-PCA_decomposition.json Metadata for the PCA decomposition. -desc-PCA_stat-z_components.nii.gz Component weight maps from PCA decomposition. - Each map corresponds to the same component index in - the mixing matrix and component table. - Maps are in z-statistics. -desc-PCA_metrics.tsv TEDPCA component table. A BIDS Derivatives-compatible - TSV file with summary metrics and inclusion/exclusion - information for each component from the PCA - decomposition. -desc-PCA_metrics.json Metadata about the metrics in ``desc-PCA_metrics.tsv``. -desc-ICA_mixing.tsv Mixing matrix (component time series) from ICA - decomposition in a tab-delimited file. Each column is - a different component, and the column name is the - component number. 
-desc-ICA_components.nii.gz Full ICA coefficient feature set. -desc-ICA_stat-z_components.nii.gz Z-statistic component weight maps from ICA - decomposition. - Values are z-transformed standardized regression - coefficients. Each map corresponds to the same - component index in the mixing matrix and component table. -desc-ICA_decomposition.json Metadata for the ICA decomposition. -desc-tedana_metrics.tsv TEDICA component table. A BIDS Derivatives-compatible - TSV file with summary metrics and inclusion/exclusion - information for each component from the ICA - decomposition. -desc-tedana_metrics.json Metadata about the metrics in - ``desc-tedana_metrics.tsv``. -desc-ICAAccepted_components.nii.gz High-kappa ICA coefficient feature set -desc-ICAAcceptedZ_components.nii.gz Z-normalized spatial component maps -report.txt A summary report for the workflow with relevant - citations. -references.bib The BibTeX entries for references cited in - report.txt. -tedana_report.html The interactive HTML report. -================================================ ===================================================== - -If ``verbose`` is set to True: - -============================================================== ===================================================== -Filename Content -============================================================== ===================================================== -desc-limited_T2starmap.nii.gz Limited T2* map/time series. - Values are in seconds. - The difference between the limited and full maps - is that, for voxels affected by dropout where - only one echo contains good data, the full map uses - the S0 estimate from the first two echoes, while the - limited map has a NaN. -desc-limited_S0map.nii.gz Limited S0 map/time series. - The difference between the limited and full maps - is that, for voxels affected by dropout where - only one echo contains good data, the full map uses - the S0 estimate from the first two echoes, while the - limited map has a NaN. -echo-[echo]_desc-[PCA|ICA]_components.nii.gz Echo-wise PCA/ICA component weight maps. -echo-[echo]_desc-[PCA|ICA]R2ModelPredictions_components.nii.gz Component- and voxel-wise R2-model predictions, - separated by echo. -echo-[echo]_desc-[PCA|ICA]S0ModelPredictions_components.nii.gz Component- and voxel-wise S0-model predictions, - separated by echo. -desc-[PCA|ICA]AveragingWeights_components.nii.gz Component-wise averaging weights for metric - calculation. -desc-[PCA|ICA]S0_stat-F_statmap.nii.gz F-statistic map for each component, for the S0 model. -desc-[PCA|ICA]T2_stat-F_statmap.nii.gz F-statistic map for each component, for the T2 model. -desc-optcomPCAReduced_bold.nii.gz Optimally combined data after dimensionality - reduction with PCA. This is the input to the ICA. 
-echo-[echo]_desc-Accepted_bold.nii.gz                          High-Kappa time series for echo number ``echo``
-echo-[echo]_desc-Rejected_bold.nii.gz                          Low-Kappa time series for echo number ``echo``
-echo-[echo]_desc-Denoised_bold.nii.gz                          Denoised time series for echo number ``echo``
-============================================================== =====================================================
-
-If ``gscontrol`` includes 'gsr':
-
-================================================ =====================================================
-Filename                                         Content
-================================================ =====================================================
-desc-globalSignal_map.nii.gz                     Spatial global signal
-desc-globalSignal_timeseries.tsv                 Time series of global signal from optimally combined
-                                                 data.
-desc-optcomWithGlobalSignal_bold.nii.gz          Optimally combined time series with global signal
-                                                 retained.
-desc-optcomNoGlobalSignal_bold.nii.gz            Optimally combined time series with global signal
-                                                 removed.
-================================================ =====================================================
-
-If ``gscontrol`` includes 't1c':
-
-================================================ =====================================================
-Filename                                         Content
-================================================ =====================================================
-desc-T1likeEffect_min.nii.gz                     T1-like effect
-desc-optcomAcceptedT1cDenoised_bold.nii.gz       T1-corrected high-kappa time series by regression
-desc-optcomT1cDenoised_bold.nii.gz               T1-corrected denoised time series
-desc-TEDICAAcceptedT1cDenoised_components.nii.gz T1-GS corrected high-kappa components
-desc-TEDICAT1cDenoised_mixing.tsv                T1-GS corrected mixing matrix
-================================================ =====================================================
+***************************************
+Filename outputs of the tedana workflow
+***************************************
 
+When tedana is run, it outputs files for the optimally combined and denoised
+data and many additional files to help understand the results and facilitate
+future processing. `Descriptions of these output files are here`_.
+
+.. _Descriptions of these output files are here: output_file_descriptions.html
 
 ****************
 Component tables
 ****************
@@ -144,61 +22,10 @@ Component tables
 TEDPCA and TEDICA use component tables to track relevant metrics, component
 classifications, and rationales behind classifications.
 The component tables are stored as tsv files for BIDS-compatibility.
+`Full descriptions of these outputs are here`_.
+
+.. _Full descriptions of these outputs are here: component_table_descriptions.html
 
-In order to make sense of the rationale codes in the component tables,
-consult the tables below.
-TEDPCA rationale codes start with a "P", while TEDICA codes start with an "I".
- -=============== ============================================================= -Classification Description -=============== ============================================================= -accepted BOLD-like components included in denoised and high-Kappa data -rejected Non-BOLD components excluded from denoised and high-Kappa data -ignored Low-variance components included in denoised, but excluded - from high-Kappa data -=============== ============================================================= - - -TEDPCA codes -============ - -===== =============== ======================================================== -Code Classification Description -===== =============== ======================================================== -P001 rejected Low Rho, Kappa, and variance explained -P002 rejected Low variance explained -P003 rejected Kappa equals fmax -P004 rejected Rho equals fmax -P005 rejected Cumulative variance explained above 95% (only in - stabilized PCA decision tree) -P006 rejected Kappa below fmin (only in stabilized PCA decision tree) -P007 rejected Rho below fmin (only in stabilized PCA decision tree) -===== =============== ======================================================== - - -TEDICA codes -============ - -===== ================= ======================================================== -Code Classification Description -===== ================= ======================================================== -I001 rejected|accepted Manual classification -I002 rejected Rho greater than Kappa -I003 rejected More significant voxels in S0 model than R2 model -I004 rejected S0 Dice is higher than R2 Dice and high variance - explained -I005 rejected Noise F-value is higher than signal F-value and high - variance explained -I006 ignored No good components found -I007 rejected Mid-Kappa component -I008 ignored Low variance explained -I009 rejected Mid-Kappa artifact type A -I010 rejected Mid-Kappa artifact type B -I011 ignored ign_add0 -I012 ignored ign_add1 -===== ================= ======================================================== - -.. _interactive reports: ********************* ICA Components Report diff --git a/tedana/selection/ComponentSelector.py b/tedana/selection/ComponentSelector.py index d36a36d6a..1f6154e7e 100644 --- a/tedana/selection/ComponentSelector.py +++ b/tedana/selection/ComponentSelector.py @@ -30,6 +30,9 @@ class TreeError(Exception): + """ + Passes errors that are raised when `validate_tree` fails + """ pass From a881b57fa10d47d4fcbba9feb27e5d98ea1c88df Mon Sep 17 00:00:00 2001 From: handwerkerd Date: Thu, 1 Dec 2022 15:16:01 -0500 Subject: [PATCH 050/177] partially updated output_file_descriptions.rst --- docs/output_file_descriptions.rst | 141 +++++++++++++++--------------- 1 file changed, 72 insertions(+), 69 deletions(-) diff --git a/docs/output_file_descriptions.rst b/docs/output_file_descriptions.rst index 8d4966053..ab192ee35 100644 --- a/docs/output_file_descriptions.rst +++ b/docs/output_file_descriptions.rst @@ -2,78 +2,81 @@ Output file name descriptions ############################# -The file names listed here are the default "bids" names. If `tedana` is called with -the `--convention` option, then other file names will be generated. The file names used -for others options are stored in `outputs.json`_. +tedana allows for multiple file naming conventions. The key labels and naming options for +each convention that can be set using the `--convention` option are in `outputs.json`_. 
+The output of `tedana` also includes a file called `registry.json` or
+`desc-tedana_registry.json` that includes the keys and the matching file names for the
+output. The table below lists both these keys and the default "BIDS Derivatives"
+file names.
 
 .. _outputs.json: https://github.com/ME-ICA/tedana/blob/main/tedana/resources/config/outputs.json
 
-================================================ =====================================================
-Filename                                         Content
-================================================ =====================================================
-dataset_description.json                         Top-level metadata for the workflow.
-T2starmap.nii.gz                                 Full estimated T2* 3D map.
-                                                 Values are in seconds.
-                                                 The difference between the limited and full maps
-                                                 is that, for voxels affected by dropout where
-                                                 only one echo contains good data, the full map uses
-                                                 the T2* estimate from the first two echoes, while the
-                                                 limited map has a NaN.
-S0map.nii.gz                                     Full S0 3D map.
-                                                 The difference between the limited and full maps
-                                                 is that, for voxels affected by dropout where
-                                                 only one echo contains good data, the full map uses
-                                                 the S0 estimate from the first two echoes, while the
-                                                 limited map has a NaN.
-desc-optcom_bold.nii.gz                          Optimally combined time series.
-desc-optcomDenoised_bold.nii.gz                  Denoised optimally combined time series. Recommended
-                                                 dataset for analysis.
-desc-optcomRejected_bold.nii.gz                  Combined time series from rejected components.
-desc-optcomAccepted_bold.nii.gz                  High-kappa time series. This dataset does not
-                                                 include thermal noise or low variance components.
-                                                 Not the recommended dataset for analysis.
-desc-adaptiveGoodSignal_mask.nii.gz              Integer-valued mask used in the workflow, where
-                                                 each voxel's value corresponds to the number of good
-                                                 echoes to be used for T2\*/S0 estimation.
-desc-PCA_mixing.tsv                              Mixing matrix (component time series) from PCA
-                                                 decomposition in a tab-delimited file. Each column is
-                                                 a different component, and the column name is the
-                                                 component number.
-desc-PCA_decomposition.json                      Metadata for the PCA decomposition.
-desc-PCA_stat-z_components.nii.gz                Component weight maps from PCA decomposition.
-                                                 Each map corresponds to the same component index in
-                                                 the mixing matrix and component table.
-                                                 Maps are in z-statistics.
-desc-PCA_metrics.tsv                             TEDPCA component table. A BIDS Derivatives-compatible
-                                                 TSV file with summary metrics and inclusion/exclusion
-                                                 information for each component from the PCA
-                                                 decomposition.
-desc-PCA_metrics.json                            Metadata about the metrics in ``desc-PCA_metrics.tsv``.
-desc-ICA_mixing.tsv                              Mixing matrix (component time series) from ICA
-                                                 decomposition in a tab-delimited file. Each column is
-                                                 a different component, and the column name is the
-                                                 component number.
-desc-ICA_components.nii.gz                       Full ICA coefficient feature set.
-desc-ICA_stat-z_components.nii.gz                Z-statistic component weight maps from ICA
-                                                 decomposition.
-                                                 Values are z-transformed standardized regression
-                                                 coefficients. Each map corresponds to the same
-                                                 component index in the mixing matrix and component table.
-desc-ICA_decomposition.json                      Metadata for the ICA decomposition.
-desc-tedana_metrics.tsv                          TEDICA component table. A BIDS Derivatives-compatible
-                                                 TSV file with summary metrics and inclusion/exclusion
-                                                 information for each component from the ICA
-                                                 decomposition.
-desc-tedana_metrics.json                         Metadata about the metrics in
-                                                 ``desc-tedana_metrics.tsv``.
-desc-ICAAccepted_components.nii.gz High-kappa ICA coefficient feature set -desc-ICAAcceptedZ_components.nii.gz Z-normalized spatial component maps -report.txt A summary report for the workflow with relevant - citations. -references.bib The BibTeX entries for references cited in - report.txt. -tedana_report.html The interactive HTML report. -================================================ ===================================================== +=========================================================================== ===================================================== +Key: Filename Content +=========================================================================== ===================================================== +"data description json": dataset_description.json Top-level metadata for the workflow. +"t2star img": T2starmap.nii.gz Full estimated T2* 3D map. + Values are in seconds. + The difference between the limited and full maps + is that, for voxels affected by dropout where + only one echo contains good data, the full map uses + the T2* estimate from the first two echoes, while the + limited map has a NaN. +"s0 img": S0map.nii.gz Full S0 3D map. + The difference between the limited and full maps + is that, for voxels affected by dropout where + only one echo contains good data, the full map uses + the S0 estimate from the first two echoes, while the + limited map has a NaN. +"combined img": desc-optcom_bold.nii.gz Optimally combined time series. +"denoised ts img": desc-optcomDenoised_bold.nii.gz Denoised optimally combined time series. Recommended + dataset for analysis. +"low kappa ts img": desc-optcomRejected_bold.nii.gz Combined time series from rejected components. +"high kappa ts img": desc-optcomAccepted_bold.nii.gz High-kappa time series. This dataset does not + include thermal noise or low variance components. + Not the recommended dataset for analysis. +"adaptive mask img": desc-adaptiveGoodSignal_mask.nii.gz Integer-valued mask used in the workflow, where + each voxel's value corresponds to the number of good + echoes to be used for T2\*/S0 estimation. +"PCA mixing tsv": desc-PCA_mixing.tsv Mixing matrix (component time series) from PCA + decomposition in a tab-delimited file. Each column is + a different component, and the column name is the + component number. +"PCA decomposition json": desc-PCA_decomposition.json Metadata for the PCA decomposition. +"z-scored PCA components img": desc-PCA_stat-z_components.nii.gz Component weight maps from PCA decomposition. + Each map corresponds to the same component index in + the mixing matrix and component table. + Maps are in z-statistics. +"PCA metrics tsv": desc-PCA_metrics.tsv TEDPCA component table. A BIDS Derivatives-compatible + TSV file with summary metrics and inclusion/exclusion + information for each component from the PCA + decomposition. +"PCA metrics json": desc-PCA_metrics.json Metadata about the metrics in ``desc-PCA_metrics.tsv``. +"ICA mixing tsv": desc-ICA_mixing.tsv Mixing matrix (component time series) from ICA + decomposition in a tab-delimited file. Each column is + a different component, and the column name is the + component number. +"ICA components img": desc-ICA_components.nii.gz Full ICA coefficient feature set. +"z-scored ICA components img": desc-ICA_stat-z_components.nii.gz Z-statistic component weight maps from ICA + decomposition. + Values are z-transformed standardized regression + coefficients. Each map corresponds to the same + component index in the mixing matrix and component table. 
+"ICA decomposition json": desc-ICA_decomposition.json Metadata for the ICA decomposition. +"ICA metrics tsv": desc-tedana_metrics.tsv TEDICA component table. A BIDS Derivatives-compatible + TSV file with summary metrics and inclusion/exclusion + information for each component from the ICA + decomposition. +"ICA metrics json": desc-tedana_metrics.json Metadata about the metrics in + ``desc-tedana_metrics.tsv``. +"ICA accepted components img": desc-ICAAccepted_components.nii.gz High-kappa ICA coefficient feature set +"z-scored ICA accepted components img": desc-ICAAcceptedZ_components.nii.gz Z-normalized spatial component maps +report.txt A summary report for the workflow with relevant + citations. +references.bib The BibTeX entries for references cited in + report.txt. +tedana_report.html The interactive HTML report. +=========================================================================== ===================================================== If ``verbose`` is set to True: From 05757ff212fbc22c980785363bae99c48f1944b9 Mon Sep 17 00:00:00 2001 From: handwerkerd Date: Thu, 1 Dec 2022 16:57:12 -0500 Subject: [PATCH 051/177] changed n_bold_comps to n_accepted_comps --- tedana/selection/ComponentSelector.py | 105 +++++++++++++------------- tedana/workflows/tedana_reclassify.py | 2 +- 2 files changed, 55 insertions(+), 52 deletions(-) diff --git a/tedana/selection/ComponentSelector.py b/tedana/selection/ComponentSelector.py index 1f6154e7e..2092c385f 100644 --- a/tedana/selection/ComponentSelector.py +++ b/tedana/selection/ComponentSelector.py @@ -33,6 +33,7 @@ class TreeError(Exception): """ Passes errors that are raised when `validate_tree` fails """ + pass @@ -257,20 +258,13 @@ def validate_tree(tree): class ComponentSelector: """ Contains information and methods to load and classify components based on - a specificed `tree` + a specified `tree` """ def __init__(self, tree, component_table, cross_component_metrics={}, status_table=None): """ Initialize the class using the info specified in the json file `tree` - Any optional variables defined in the function call will be added to - the class structure. Several trees expect n_echos to be defined. - The full kundu tree also require n_vols (number of volumes) to be - defined. An example initialization with these options would look like - selector = ComponentSelector(tree, comptable, n_echos=n_echos, - n_vols=n_vols) - Parameters ---------- tree : :obj:`str` @@ -284,17 +278,13 @@ def __init__(self, tree, component_table, cross_component_metrics={}, status_tab status_table : :obj:`pandas.DataFrame` A table tracking the status of each component at each step. Pass a status table if running additional steps on a decision - tree that was already executed. Default=None. + tree that was already executed. Default=None. 
Notes
         -----
-        Initializing the `ComponentSelector` loads the following fields from tree:
-
-        - nodes
-        - necessary_metrics
-        - intermediate_classifications
-        - classification_tags
+        Initializing the `ComponentSelector` confirms the tree is valid and
+        loads all information in the tree json file into `ComponentSelector`
 
         Adds to the `ComponentSelector`:
 
@@ -303,19 +293,25 @@ def __init__(self, tree, component_table, cross_component_metrics={}, status_tab
         Any parameter that is used by a decision tree node function can be passed
-        as a parameter to ComponentSelector class initialization function or can be
+        as a parameter in the `ComponentSelector` initialization or can be
         included in the json file that defines the decision tree. If a parameter
         is set in the json file, that will take precedence. As a style rule, a
         parameter that is the same regardless of the inputted data should be
         defined in the decision tree json file. A parameter that is dataset specific
-        should be passed through the initialization function. Parameters that may need
-        to be passed during initialization include:
+        should be passed through the initialization function. Dataset specific
+        parameters that may need to be passed during initialization include:
 
-        n_echos : :obj:`int, optional`
-            Number of echos in multi-echo fMRI data
+        n_echos : :obj:`int`
+            Number of echos in multi-echo fMRI data.
+            Required for kundu and minimal trees
         n_vols: :obj:`int`
             Number of volumes (time points) in the fMRI data
+            Required for kundu tree
+
+        An example initialization with these options would look like
+        `selector = ComponentSelector(tree, comptable, n_echos=n_echos, n_vols=n_vols)`
         """
+
         self.tree_name = tree
 
         self.__dict__.update(cross_component_metrics)
@@ -363,33 +359,40 @@ def __init__(self, tree, component_table, cross_component_metrics={}, status_tab
 
     def select(self):
         """
-        Parse the parameters used to call each function in the component
-        selection decision tree and run the functions to classify components
-
-        Parameters all defined in class initialization
-
-        Classifies components based on specified `tree` when the class is initialized
-        and then the `select` function is called.
-        The expected output of running a decision tree is that every component
-        will be classified as 'accepted' or 'rejected'.
-
-        The selection process uses previously calculated parameters listed in
-        `component_table` for each ICA component such as Kappa (a T2* weighting metric),
-        Rho (an S0 weighting metric), and variance explained. See tedana.metrics
-        for more detail on the calculated metrics.
-
+        Using the validated tree in `ComponentSelector`, run the decision
+        tree functions to calculate cross_component metrics and classify
+        each component as accepted or rejected.
 
         Notes
         -------
-        The following attributes that are altered in this function are described in
-        the ComponentSelector class description:
-        component_table, cross_component_metrics, component_status_table,
-        cross_component_metrics, used_metrics, nodes (outputs field),
-        current_node_idx
+        The selection process uses previously calculated parameters stored in
+        `component_table` for each ICA component such as Kappa (a T2* weighting metric),
+        Rho (an S0 weighting metric), and variance explained. If a necessary metric
+        is not calculated, this will not run. See `tedana.metrics` for more detail on
+        the calculated metrics.
+
+        This can be used on a component_table with no component classifications, or to
+        alter classifications on a component_table that was already run (i.e. for manual
+        classification changes after visual inspection)
+
+        When this is run, multiple elements in `ComponentSelector` will change including:
+
+        - component_table: `classification` column with `accepted` or `rejected` labels
+          and `classification_tags` column which can hold multiple labels explaining why
+          a classification happened
+        - cross_component_metrics: Any values that were calculated based on the metric
+          values across components or by direct user input
+        - component_status_table: Contains the classification statuses at each node
+          in the decision tree
+        - used_metrics: A list of metrics used in the selection process
+        - nodes: The original tree definition with an added `outputs` key
+          listing everything that changed in each node
+        - current_node_idx: The total number of nodes run in `ComponentSelector`
         """
-        # TODO: force-add classification tags
+
         if "classification_tags" not in self.component_table.columns:
             self.component_table["classification_tags"] = ""
+
         # this will crash the program with an error message if not all
         # necessary_metrics are in the comptable
         confirm_metrics_exist(
@@ -439,13 +442,14 @@ def select(self):
         self.are_all_components_accepted_or_rejected()
 
     def add_manual(self, indices, classification):
-        """Add nodes that will manually classify components
+        """
+        Add nodes that will manually classify components
 
         Parameters
         ----------
-        indices: list[int]
+        indices: :obj:`list[int]`
            The indices to manually classify
-        classification: str
+        classification: :obj:`str`
            The classification to set the nodes to
        """
        self.tree["nodes"].append(
@@ -468,8 +472,8 @@ def check_null(self, params, fcn):
 
        Returns
        -------
-        params
-            The values for the inputted parameters
+        params: :obj:`dict`
+            The keys and values for the inputted parameters
        """
 
        for key, val in params.items():
@@ -529,17 +533,16 @@ def n_comps(self):
        """The number of components in the component table."""
        return len(self.component_table)
 
-    @property
-    def n_bold_comps(self):
-        """The number of components that are considered bold-weighted."""
-        ct = self.component_table
-        return len(ct[ct.classification == "accepted"])
-
    @property
    def accepted_comps(self):
        """The number of components that are accepted."""
        return self.component_table["classification"] == "accepted"
 
+    @property
+    def n_accepted_comps(self):
+        """The number of components that are accepted."""
+        return self.accepted_comps.sum()
+
    @property
    def rejected_comps(self):
        """The number of components that are rejected."""
diff --git a/tedana/workflows/tedana_reclassify.py b/tedana/workflows/tedana_reclassify.py
index af777f669..36e83394e 100644
--- a/tedana/workflows/tedana_reclassify.py
+++ b/tedana/workflows/tedana_reclassify.py
@@ -314,7 +314,7 @@ def post_tedana(
     # Save component selector and tree
     selector.to_files(io_generator)
 
-    if selector.n_bold_comps == 0:
+    if selector.n_accepted_comps == 0:
         LGR.warning("No BOLD components detected!
Please check data and results!") mmix_orig = mmix.copy() From 6d44265cee265ac678f8017345e707de8bdd845c Mon Sep 17 00:00:00 2001 From: handwerkerd Date: Thu, 1 Dec 2022 16:57:36 -0500 Subject: [PATCH 052/177] n_bold_comps to n_accepted_comps --- tedana/workflows/tedana.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tedana/workflows/tedana.py b/tedana/workflows/tedana.py index 434b822cb..67084fd6a 100644 --- a/tedana/workflows/tedana.py +++ b/tedana/workflows/tedana.py @@ -649,10 +649,10 @@ def tedana_workflow( metrics=required_metrics, ) ica_selection = selection.automatic_selection(comptable, n_echos, n_vols, tree=tree) - n_bold_comps = ica_selection.n_bold_comps - if (n_restarts < maxrestart) and (n_bold_comps == 0): + n_accepted_comps = ica_selection.n_accepted_comps + if (n_restarts < maxrestart) and (n_accepted_comps == 0): LGR.warning("No BOLD components found. Re-attempting ICA.") - elif n_bold_comps == 0: + elif n_accepted_comps == 0: LGR.warning("No BOLD components found, but maximum number of restarts reached.") keep_restarting = False else: @@ -723,7 +723,7 @@ def tedana_workflow( } io_generator.save_file(decomp_metadata, "ICA decomposition json") - if ica_selection.n_bold_comps == 0: + if ica_selection.n_accepted_comps == 0: LGR.warning("No BOLD components detected! Please check data and results!") # TODO: un-hack separate comptable From e91498e9b46b2d32a38039f66cc05d5b7d80e60c Mon Sep 17 00:00:00 2001 From: handwerkerd Date: Thu, 1 Dec 2022 17:26:57 -0500 Subject: [PATCH 053/177] ComponentSelector.py API docs cleaned up --- tedana/selection/ComponentSelector.py | 34 +-------------------------- 1 file changed, 1 insertion(+), 33 deletions(-) diff --git a/tedana/selection/ComponentSelector.py b/tedana/selection/ComponentSelector.py index 2092c385f..3b5d72c2b 100644 --- a/tedana/selection/ComponentSelector.py +++ b/tedana/selection/ComponentSelector.py @@ -53,38 +53,6 @@ def load_config(tree): A validated decision tree for the component selection process. """ - # Formerly used text - # The `dict` has several required fields to describe the entire tree - # - `tree_id`: :obj:`str` The name of the tree - # - `info`: :obj:`str` A brief description of the tree for info logging - # - `report`: :obj:`str` - # - A narrative description of the tree that could be used in report logging - # - `refs`: :obj:`str` Publications that should be referenced, when this tree is used - # - `necessary_metrics`: :obj:`list[str]` - # - The metrics in `component_table` that will be used by this tree - # - `intermediate_classifications`: :obj:`list[str]` - # - User specified component classification labels. 'accepted', 'rejected', and - # - 'unclassified' are defaults that don't need to be included here - # - `classification_tags`: :obj:`list[str]` - # - Descriptive labels to be used to explain why a component was accepted or rejected. - # - For example, ["Likely BOLD","Low variance"] - # - `nodes`: :obj:`list[dict]` Each dictionary includes the information - # - # to run one node in the decision tree. 
Each node should either be able - # to change component classifications (function names starting with ``dec_``) - # or calculate values using information from multiple components - # (function names starting with ``calc_``) - # nodes includes: - # - `functionname`: :obj:`str` The name of the function to be called - # - `parameters`: :obj:`dict` Required parameters for the function - # The only parameter that is used in all functions is `decidecomps`, - # which are the component classifications the function should run on. - # Most ``dec_`` functions also include `ifTrue` and `ifFalse` which - # define how to to change the classification of a component if the - # criteria in the function is true or false. - # - # - `kwargs`: :obj:`dict` Optional parameters for the function - if tree in DEFAULT_TREES: fname = op.join(get_resource_path(), "decision_trees", tree + ".json") else: @@ -568,7 +536,7 @@ def to_files(self, io_generator): Parameters ---------- - io_generator: tedana.io.OutputGenerator + io_generator: :obj:`tedana.io.OutputGenerator` The output generator to use for filename generation and saving. """ io_generator.save_file(self.component_table, "ICA metrics tsv") From d09c97711361a86daadc9d9c9039c0e19068399f Mon Sep 17 00:00:00 2001 From: handwerkerd Date: Thu, 1 Dec 2022 17:55:43 -0500 Subject: [PATCH 054/177] selection_nodes decision_docs updated --- tedana/selection/ComponentSelector.py | 4 +-- tedana/selection/selection_nodes.py | 50 ++++++++++----------------- 2 files changed, 20 insertions(+), 34 deletions(-) diff --git a/tedana/selection/ComponentSelector.py b/tedana/selection/ComponentSelector.py index 3b5d72c2b..6ba83a464 100644 --- a/tedana/selection/ComponentSelector.py +++ b/tedana/selection/ComponentSelector.py @@ -240,7 +240,7 @@ def __init__(self, tree, component_table, cross_component_metrics={}, status_tab component_table : (C x M) :obj:`pandas.DataFrame` Component metric table. One row for each component, with a column for each metric; the index should be the component number - cross_component_metrics : :obj:`Dict` + cross_component_metrics : :obj:`dict` Metrics that are each a single value calculated across components. Default is empty status_table : :obj:`pandas.DataFrame` @@ -269,7 +269,7 @@ def __init__(self, tree, component_table, cross_component_metrics={}, status_tab should be passed through the initialization function. Dataset specific parameters that may need to be passed during initialization include: - n_echos : :obj:`int, optional` + n_echos : :obj:`int` Number of echos in multi-echo fMRI data. Required for kundu and minimal trees n_vols: :obj:`int` diff --git a/tedana/selection/selection_nodes.py b/tedana/selection/selection_nodes.py index 58da8cc6b..e3e3c7e7b 100644 --- a/tedana/selection/selection_nodes.py +++ b/tedana/selection/selection_nodes.py @@ -33,44 +33,37 @@ ifTrue: :obj:`str` If the condition in this step is True, give the component this label. Use 'nochange' if no label changes are desired. - - ifFalse: :obj`str` + ifFalse: :obj:`str` If the condition in this step is False, give the component this label. Use 'nochange' to indicate if no label changes are desired. """, - # FIXME: missing default "decide_comps": """\ decide_comps: :obj:`str` or :obj:`list[str]` What classification(s) to operate on. Use 'all' to include all components.""", "log_extra_report": """\ log_extra_report: :obj:`str` - Additional text to place in the report log. Default "".""", + Additional text to the report log. 
Default "".""", "log_extra_info": """\ log_extra_info: :obj:`str` - Additional text to place in the information log. Default "".""", + Additional text to the information log. Default "".""", "only_used_metrics": """\ only_used_metrics: :obj:`bool` Whether to only report what metrics will be used when this is run. Default False.""", "custom_node_label": """\ custom_node_label: :obj:`str` - A short label to use in the table for this step. One is automatically - assigned by default. Default "".""", + A short label to describe what happens in this step. If "" then a label is + automatically generated. Default "".""", "tag_ifTrueFalse": """\ tag_ifTrue: :obj:`str` The classification tag to apply if a component is classified True. Default "". tag_ifFalse: :obj`str` The classification tag to apply if a component is classified False. Default "".""", "basicreturns": """\ -:obj:`tedana.selection.ComponentSelector`: The updated selector.""", - "extend_factor": """\ -extend_factor: :obj:`float` - A scalar used to set the threshold for the mean rank metric.""", - "restrict_factor": """\ -restrict_factor: :obj:`float` - A scalar used to set the threshold for the mean rank metric.""", - "prev_X_steps": """\ -prev_X_steps: :obj:`int` - The number of previous steps to search for a label in.""", +"selector" :obj:`tedana.selection.ComponentSelector` + If only_used_metrics is False, the updated selector is returned + "used_metrics" :obj:`set(str)`: + If only_used_metrics is True, the names of the metrics used in the + function are returned""", } _old_decision_docs = { @@ -129,7 +122,7 @@ classified as true or false. default=None """, "basicreturns": """\ -selector: :obj:`tedana.selection.ComponentSelector` +"selector": :obj:`tedana.selection.ComponentSelector` The key fields that will be changed in selector are the component classifications and tags in component_table or a new metric that is added to cross_component_metrics. The output field for the current @@ -147,12 +140,6 @@ A scaler used to set the threshold for the mean rank metric \ """, - "prev_X_steps": """\ -prev_X_steps: :obj:`int` - Search for components with a classification label in the current or the previous X steps in - the decision tree - \ - """, } @@ -329,22 +316,21 @@ def dec_left_op_right( left, right: :obj:`str` or :obj:`float` The labels for the two metrics to be used for comparision. - for example: left='kappa', right='rho' and op='>' means this + For example: left='kappa', right='rho' and op='>' means this function will test kappa>rho. One of the two can also be a number. - In that case a metric would be compared against a fixed threshold. + In that case, a metric would be compared against a fixed threshold. For example left='T2fitdiff_invsout_ICAmap_Tstat', right=0, and op='>' means this function will test T2fitdiff_invsout_ICAmap_Tstat>0 - left_scale, right_scale: :obj:`float`, optional + left_scale, right_scale: :obj:`float` or :obj:`str` Multiply the left or right metrics value by a constant. For example if left='kappa', right='rho', right_scale=2, and op='>' this tests - kappa>(2*rho). These also be a string that labels a value in + kappa>(2*rho). These can also be a string that is a value in cross_component_metrics, since those will resolve to a single value. This cannot be a label for a component_table column since that would output a different value for each component. 
default=1
-
-    op2: :obj:`str`, optional
-    left2, right2, left3, right3: :obj:`str` or :obj:`float`, optional
-    left2_scale, right2_scale, left3_scale, right3_scale: :obj:`float`, optional
+    op2: :obj:`str`, Default=None
+    left2, right2, left3, right3: :obj:`str` or :obj:`float`, Default=None
+    left2_scale, right2_scale, left3_scale, right3_scale: :obj:`float` or :obj:`str`, Default=1
        This function can also be used to calculate the intersection of two or three
        boolean statements. If op2, left2, and right2 are defined then this function
        returns

From 6dd25ff42277dcf98e3006cf9663b1d795c0cbe4 Mon Sep 17 00:00:00 2001
From: handwerkerd
Date: Thu, 1 Dec 2022 21:58:48 -0500
Subject: [PATCH 055/177] selection_nodes docstrings cleaned up

---
 tedana/resources/decision_trees/kundu.json |   4 +-
 tedana/selection/selection_nodes.py        | 251 +++++++--------------
 tedana/selection/selection_utils.py        |   2 +-
 3 files changed, 86 insertions(+), 171 deletions(-)

diff --git a/tedana/resources/decision_trees/kundu.json b/tedana/resources/decision_trees/kundu.json
index 98244bb4d..a0f134ec1 100644
--- a/tedana/resources/decision_trees/kundu.json
+++ b/tedana/resources/decision_trees/kundu.json
@@ -191,7 +191,7 @@
                "class_comp_exists": "provisionalaccept"
            },
            "kwargs": {
-                "tag_ifTrue": "No provisional accept",
+                "tag": "No provisional accept",
                "log_extra_info": "If nothing is provisionally accepted by this point, be conservative and accept everything",
                "log_extra_report": ""
            },
@@ -303,7 +303,7 @@
                "class_comp_exists": "unclassified"
            },
            "kwargs": {
-                "tag_ifTrue": "Likely BOLD",
+                "tag": "Likely BOLD",
                "log_extra_info": "If nothing left is unclassified, then accept all",
                "log_extra_report": ""
            },
diff --git a/tedana/selection/selection_nodes.py b/tedana/selection/selection_nodes.py
index e3e3c7e7b..be6d8926c 100644
--- a/tedana/selection/selection_nodes.py
+++ b/tedana/selection/selection_nodes.py
@@ -31,117 +31,44 @@
    The selector to perform decision tree-based component selection with.""",
    "ifTrueFalse": """\
 ifTrue: :obj:`str`
-    If the condition in this step is True, give the component this label.
-    Use 'nochange' if no label changes are desired.
+    If the condition in this step is True, give the component classification this
+    label. Use 'nochange' if no label changes are desired.
    ifFalse: :obj:`str`
-    If the condition in this step is False, give the component this label.
-    Use 'nochange' to indicate if no label changes are desired.
+    If the condition in this step is False, give the component classification this
+    label. Use 'nochange' to indicate if no label changes are desired.
 """,
    "decide_comps": """\
 decide_comps: :obj:`str` or :obj:`list[str]`
-    What classification(s) to operate on. Use 'all' to include all components.""",
+    What classification(s) to operate on, using default or
+    intermediate_classification labels. For example: decide_comps='unclassified'
+    means to operate only on unclassified components. Use 'all' to include all
+    components.""",
    "log_extra_report": """\
 log_extra_report: :obj:`str`
-    Additional text to the report log. Default "".""",
+    Additional text to the report log. Default="".""",
    "log_extra_info": """\
 log_extra_info: :obj:`str`
-    Additional text to the information log. Default "".""",
+    Additional text to the information log. Default="".""",
    "only_used_metrics": """\
 only_used_metrics: :obj:`bool`
-    Whether to only report what metrics will be used when this is run. Default False.""",
+    If True, only return the component_table metrics that would be used.
Default=False.""", "custom_node_label": """\ custom_node_label: :obj:`str` A short label to describe what happens in this step. If "" then a label is - automatically generated. Default "".""", + automatically generated. Default="".""", "tag_ifTrueFalse": """\ tag_ifTrue: :obj:`str` - The classification tag to apply if a component is classified True. Default "". - tag_ifFalse: :obj`str` - The classification tag to apply if a component is classified False. Default "".""", + The classification tag to apply if a component is classified True. Default="". + tag_ifFalse: :obj:`str` + The classification tag to apply if a component is classified False. Default="".""", "basicreturns": """\ -"selector" :obj:`tedana.selection.ComponentSelector` +selector: :obj:`tedana.selection.ComponentSelector` If only_used_metrics is False, the updated selector is returned - "used_metrics" :obj:`set(str)`: + used_metrics: :obj:`set(str)` If only_used_metrics is True, the names of the metrics used in the function are returned""", } -_old_decision_docs = { - "selector": """\ -selector: :obj:`tedana.selection.ComponentSelector` - This structure contains most of the information needed to execute each - decision node function and to store the ouput of the function. The class - description has full details. Key elements include: component_table: - The metrics for each component, and the classification - labels and tags; cross_component_metrics: Values like the kappa and rho - elbows that are used to create decision criteria; nodes: Information on - the function calls for each step in the decision tree; and - current_node_idx: which is the ordered index for when a function is - called in the decision tree\ -""", - "ifTrueFalse": """\ -ifTrue, ifFalse: :obj:`str` - If the condition in this step is true or false, give the component - the label in this string. Options are 'accepted', 'rejected', - 'nochange', or intermediate_classification labels predefined in the - decision tree. If 'nochange' then don't change the current component - classification\ -""", - "decide_comps": """\ -decide_comps: :obj:`str` or :obj:`list[str]` - This is string or a list of strings describing what classifications - of components to operate on, using default or intermediate_classification - labels. For example: decide_comps='unclassified' means to operate only on - unclassified components. The label 'all' will operate on all components - regardess of classification.\ -""", - "log_extra": """\ -log_extra_report, log_extra_info: :obj:`str` - Text for each function call is automatically placed in the logger output - In addition to that text, the text in these these strings will also be - included in the logger with the report or info codes respectively. - These might be useful to give a narrative explanation of why a step was - parameterized a certain way. default="" (no extra logging)\ -""", - "only_used_metrics": """\ -only_used_metrics: :obj:`bool` - If true, this function will only return the names of the comptable metrics - that will be used when this function is fully run. default=False\ -""", - "custom_node_label": """\ -custom_node_label: :obj:`str` - A brief label for what happens in this node that can be used in a decision -tree summary table or flow chart. 
If custom_node_label is not empty, then the -text in this parameter is used instead of the text would be automatically -assigned within the function call default=""\ -""", - "tag_ifTrueFalse": """\ -tag_ifTrue, tag_ifFalse: :obj:`str` - A string containing a label in classification_tags that will be added to - the classification_tags column in component_table if a component is - classified as true or false. default=None -""", - "basicreturns": """\ -"selector": :obj:`tedana.selection.ComponentSelector` - The key fields that will be changed in selector are the component - classifications and tags in component_table or a new metric that is - added to cross_component_metrics. The output field for the current - node will also be updated to include relevant information including - the use_metrics of the node, and the numTrue and numFalse components - the call to the node's function.\ -""", - "extend_factor": """\ -extend_factor: :obj:`float` - A scaler used to set the threshold for the mean rank metric - \ - """, - "restrict_factor": """\ -restrict_factor: :obj:`float` - A scaler used to set the threshold for the mean rank metric - \ - """, -} - def manual_classify( selector, @@ -156,24 +83,23 @@ def manual_classify( dont_warn_reclassify=False, ): """ - Explicitly assign a classifictation, defined in new_classification, + Explicitly assign a classification, defined in new_classification, to all the components in decide_comps. Parameters ---------- {selector} {decide_comps} - new_classification: :obj: `str` + new_classification: :obj:`str` Assign all components identified in decide_comps the classification in new_classification. Options are 'unclassified', 'accepted', 'rejected', or intermediate_classification labels predefined in the decision tree - clear_classification_tags: :obj: `bool` + clear_classification_tags: :obj:`bool` If True, reset all values in the 'classification_tags' column to empty strings. This also can create the classification_tags column if it - does not already exist - If False, do nothing. - tag: :obj: `str` + does not already exist. If False, do nothing. + tag: :obj:`str` A classification tag to assign to all components being reclassified. This should be one of the tags defined by classification_tags in the decision tree specification @@ -182,7 +108,7 @@ def manual_classify( rejected to something else, it gives a warning, since those should be terminal classifications. If this is True, that warning is suppressed. (Useful if manual_classify is used to reset all labels to unclassified). - default=False + Default=False {log_extra_info} {log_extra_report} {custom_node_label} @@ -196,13 +122,11 @@ def manual_classify( Note ---- This was designed with three use - cases in mind: - 1. Set the classifications of all components to unclassified for the first - node of a decision tree. clear_classification_tags=True is recommended for - this use case - 2. Shift all components between classifications, such as provisionalaccept - to accepted for the penultimate node in the decision tree. - 3. Manually re-classify components by number based on user observations. + cases in mind: (1) Set the classifications of all components to unclassified + for the first node of a decision tree. clear_classification_tags=True is + recommended for this use case. (2) Shift all components between classifications, + such as provisionalaccept to accepted for the penultimate node in the decision tree. + (3) Manually re-classify components by number based on user observations. 
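
As a minimal sketch of use case (1) above, assuming ``selector`` is an
already-initialized ``ComponentSelector`` and using the keyword names from
the signature earlier in this file:

    from tedana.selection.selection_nodes import manual_classify

    # First node of a tree: reset every component before reclassifying,
    # wiping old tags and suppressing the reclassification warning
    selector = manual_classify(
        selector,
        decide_comps="all",
        new_classification="unclassified",
        clear_classification_tags=True,
        dont_warn_reclassify=True,
    )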
Unlike other decision node functions, ifTrue and ifFalse are not inputs since the same classification is assigned to all components listed in @@ -327,7 +251,7 @@ def dec_left_op_right( kappa>(2*rho). These can also be a string that is a value in cross_component_metrics, since those will resolve to a single value. This cannot be a label for a component_table column since that would - output a different value for each component. default=1 + output a different value for each component. Default=1 op2: :obj:`str`, Default=None left2, right2, left3, right3: :obj:`str` or :obj:`float`, Default=None left2_scale, right2_scale, left3_scale, right3_scale: :obj:`float` or :obj:`str`, Default=1 @@ -352,13 +276,13 @@ def dec_left_op_right( This function is ideally run with one boolean statement at a time so that the result of each boolean is logged. For example, it's better to test kappa>kappa_elbow and rho>rho_elbow with two separate calls to this function - so that the results of each can be easily viewed. That said, particularly for + so that the results of each test can be easily viewed. That said, particularly for the original kundu decision tree, if you're making decisions on components with various classifications based on multiple boolean statements, the decision tree becomes really messy and the added functionality here is useful. Combinations of boolean statements only test with "and" and not "or". This is - an intentional decision because, if a classification changes if A or B are true - then the results of each should be logged separately + an intentional decision because, if a classification changes if A>B or C>D are true + then A>B and C>D should be logged separately """ # predefine all outputs that should be logged @@ -610,10 +534,11 @@ def dec_variance_lessthan_thresholds( tag_ifFalse=None, ): """ - Finds components with variancerho_elbow are more likely rejected). The liberal threshold option takes the max of the two - elbows based on rho values. The assumption is that the thrshold on + elbows based on rho values. The assumption is that the threshold on unclassified components is always lower and can likely be excluded. 
Both rho elbows are now logged so that it will be possible to confirm this
with data & make additional adjustments to this threshold

From 6e597ae65fa05aa15a62ab8eebf3dbd730e439d4 Mon Sep 17 00:00:00 2001
From: handwerkerd
Date: Thu, 1 Dec 2022 22:10:56 -0500
Subject: [PATCH 056/177] Fixed a test for selection_nodes

---
 tedana/tests/test_selection_nodes.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tedana/tests/test_selection_nodes.py b/tedana/tests/test_selection_nodes.py
index bea20ea62..e5e05c1c1 100644
--- a/tedana/tests/test_selection_nodes.py
+++ b/tedana/tests/test_selection_nodes.py
@@ -671,7 +671,7 @@ def test_dec_classification_doesnt_exist_smoke():
         log_extra_report="report log",
         log_extra_info="info log",
         custom_node_label="custom label",
-        tag_ifTrue="test true tag",
+        tag="test true tag",
     )
     assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numTrue"] == 0
     # Lists the number of components in decide_comps in numFalse
@@ -705,7 +705,7 @@ def test_dec_classification_doesnt_exist_smoke():
         "changed accepted",
         decide_comps,
         class_comp_exists="provisional reject",
-        tag_ifTrue="test true tag",
+        tag="test true tag",
     )
     assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numTrue"] == 17
     assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numFalse"] == 0

From e13d680e5ad1a97704baf5fa8a0715c233b28124 Mon Sep 17 00:00:00 2001
From: handwerkerd
Date: Fri, 2 Dec 2022 14:35:50 -0500
Subject: [PATCH 057/177] Updated faq for tedana_reclassify and tree options

---
 docs/contributing.rst                 |  2 +-
 docs/faq.rst                          | 56 +++++++++++++++++++++++++--
 tedana/selection/ComponentSelector.py |  4 +-
 3 files changed, 56 insertions(+), 6 deletions(-)

diff --git a/docs/contributing.rst b/docs/contributing.rst
index 13fd5e25d..18d0e5ce0 100644
--- a/docs/contributing.rst
+++ b/docs/contributing.rst
@@ -125,7 +125,7 @@ this switch, but it also means that exactly reproducing previous MEICA analyses
 The other reason is that the core developers have chosen to look forwards rather than
 maintaining an older code base.

-As described in the :ref:`governance` section, ``tedana`` is maintained by a small team of
+As described in the `governance`_ section, ``tedana`` is maintained by a small team of
 volunteers with limited development time. If you'd like to use MEICA as has been previously
 published the code is available on `bitbucket`_ and freely available under a LGPL2 license.

diff --git a/docs/faq.rst b/docs/faq.rst
index 72ef2b439..d267b60cc 100644
--- a/docs/faq.rst
+++ b/docs/faq.rst
@@ -76,12 +76,62 @@ applying tedana, and you encounter this problem, please submit a question to `Ne

 ********************************************************************************
-[tedana] I think that some BOLD ICA components have been misclassified as noise.
+[tedana] Can I manually reclassify components?
 ********************************************************************************

-``tedana`` allows users to manually specify accepted components when calling the pipeline.
-You can use the ``--manacc`` argument to specify the indices of components to accept.
+``tedana_reclassify`` allows users to manually alter component classifications.
+This can be used both as a command line tool and as part of other interactive
+programs, such as `RICA`_. RICA creates a graphical interface that is similar to
+the built-in tedana reports and lets users interactively change component
+classifications.
Both programs will log which component classifications were
+manually altered. If one wants to retain the original denoised time series,
+make sure to output the denoised time series into a separate directory.
+
+.. _RICA: https://github.com/ME-ICA/rica
+
+*************************************************************************************
+[tedana] What is the difference between the kundu and minimal decision trees?
+*************************************************************************************
+
+The decision tree is the series of conditions through which each component is
+classified as accepted or rejected. The currently default kundu tree (`--tree kundu`)
+was used in Prantik Kundu's MEICA v2.7 and is the classification process that has long
+been used by ``tedana``, and users have been generally content with the results. The
+kundu tree used multiple intersecting metrics and rankings to classify components.
+How these steps may interact on specific datasets is opaque. While there is a kappa
+(T2*-weighted) elbow threshold and a rho (S0-weighted) elbow threshold, as discussed
+in publications, no component is accepted or rejected because of those thresholds.
+Users sometimes notice rejected components that clearly should have been accepted. For
+example, a component that included a clear T2*-weighted V1 response to a block design
+flashing checkerboard was sometimes rejected because the relatively large variance of
+that component interacted with a rejection criterion.
+
+The minimal tree (`--tree minimal`) is designed to be easier to understand and less
+likely to reject T2* weighted components. There are a few other criteria, but components
+with `kappa>kappa elbow` and `rho Date: Fri, 2 Dec 2022 15:34:59 -0500
Subject: [PATCH 058/177] docstrings in tedica and other small updates

---
 docs/index.rst                      |  2 +-
 tedana/selection/selection_nodes.py |  2 +-
 tedana/selection/tedica.py          | 35 ++++++++++++++++-------------
 3 files changed, 21 insertions(+), 18 deletions(-)

diff --git a/docs/index.rst b/docs/index.rst
index fa3f168ff..d698c21ed 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -175,9 +175,9 @@ tedana is licensed under GNU Lesser General Public License version 2.1.

    multi-echo
    usage
    approach
-   building decision trees
    outputs
    faq
+   building decision trees
    support
    contributing
    roadmap

diff --git a/tedana/selection/selection_nodes.py b/tedana/selection/selection_nodes.py
index be6d8926c..48cfbe5af 100644
--- a/tedana/selection/selection_nodes.py
+++ b/tedana/selection/selection_nodes.py
@@ -64,7 +64,7 @@
     "basicreturns": """\
 selector: :obj:`tedana.selection.ComponentSelector`
     If only_used_metrics is False, the updated selector is returned
-    used_metrics: :obj:`set(str)`
+used_metrics: :obj:`set(str)`
     If only_used_metrics is True, the names of the metrics used in the
     function are returned""",
 }

diff --git a/tedana/selection/tedica.py b/tedana/selection/tedica.py
index 991b37742..7631f6f4e 100644
--- a/tedana/selection/tedica.py
+++ b/tedana/selection/tedica.py
@@ -82,32 +82,30 @@ def manual_selection(comptable, acc=None, rej=None):
     return comptable, metric_metadata


-def automatic_selection(comptable, n_echos, n_vols, tree="minimal"):
-    """Classify components based on component table and tree type.
+def automatic_selection(component_table, n_echos, n_vols, tree="minimal"):
+    """Classify components based on component table and decision tree type.
     Parameters
     ----------
-    comptable: pd.DataFrame
+    component_table: :obj:`pd.DataFrame`
         The component table to classify
-    n_echos: int
+    n_echos: :obj:`int`
         The number of echoes in this dataset
-    tree: str
-        The type of tree to use for the ComponentSelector object
+    n_vols: :obj:`int`
+        The number of volumes (time points) in this dataset
+    tree: :obj:`str`
+        The type of tree to use for the ComponentSelector object. Default="minimal"

     Returns
     -------
-    A dataframe of the component table, after classification and reorder
-    The metadata associated with the component table
-    See Also
-    --------
-    ComponentSelector, the class used to represent the classification process
+    selector: :obj:`tedana.selection.ComponentSelector`
+        Contains component classifications in a component_table and provenance
+        and metadata from the component selection process
+
     Notes
     -----
-    The selection algorithm used in this function was originated in ME-ICA
+    If tree=kundu, the selection algorithm used in this function originated in ME-ICA
     by Prantik Kundu, and his original implementation is available at:
-    https://github.com/ME-ICA/me-ica/blob/\
-    b2781dd087ab9de99a2ec3925f04f02ce84f0adc/meica.libs/select_model.py
+    https://github.com/ME-ICA/me-ica/blob/b2781dd087ab9de99a2ec3925f04f02ce84f0adc/meica.libs/select_model.py

     The appropriate citation is :footcite:t:`kundu2013integrated`.

@@ -119,9 +117,14 @@
     components, a hypercommented version of this attempt is available at:
     https://gist.github.com/emdupre/ca92d52d345d08ee85e104093b81482e

+    If tree==minimal, the selection algorithm is based on the kundu tree with differences
+    described in the `FAQ`_
+
     References
     ----------
     .. footbibliography::
+
+    .. _FAQ: faq.html
     """
     LGR.info("Performing ICA component selection with Kundu decision tree v2.5")
     RepLGR.info(
@@ -131,12 +134,12 @@
         "decision tree (v2.5) \\citep{kundu2013integrated}."
     )

-    comptable["classification_tags"] = ""
+    component_table["classification_tags"] = ""
     xcomp = {
         "n_echos": n_echos,
         "n_vols": n_vols,
    }
-    selector = ComponentSelector(tree, comptable, cross_component_metrics=xcomp)
+    selector = ComponentSelector(tree, component_table, cross_component_metrics=xcomp)
     selector.select()
     selector.metadata = collect.get_metadata(selector.component_table)

From 728ceb33490faeef05973e8696f590cc15d5bd30 Mon Sep 17 00:00:00 2001
From: handwerkerd
Date: Mon, 5 Dec 2022 12:00:21 -0500
Subject: [PATCH 059/177] Updated docstrings in selection_utils.py

---
 docs/faq.rst                        |  2 +-
 tedana/selection/selection_utils.py | 42 ++++++++++++++++-----------
 2 files changed, 27 insertions(+), 17 deletions(-)

diff --git a/docs/faq.rst b/docs/faq.rst
index d267b60cc..4a9f71f94 100644
--- a/docs/faq.rst
+++ b/docs/faq.rst
@@ -94,7 +94,7 @@ make sure to output the denoised time series into a separate directory.
 *************************************************************************************

 The decision tree is the series of conditions through which each component is
-classified as accepted or rejected. The currently default kundu tree (`--tree kundu`)
+classified as accepted or rejected. The kundu tree (`--tree kundu`)
 was used in Prantik Kundu's MEICA v2.7 and is the classification process that has long
 been used by ``tedana``, and users have been generally content with the results. The
 kundu tree used multiple intersecting metrics and rankings to classify components.
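To make the reworked selection interface concrete, here is a minimal sketch of
calling ``automatic_selection`` directly from Python. It assumes a component
table saved by an earlier tedana run under the default name
``desc-tedana_metrics.tsv``, and the echo count, volume count, and tree name
below are illustrative values rather than requirements:

    import pandas as pd

    from tedana import selection

    # Load a previously computed component metric table (default tedana
    # file name; adjust the path to your own dataset).
    component_table = pd.read_csv("desc-tedana_metrics.tsv", sep="\t")

    # Run a packaged decision tree ("kundu" or "minimal") over the table.
    selector = selection.automatic_selection(
        component_table,
        n_echos=3,  # echoes in the acquisition (illustrative)
        n_vols=200,  # volumes in the time series (illustrative)
        tree="minimal",
    )

    # Final labels live in the selector's component table.
    print(selector.component_table["classification"].value_counts())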
diff --git a/tedana/selection/selection_utils.py b/tedana/selection/selection_utils.py
index 36260f4a1..7be51c341 100644
--- a/tedana/selection/selection_utils.py
+++ b/tedana/selection/selection_utils.py
@@ -19,7 +19,7 @@
 def selectcomps2use(selector, decide_comps):
     """
-    Give a list of component numbers that fit the classification types in
+    Get a list of component numbers that fit the classification types in
     decide_comps.

     Parameters
@@ -37,7 +37,7 @@
     Returns
     -------
     comps2use: :obj:`list[int]`
-        A list of component indices that should be used by a function
+        A list of component indices with classifications included in decide_comps
     """

     if "classification" not in selector.component_table:
@@ -99,8 +99,8 @@ def change_comptable_classifications(
     dont_warn_reclassify=False,
 ):
     """
-    Given information on whether a decision critereon is true or false for each component
-    change or don't change the component classification
+    Given information on whether a decision criterion is true or false for each
+    component, change or don't change the component classification

     Parameters
     ----------
@@ -223,6 +223,14 @@
         be separated by commas. If a classification is changed away from accepted or
         rejected and dont_warn_reclassify is False, then a warning is logged
+
+    Note
+    ----
+    This is designed to be run by
+    `tedana.selection.selection_utils.change_comptable_classifications`.
+    This function is run twice, once for the components where the decision
+    criterion is True and again for the components where it is False.
+
     """
     if classify_if != "nochange":
         changeidx = decision_boolean.index[np.asarray(decision_boolean) == boolstate]
@@ -319,9 +327,10 @@ def confirm_metrics_exist(component_table, necessary_metrics, function_name=None):
     """

     Parameters
     ----------
     component_table : (C x M) :obj:`pandas.DataFrame`
-        Component metric table. One row for each component, with a column for
-        each metric. The index should be the component number.
-    necessary_metrics : :obj:`set` a set of strings of metrics
+        Component metric table. One row for each component, with a column for
+        each metric. The index should be the component number.
+    necessary_metrics : :obj:`set`
+        A set of strings of metric names
     function_name : :obj:`str`
         Text identifying the function name that called this function

     Note
     -----
     This doesn't check if there are data in each metric's column, just that
-    the columns exist. Also, this requires identical strings for the names
-    of the metrics in necessary_metrics and the column labels in component_table
+    the columns exist. Also, the strings in `necessary_metrics` and the
+    column labels in component_table will only be matched if they're identical.
     """

     missing_metrics = necessary_metrics - set(component_table.columns)
@@ -367,7 +376,7 @@ def log_decision_tree_step(
     calc_outputs=None,
 ):
     """
-    Logging text to add for every decision tree calculation
+    Logging text to add after every decision tree calculation

     Parameters
     ----------
@@ -383,7 +392,7 @@
         need to use the component_table and may not require selecting
         components. For those functions, set comps2use==-1 to avoid
         logging a warning that no components were found.
Currently,
-    this is only used by calc_extend_factor
+    this is only used by `calc_extend_factor`

     decide_comps: :obj:`str` or :obj:`list[str]` or :obj:`list[int]`
         This is a string or a list of strings describing what classifications
@@ -404,9 +413,9 @@

     Returns
     -------
-    Information is added to the LGR.info logger. This either logs that
-    nothing was changed, the number of components classified as true or
-    false and what they changed to, or the cross component metrics that were
+    Information is added to the LGR.info logger. This either logs that \
+    nothing was changed, the number of components classified as true or \
+    false and what they changed to, or the cross component metrics that were \
     calculated
     """

@@ -450,7 +459,7 @@

     Returns
     -------
-    The info logger will add a line like:
+    The LGR.info logger will add a line like: \
     'Step 4: Total component classifications: 10 accepted, 5 provisionalreject, 8 rejected'
     """

@@ -780,7 +789,8 @@ def rho_elbow_kundu_liberal(
 def get_extend_factor(n_vols=None, extend_factor=None):
     """
-    extend_factor is a scaler used to set a threshold for the d_table_score
+    extend_factor is a scalar used to set a threshold for the d_table_score in
+    the kundu decision tree.
     It is either defined by the number of volumes in the time series or directly
     defined by the user. If it is defined by the user, that takes precedence over
     using the number of volumes in a calculation

From fe9ff446e22792d0ac2a7fe85d5e38d143f32781 Mon Sep 17 00:00:00 2001
From: Joshua Teves
Date: Mon, 5 Dec 2022 13:37:24 -0500
Subject: [PATCH 060/177] Update docs/output_file_descriptions.rst

---
 docs/output_file_descriptions.rst | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/output_file_descriptions.rst b/docs/output_file_descriptions.rst
index ab192ee35..ff7c934ca 100644
--- a/docs/output_file_descriptions.rst
+++ b/docs/output_file_descriptions.rst
@@ -4,7 +4,7 @@ Output file name descriptions
 tedana allows for multiple file naming conventions. The key labels and naming options
 for each convention that can be set using the `--convention` option are in `outputs.json`_.
-The output of `tedana` also includes a file called `registery.json` or
+The output of `tedana` also includes a file called `registry.json` or
 `desc-tedana_registry.json` that includes the keys and the matching file names for
 the output. The table below lists both these keys and the default "BIDS Derivatives"
 file names.
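Because `registry.json` is plain JSON mapping descriptive keys to output file
names, downstream scripts can look files up by key instead of hard-coding
paths. A minimal sketch, assuming the BIDS Derivatives naming convention; the
key used here ("ICA mixing tsv") is illustrative and should be checked against
the registry written by your own run:

    import json

    # Read the output registry written at the end of a tedana run.
    with open("desc-tedana_registry.json") as registry_file:
        registry = json.load(registry_file)

    # Values are file names (or lists of file names) relative to the
    # tedana output directory.
    print(registry.get("ICA mixing tsv"))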
From a6623557612253db5765541120168cb118744d4b Mon Sep 17 00:00:00 2001 From: Dan Handwerker <7406227+handwerkerd@users.noreply.github.com> Date: Mon, 5 Dec 2022 13:41:16 -0500 Subject: [PATCH 061/177] Working on improving selector documentation (#18) * working on selector init documentation * Breaking up outputs.rst * partially updated output_file_descriptions.rst * changed n_bold_comps to n_accepted_comps * n_bold_comps to n_accepted_comps * ComponentSelector.py API docs cleaned up * selection_nodes decision_docs updated * selection_nodes docstrings cleaned up * Fixed a test for selection_nodes * Updated faq for tedana_reclassify and tree options * docstrings in tedica and other small updates * Updated docstrings in selection_utils.py * Update docs/output_file_descriptions.rst Co-authored-by: Joshua Teves --- docs/component_table_descriptions.rst | 57 ++++ docs/contributing.rst | 2 +- docs/faq.rst | 56 +++- docs/index.rst | 4 +- docs/output_file_descriptions.rst | 139 ++++++++++ docs/outputs.rst | 195 +------------- tedana/resources/decision_trees/kundu.json | 4 +- tedana/selection/ComponentSelector.py | 212 +++++++-------- tedana/selection/selection_nodes.py | 287 +++++++-------------- tedana/selection/selection_utils.py | 44 ++-- tedana/selection/tedica.py | 35 +-- tedana/tests/test_selection_nodes.py | 4 +- tedana/workflows/tedana.py | 8 +- tedana/workflows/tedana_reclassify.py | 2 +- 14 files changed, 501 insertions(+), 548 deletions(-) create mode 100644 docs/component_table_descriptions.rst create mode 100644 docs/output_file_descriptions.rst diff --git a/docs/component_table_descriptions.rst b/docs/component_table_descriptions.rst new file mode 100644 index 000000000..b75169dd1 --- /dev/null +++ b/docs/component_table_descriptions.rst @@ -0,0 +1,57 @@ +############################# +Component table descriptions +############################# + + +In order to make sense of the rationale codes in the component tables, +consult the tables below. +TEDPCA rationale codes start with a "P", while TEDICA codes start with an "I". 
+ +=============== ============================================================= +Classification Description +=============== ============================================================= +accepted BOLD-like components included in denoised and high-Kappa data +rejected Non-BOLD components excluded from denoised and high-Kappa data +ignored Low-variance components included in denoised, but excluded + from high-Kappa data +=============== ============================================================= + + +TEDPCA codes +============ + +===== =============== ======================================================== +Code Classification Description +===== =============== ======================================================== +P001 rejected Low Rho, Kappa, and variance explained +P002 rejected Low variance explained +P003 rejected Kappa equals fmax +P004 rejected Rho equals fmax +P005 rejected Cumulative variance explained above 95% (only in + stabilized PCA decision tree) +P006 rejected Kappa below fmin (only in stabilized PCA decision tree) +P007 rejected Rho below fmin (only in stabilized PCA decision tree) +===== =============== ======================================================== + + +TEDICA codes +============ + +===== ================= ======================================================== +Code Classification Description +===== ================= ======================================================== +I001 rejected|accepted Manual classification +I002 rejected Rho greater than Kappa +I003 rejected More significant voxels in S0 model than R2 model +I004 rejected S0 Dice is higher than R2 Dice and high variance + explained +I005 rejected Noise F-value is higher than signal F-value and high + variance explained +I006 ignored No good components found +I007 rejected Mid-Kappa component +I008 ignored Low variance explained +I009 rejected Mid-Kappa artifact type A +I010 rejected Mid-Kappa artifact type B +I011 ignored ign_add0 +I012 ignored ign_add1 +===== ================= ======================================================== diff --git a/docs/contributing.rst b/docs/contributing.rst index 13fd5e25d..18d0e5ce0 100644 --- a/docs/contributing.rst +++ b/docs/contributing.rst @@ -125,7 +125,7 @@ this switch, but it also means that exactly reproducing previous MEICA analyses The other reason is that the core developers have chosen to look forwards rather than maintaining an older code base. -As described in the :ref:`governance` section, ``tedana`` is maintained by a small team of +As described in the `governance`_ section, ``tedana`` is maintained by a small team of volunteers with limited development time. If you'd like to use MEICA as has been previously published the code is available on `bitbucket`_ and freely available under a LGPL2 license. diff --git a/docs/faq.rst b/docs/faq.rst index 72ef2b439..4a9f71f94 100644 --- a/docs/faq.rst +++ b/docs/faq.rst @@ -76,12 +76,62 @@ applying tedana, and you encounter this problem, please submit a question to `Ne ******************************************************************************** -[tedana] I think that some BOLD ICA components have been misclassified as noise. +[tedana] Can I manually reclassify components? ******************************************************************************** -``tedana`` allows users to manually specify accepted components when calling the pipeline. -You can use the ``--manacc`` argument to specify the indices of components to accept. 
+``tedana_reclassify`` allows users to manually alter component classifications.
+This can be used both as a command line tool and as part of other interactive
+programs, such as `RICA`_. RICA creates a graphical interface that is similar to
+the built-in tedana reports and lets users interactively change component
+classifications. Both programs will log which component classifications were
+manually altered. If one wants to retain the original denoised time series,
+make sure to output the denoised time series into a separate directory.
+
+.. _RICA: https://github.com/ME-ICA/rica
+
+*************************************************************************************
+[tedana] What is the difference between the kundu and minimal decision trees?
+*************************************************************************************
+
+The decision tree is the series of conditions through which each component is
+classified as accepted or rejected. The kundu tree (`--tree kundu`)
+was used in Prantik Kundu's MEICA v2.7 and is the classification process that has long
+been used by ``tedana``, and users have been generally content with the results. The
+kundu tree used multiple intersecting metrics and rankings to classify components.
+How these steps may interact on specific datasets is opaque. While there is a kappa
+(T2*-weighted) elbow threshold and a rho (S0-weighted) elbow threshold, as discussed
+in publications, no component is accepted or rejected because of those thresholds.
+Users sometimes notice rejected components that clearly should have been accepted. For
+example, a component that included a clear T2*-weighted V1 response to a block design
+flashing checkerboard was sometimes rejected because the relatively large variance of
+that component interacted with a rejection criterion.
+
+The minimal tree (`--tree minimal`) is designed to be easier to understand and less
+likely to reject T2* weighted components. There are a few other criteria, but components
+with `kappa>kappa elbow` and `rhorho. One of the two can also be a number.
-    In that case a metric would be compared against a fixed threshold.
+    In that case, a metric would be compared against a fixed threshold.
     For example left='T2fitdiff_invsout_ICAmap_Tstat', right=0, and op='>'
     means this function will test T2fitdiff_invsout_ICAmap_Tstat>0
-    left_scale, right_scale: :obj:`float`, optional
+    left_scale, right_scale: :obj:`float` or :obj:`str`
         Multiply the left or right metrics value by a constant. For example
         if left='kappa', right='rho', right_scale=2, and op='>' this tests
-        kappa>(2*rho). These also be a string that labels a value in
+        kappa>(2*rho). These can also be a string that is a value in
         cross_component_metrics, since those will resolve to a single value.
         This cannot be a label for a component_table column since that would
-        output a different value for each component. default=1
-
-    op2: :obj:`str`, optional
-    left2, right2, left3, right3: :obj:`str` or :obj:`float`, optional
-    left2_scale, right2_scale, left3_scale, right3_scale: :obj:`float`, optional
+        output a different value for each component. Default=1
+    op2: :obj:`str`, Default=None
+    left2, right2, left3, right3: :obj:`str` or :obj:`float`, Default=None
+    left2_scale, right2_scale, left3_scale, right3_scale: :obj:`float` or :obj:`str`, Default=1
         This function can also be used to calculate the intersection of two or three
         boolean statements.
If op2, left2, and right2 are defined then this function returns
@@ -366,13 +276,13 @@
     This function is ideally run with one boolean statement at a time so that
     the result of each boolean is logged. For example, it's better to test
     kappa>kappa_elbow and rho>rho_elbow with two separate calls to this function
-    so that the results of each can be easily viewed. That said, particularly for
+    so that the results of each test can be easily viewed. That said, particularly for
     the original kundu decision tree, if you're making decisions on components
     with various classifications based on multiple boolean statements, the decision
     tree becomes really messy and the added functionality here is useful.

     Combinations of boolean statements only test with "and" and not "or". This is
-    an intentional decision because, if a classification changes if A or B are true
-    then the results of each should be logged separately
+    an intentional decision because, if a classification changes if A>B or C>D are true
+    then A>B and C>D should be logged separately
     """

     # predefine all outputs that should be logged
@@ -624,10 +534,11 @@ def dec_variance_lessthan_thresholds(
     tag_ifFalse=None,
 ):
     """
-    Finds components with variancerho_elbow are more likely rejected). The liberal threshold option takes the max of the two
-    elbows based on rho values. The assumption is that the thrshold on
+    elbows based on rho values. The assumption is that the threshold on
     unclassified components is always lower and can likely be excluded.

     Both rho elbows are now logged so that it will be possible to confirm this
     with data & make additional adjustments to this threshold
@@ -780,7 +789,8 @@ def rho_elbow_kundu_liberal(
 def get_extend_factor(n_vols=None, extend_factor=None):
     """
-    extend_factor is a scaler used to set a threshold for the d_table_score
+    extend_factor is a scalar used to set a threshold for the d_table_score in
+    the kundu decision tree.
     It is either defined by the number of volumes in the time series or directly
     defined by the user. If it is defined by the user, that takes precedence over
     using the number of volumes in a calculation

diff --git a/tedana/selection/tedica.py b/tedana/selection/tedica.py
index 991b37742..7631f6f4e 100644
--- a/tedana/selection/tedica.py
+++ b/tedana/selection/tedica.py
@@ -82,32 +82,30 @@ def manual_selection(comptable, acc=None, rej=None):
     return comptable, metric_metadata


-def automatic_selection(comptable, n_echos, n_vols, tree="minimal"):
-    """Classify components based on component table and tree type.
+def automatic_selection(component_table, n_echos, n_vols, tree="minimal"):
+    """Classify components based on component table and decision tree type.

     Parameters
     ----------
-    comptable: pd.DataFrame
+    component_table: :obj:`pd.DataFrame`
         The component table to classify
-    n_echos: int
+    n_echos: :obj:`int`
         The number of echoes in this dataset
-    tree: str
-        The type of tree to use for the ComponentSelector object
+    n_vols: :obj:`int`
+        The number of volumes (time points) in this dataset
+    tree: :obj:`str`
+        The type of tree to use for the ComponentSelector object.
Default="minimal"

     Returns
     -------
-    A dataframe of the component table, after classification and reorder
-    The metadata associated with the component table
-    See Also
-    --------
-    ComponentSelector, the class used to represent the classification process
+    selector: :obj:`tedana.selection.ComponentSelector`
+        Contains component classifications in a component_table and provenance
+        and metadata from the component selection process
+
     Notes
     -----
-    The selection algorithm used in this function was originated in ME-ICA
+    If tree=kundu, the selection algorithm used in this function originated in ME-ICA
     by Prantik Kundu, and his original implementation is available at:
-    https://github.com/ME-ICA/me-ica/blob/\
-    b2781dd087ab9de99a2ec3925f04f02ce84f0adc/meica.libs/select_model.py
+    https://github.com/ME-ICA/me-ica/blob/b2781dd087ab9de99a2ec3925f04f02ce84f0adc/meica.libs/select_model.py

     The appropriate citation is :footcite:t:`kundu2013integrated`.

@@ -119,9 +117,14 @@
     components, a hypercommented version of this attempt is available at:
     https://gist.github.com/emdupre/ca92d52d345d08ee85e104093b81482e

+    If tree==minimal, the selection algorithm is based on the kundu tree with differences
+    described in the `FAQ`_
+
     References
     ----------
     .. footbibliography::
+
+    .. _FAQ: faq.html
     """
     LGR.info("Performing ICA component selection with Kundu decision tree v2.5")
     RepLGR.info(
@@ -131,12 +134,12 @@
         "decision tree (v2.5) \\citep{kundu2013integrated}."
     )

-    comptable["classification_tags"] = ""
+    component_table["classification_tags"] = ""
     xcomp = {
         "n_echos": n_echos,
         "n_vols": n_vols,
     }
-    selector = ComponentSelector(tree, comptable, cross_component_metrics=xcomp)
+    selector = ComponentSelector(tree, component_table, cross_component_metrics=xcomp)
     selector.select()
     selector.metadata = collect.get_metadata(selector.component_table)

diff --git a/tedana/tests/test_selection_nodes.py b/tedana/tests/test_selection_nodes.py
index bea20ea62..e5e05c1c1 100644
--- a/tedana/tests/test_selection_nodes.py
+++ b/tedana/tests/test_selection_nodes.py
@@ -671,7 +671,7 @@ def test_dec_classification_doesnt_exist_smoke():
         log_extra_report="report log",
         log_extra_info="info log",
         custom_node_label="custom label",
-        tag_ifTrue="test true tag",
+        tag="test true tag",
     )
     assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numTrue"] == 0
     # Lists the number of components in decide_comps in numFalse
@@ -705,7 +705,7 @@ def test_dec_classification_doesnt_exist_smoke():
         "changed accepted",
         decide_comps,
         class_comp_exists="provisional reject",
-        tag_ifTrue="test true tag",
+        tag="test true tag",
     )
     assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numTrue"] == 17
     assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numFalse"] == 0

diff --git a/tedana/workflows/tedana.py b/tedana/workflows/tedana.py
index 434b822cb..67084fd6a 100644
--- a/tedana/workflows/tedana.py
+++ b/tedana/workflows/tedana.py
@@ -649,10 +649,10 @@ def tedana_workflow(
         metrics=required_metrics,
     )
     ica_selection = selection.automatic_selection(comptable, n_echos, n_vols, tree=tree)
-    n_bold_comps = ica_selection.n_bold_comps
-    if (n_restarts < maxrestart) and (n_bold_comps == 0):
+    n_accepted_comps = ica_selection.n_accepted_comps
+    if (n_restarts < maxrestart) and (n_accepted_comps == 0):
         LGR.warning("No BOLD components found.
Re-attempting ICA.") - elif n_bold_comps == 0: + elif n_accepted_comps == 0: LGR.warning("No BOLD components found, but maximum number of restarts reached.") keep_restarting = False else: @@ -723,7 +723,7 @@ def tedana_workflow( } io_generator.save_file(decomp_metadata, "ICA decomposition json") - if ica_selection.n_bold_comps == 0: + if ica_selection.n_accepted_comps == 0: LGR.warning("No BOLD components detected! Please check data and results!") # TODO: un-hack separate comptable diff --git a/tedana/workflows/tedana_reclassify.py b/tedana/workflows/tedana_reclassify.py index af777f669..36e83394e 100644 --- a/tedana/workflows/tedana_reclassify.py +++ b/tedana/workflows/tedana_reclassify.py @@ -314,7 +314,7 @@ def post_tedana( # Save component selector and tree selector.to_files(io_generator) - if selector.n_bold_comps == 0: + if selector.n_accepted_comps == 0: LGR.warning("No BOLD components detected! Please check data and results!") mmix_orig = mmix.copy() From f7ee8db3321f9ce254aa9c265639e7a2517d3214 Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Mon, 5 Dec 2022 13:55:54 -0500 Subject: [PATCH 062/177] Remove manual selection --- tedana/selection/__init__.py | 4 +- tedana/selection/tedica.py | 72 ---------------------------------- tedana/tests/test_selection.py | 34 ---------------- 3 files changed, 2 insertions(+), 108 deletions(-) delete mode 100644 tedana/tests/test_selection.py diff --git a/tedana/selection/__init__.py b/tedana/selection/__init__.py index 6fb7795af..8a2f3dc5f 100644 --- a/tedana/selection/__init__.py +++ b/tedana/selection/__init__.py @@ -1,7 +1,7 @@ # emacs: -*- mode: python-mode; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*- # ex: set sts=4 ts=4 sw=4 et: -from .tedica import automatic_selection, manual_selection +from .tedica import automatic_selection from .tedpca import kundu_tedpca -__all__ = ["kundu_tedpca", "kundu_selection_v2", "manual_selection"] +__all__ = ["kundu_tedpca", "automatic_selection"] diff --git a/tedana/selection/tedica.py b/tedana/selection/tedica.py index 7631f6f4e..0c57a43a3 100644 --- a/tedana/selection/tedica.py +++ b/tedana/selection/tedica.py @@ -3,85 +3,13 @@ """ import logging -import numpy as np - from tedana.metrics import collect from tedana.selection.ComponentSelector import ComponentSelector -from tedana.selection.selection_utils import clean_dataframe LGR = logging.getLogger("GENERAL") RepLGR = logging.getLogger("REPORT") -def manual_selection(comptable, acc=None, rej=None): - """ - Perform manual selection of components. - - Parameters - ---------- - comptable : (C x M) :obj:`pandas.DataFrame` - Component metric table, where `C` is components and `M` is metrics - acc : :obj:`list`, optional - List of accepted components. Default is None. - rej : :obj:`list`, optional - List of rejected components. Default is None. - - Returns - ------- - comptable : (C x M) :obj:`pandas.DataFrame` - Component metric table with classification. - metric_metadata : :obj:`dict` - Dictionary with metadata about calculated metrics. - Each entry corresponds to a column in ``comptable``. - """ - LGR.info("Performing manual ICA component selection") - RepLGR.info( - "Next, components were manually classified as " - "BOLD (TE-dependent), non-BOLD (TE-independent), or " - "uncertain (low-variance)." 
- ) - # NOTE: during a merge conflict this got split oddly in a diff - # Please pay attention to this part to make sure it makes sense - if ( - "classification" in comptable.columns - and "original_classification" not in comptable.columns - ): - comptable["original_classification"] = comptable["classification"] - # comptable["original_rationale"] = comptable["rationale"] - - # comptable["rationale"] = "" - - all_comps = comptable.index.values - if acc is not None: - acc = [int(comp) for comp in acc] - - if rej is not None: - rej = [int(comp) for comp in rej] - - if acc is not None and rej is None: - rej = sorted(np.setdiff1d(all_comps, acc)) - elif acc is None and rej is not None: - acc = sorted(np.setdiff1d(all_comps, rej)) - elif acc is None and rej is None: - LGR.info("No manually accepted or rejected components supplied. Accepting all components.") - # Accept all components if no manual selection provided - acc = all_comps[:] - rej = [] - - ign = np.setdiff1d(all_comps, np.union1d(acc, rej)) - comptable.loc[acc, "classification"] = "accepted" - comptable.loc[rej, "classification"] = "rejected" - # TODO Need to fix classification_tags here to better interact with any previous tags - # comptable.loc[rej, "classification_tags"] += "Manual" - comptable.loc[ign, "classification"] = "ignored" - # comptable.loc[ign, "classification_tags"] += "Manual" - - # Move decision columns to end - comptable = clean_dataframe(comptable) - metric_metadata = collect.get_metadata(comptable) - return comptable, metric_metadata - - def automatic_selection(component_table, n_echos, n_vols, tree="minimal"): """Classify components based on component table and decision tree type. diff --git a/tedana/tests/test_selection.py b/tedana/tests/test_selection.py deleted file mode 100644 index 8bed1eb19..000000000 --- a/tedana/tests/test_selection.py +++ /dev/null @@ -1,34 +0,0 @@ -""" -Tests for tedana.selection -""" - -import numpy as np -import pandas as pd - -from tedana import selection - - -def test_manual_selection(): - """ - Check that manual_selection runs correctly for different combinations of - accepted and rejected components. 
- """ - comptable = pd.DataFrame(index=np.arange(100)) - comptable, metric_metadata = selection.manual_selection(comptable, acc=[1, 3, 5]) - assert comptable.loc[comptable.classification == "accepted"].shape[0] == 3 - assert comptable.loc[comptable.classification == "rejected"].shape[0] == ( - comptable.shape[0] - 3 - ) - - comptable, metric_metadata = selection.manual_selection(comptable, rej=[1, 3, 5]) - assert comptable.loc[comptable.classification == "rejected"].shape[0] == 3 - assert comptable.loc[comptable.classification == "accepted"].shape[0] == ( - comptable.shape[0] - 3 - ) - - comptable, metric_metadata = selection.manual_selection( - comptable, acc=[0, 2, 4], rej=[1, 3, 5] - ) - assert comptable.loc[comptable.classification == "accepted"].shape[0] == 3 - assert comptable.loc[comptable.classification == "rejected"].shape[0] == 3 - assert comptable.loc[comptable.classification == "ignored"].shape[0] == comptable.shape[0] - 6 From 4fc656f1d4aebc8d28c093347d6d08bf217f0edc Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Mon, 5 Dec 2022 13:56:11 -0500 Subject: [PATCH 063/177] Force user to pick a tree --- Makefile | 2 +- tedana/workflows/tedana.py | 22 +++++++++++----------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/Makefile b/Makefile index b26334242..d7cc34e9c 100644 --- a/Makefile +++ b/Makefile @@ -12,8 +12,8 @@ help: @echo " all_tests to run 'lint', 'unittest', and 'integration'" lint: - @flake8 tedana @black --check --diff tedana + @flake8 tedana unittest: @py.test --skipintegration --cov-append --cov-report term-missing --cov=tedana tedana/ diff --git a/tedana/workflows/tedana.py b/tedana/workflows/tedana.py index 5338a4c83..e676c8131 100644 --- a/tedana/workflows/tedana.py +++ b/tedana/workflows/tedana.py @@ -77,6 +77,17 @@ def _get_parser(): help="Echo times (in ms). E.g., 15.0 39.0 63.0", required=True, ) + required.add_argument( + "-t", + "--tree", + dest="tree", + help=( + "Decision tree to use. You may use a " + "packaged tree (kundu, minimal) or supply a JSON " + "file which matches the decision tree file " + "specification." + ), + ) optional.add_argument( "--out-dir", dest="out_dir", @@ -153,17 +164,6 @@ def _get_parser(): ), default="aic", ) - optional.add_argument( - "--tree", - dest="tree", - help=( - "Decision tree to use. You may use a " - "packaged tree (kundu, minimal) or supply a JSON " - "file which matches the decision tree file " - "specification." - ), - default="minimal", - ) optional.add_argument( "--seed", dest="fixed_seed", From 40383369d2c41a189a21f6bb0e522e88b6bfa0fe Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Mon, 5 Dec 2022 14:36:17 -0500 Subject: [PATCH 064/177] Fix CLI test --- tedana/tests/test_integration.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tedana/tests/test_integration.py b/tedana/tests/test_integration.py index 5b16f075f..8593b408b 100644 --- a/tedana/tests/test_integration.py +++ b/tedana/tests/test_integration.py @@ -194,6 +194,8 @@ def test_integration_three_echo(skip_integration): "14.5", "38.5", "62.5", + "-t", + "minimal", "--out-dir", out_dir_manual, "--debug", From ca578259b1e9672cd02458fe2e7302afff63037d Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Mon, 5 Dec 2022 15:08:10 -0500 Subject: [PATCH 065/177] Revert "Force user to pick a tree" This reverts commit 4fc656f1d4aebc8d28c093347d6d08bf217f0edc. 
--- Makefile | 2 +- tedana/workflows/tedana.py | 22 +++++++++++----------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/Makefile b/Makefile index d7cc34e9c..b26334242 100644 --- a/Makefile +++ b/Makefile @@ -12,8 +12,8 @@ help: @echo " all_tests to run 'lint', 'unittest', and 'integration'" lint: - @black --check --diff tedana @flake8 tedana + @black --check --diff tedana unittest: @py.test --skipintegration --cov-append --cov-report term-missing --cov=tedana tedana/ diff --git a/tedana/workflows/tedana.py b/tedana/workflows/tedana.py index e676c8131..5338a4c83 100644 --- a/tedana/workflows/tedana.py +++ b/tedana/workflows/tedana.py @@ -77,17 +77,6 @@ def _get_parser(): help="Echo times (in ms). E.g., 15.0 39.0 63.0", required=True, ) - required.add_argument( - "-t", - "--tree", - dest="tree", - help=( - "Decision tree to use. You may use a " - "packaged tree (kundu, minimal) or supply a JSON " - "file which matches the decision tree file " - "specification." - ), - ) optional.add_argument( "--out-dir", dest="out_dir", @@ -164,6 +153,17 @@ def _get_parser(): ), default="aic", ) + optional.add_argument( + "--tree", + dest="tree", + help=( + "Decision tree to use. You may use a " + "packaged tree (kundu, minimal) or supply a JSON " + "file which matches the decision tree file " + "specification." + ), + default="minimal", + ) optional.add_argument( "--seed", dest="fixed_seed", From 3a8bd8b91635e33c84d8dbb665779cdb52508b26 Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Mon, 5 Dec 2022 15:08:32 -0500 Subject: [PATCH 066/177] Revert "Fix CLI test" This reverts commit 40383369d2c41a189a21f6bb0e522e88b6bfa0fe. --- tedana/tests/test_integration.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/tedana/tests/test_integration.py b/tedana/tests/test_integration.py index 8593b408b..5b16f075f 100644 --- a/tedana/tests/test_integration.py +++ b/tedana/tests/test_integration.py @@ -194,8 +194,6 @@ def test_integration_three_echo(skip_integration): "14.5", "38.5", "62.5", - "-t", - "minimal", "--out-dir", out_dir_manual, "--debug", From e77cdd0d2990a18aa6c8ae55445e5173438a1e24 Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Mon, 5 Dec 2022 15:19:19 -0500 Subject: [PATCH 067/177] Make kundu default tree --- tedana/selection/tedica.py | 4 ++-- tedana/workflows/tedana.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tedana/selection/tedica.py b/tedana/selection/tedica.py index 0c57a43a3..3331136f4 100644 --- a/tedana/selection/tedica.py +++ b/tedana/selection/tedica.py @@ -10,7 +10,7 @@ RepLGR = logging.getLogger("REPORT") -def automatic_selection(component_table, n_echos, n_vols, tree="minimal"): +def automatic_selection(component_table, n_echos, n_vols, tree="kundu"): """Classify components based on component table and decision tree type. Parameters @@ -20,7 +20,7 @@ def automatic_selection(component_table, n_echos, n_vols, tree="minimal"): n_echos: :obj:`int` The number of echoes in this dataset tree: :obj:`str` - The type of tree to use for the ComponentSelector object. Default="minimal" + The type of tree to use for the ComponentSelector object. Default="kundu" Returns ------- diff --git a/tedana/workflows/tedana.py b/tedana/workflows/tedana.py index 5338a4c83..2839fa682 100644 --- a/tedana/workflows/tedana.py +++ b/tedana/workflows/tedana.py @@ -162,7 +162,7 @@ def _get_parser(): "file which matches the decision tree file " "specification." 
), - default="minimal", + default="kundu", ) optional.add_argument( "--seed", @@ -322,7 +322,7 @@ def tedana_workflow( prefix="", fittype="loglin", combmode="t2s", - tree="minimal", + tree="kundu", tedpca="aic", fixed_seed=42, maxit=500, From 9d2246854b8f8bce4b8b1e250041e952a05d1951 Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Mon, 5 Dec 2022 17:05:54 -0500 Subject: [PATCH 068/177] Attempt to fix error --- tedana/reporting/static_figures.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tedana/reporting/static_figures.py b/tedana/reporting/static_figures.py index c8a2cd75d..3740f1637 100644 --- a/tedana/reporting/static_figures.py +++ b/tedana/reporting/static_figures.py @@ -196,14 +196,14 @@ def comp_figures(ts, mask, comptable, mmix, io_generator, png_cmap): for compnum in comptable.index.values: if comptable.loc[compnum, "classification"] == "accepted": line_color = "g" - expl_text = "accepted reason(s): " + comptable.loc[compnum, "classification_tags"] + expl_text = "accepted reason(s): " + str(comptable.loc[compnum, "classification_tags"]) elif comptable.loc[compnum, "classification"] == "rejected": line_color = "r" - expl_text = "rejected reason(s): " + comptable.loc[compnum, "classification_tags"] + expl_text = "rejected reason(s): " + str(comptable.loc[compnum, "classification_tags"]) elif comptable.loc[compnum, "classification"] == "ignored": line_color = "k" - expl_text = "ignored reason(s): " + comptable.loc[compnum, "classification_tags"] + expl_text = "ignored reason(s): " + str(comptable.loc[compnum, "classification_tags"]) else: # Classification not added # If new, this will keep code running From c7349bd3eeab6473eaf9173e49e2b71ffc070a6e Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Tue, 6 Dec 2022 14:23:15 -0500 Subject: [PATCH 069/177] Adds input data to registry --- tedana/resources/config/outputs.json | 4 ++++ tedana/tests/data/cornell_three_echo_outputs.txt | 3 +++ tedana/tests/data/fiu_four_echo_outputs.txt | 4 ++++ tedana/tests/data/nih_five_echo_outputs_verbose.txt | 5 +++++ tedana/workflows/tedana.py | 8 ++++++++ 5 files changed, 24 insertions(+) diff --git a/tedana/resources/config/outputs.json b/tedana/resources/config/outputs.json index 3e185b887..4f75aeb77 100644 --- a/tedana/resources/config/outputs.json +++ b/tedana/resources/config/outputs.json @@ -215,6 +215,10 @@ "orig": "ica_orth_mixing", "bidsv1.5.0": "desc-ICAOrth_mixing" }, + "input img": { + "orig": "raw_echo-{echo}", + "bidsv1.5.0": "echo-{echo}_desc-TedanaInput" + }, "registry json": { "orig": "registry", "bidsv1.5.0": "desc-tedana_registry" diff --git a/tedana/tests/data/cornell_three_echo_outputs.txt b/tedana/tests/data/cornell_three_echo_outputs.txt index 821f07b8e..11945aefd 100644 --- a/tedana/tests/data/cornell_three_echo_outputs.txt +++ b/tedana/tests/data/cornell_three_echo_outputs.txt @@ -24,6 +24,9 @@ desc-optcomAccepted_bold.nii.gz desc-optcomDenoised_bold.nii.gz desc-optcomRejected_bold.nii.gz desc-optcom_bold.nii.gz +echo-1_desc-TedanaInput.nii.gz +echo-2_desc-TedanaInput.nii.gz +echo-3_desc-TedanaInput.nii.gz figures figures/carpet_optcom.svg figures/carpet_denoised.svg diff --git a/tedana/tests/data/fiu_four_echo_outputs.txt b/tedana/tests/data/fiu_four_echo_outputs.txt index 77dc5869e..62f85d849 100644 --- a/tedana/tests/data/fiu_four_echo_outputs.txt +++ b/tedana/tests/data/fiu_four_echo_outputs.txt @@ -49,6 +49,7 @@ echo-1_desc-ICA_components.nii.gz echo-1_desc-PCAT2ModelPredictions_components.nii.gz 
echo-1_desc-PCAS0ModelPredictions_components.nii.gz echo-1_desc-PCA_components.nii.gz +echo-1_desc-TedanaInput.nii.gz echo-1_desc-Rejected_bold.nii.gz echo-2_desc-Accepted_bold.nii.gz echo-2_desc-Denoised_bold.nii.gz @@ -59,6 +60,7 @@ echo-2_desc-PCAT2ModelPredictions_components.nii.gz echo-2_desc-PCAS0ModelPredictions_components.nii.gz echo-2_desc-PCA_components.nii.gz echo-2_desc-Rejected_bold.nii.gz +echo-2_desc-TedanaInput.nii.gz echo-3_desc-Accepted_bold.nii.gz echo-3_desc-Denoised_bold.nii.gz echo-3_desc-ICAT2ModelPredictions_components.nii.gz @@ -68,6 +70,7 @@ echo-3_desc-PCAT2ModelPredictions_components.nii.gz echo-3_desc-PCAS0ModelPredictions_components.nii.gz echo-3_desc-PCA_components.nii.gz echo-3_desc-Rejected_bold.nii.gz +echo-3_desc-TedanaInput.nii.gz echo-4_desc-Accepted_bold.nii.gz echo-4_desc-Denoised_bold.nii.gz echo-4_desc-ICAT2ModelPredictions_components.nii.gz @@ -77,6 +80,7 @@ echo-4_desc-PCAT2ModelPredictions_components.nii.gz echo-4_desc-PCAS0ModelPredictions_components.nii.gz echo-4_desc-PCA_components.nii.gz echo-4_desc-Rejected_bold.nii.gz +echo-4_desc-TedanaInput.nii.gz references.bib report.txt tedana_report.html diff --git a/tedana/tests/data/nih_five_echo_outputs_verbose.txt b/tedana/tests/data/nih_five_echo_outputs_verbose.txt index 948487065..a8291ed95 100644 --- a/tedana/tests/data/nih_five_echo_outputs_verbose.txt +++ b/tedana/tests/data/nih_five_echo_outputs_verbose.txt @@ -42,6 +42,7 @@ echo-1_desc-PCAT2ModelPredictions_components.nii.gz echo-1_desc-PCAS0ModelPredictions_components.nii.gz echo-1_desc-PCA_components.nii.gz echo-1_desc-Rejected_bold.nii.gz +echo-1_desc-TedanaInput.nii.gz echo-2_desc-Accepted_bold.nii.gz echo-2_desc-Denoised_bold.nii.gz echo-2_desc-ICAT2ModelPredictions_components.nii.gz @@ -51,6 +52,7 @@ echo-2_desc-PCAT2ModelPredictions_components.nii.gz echo-2_desc-PCAS0ModelPredictions_components.nii.gz echo-2_desc-PCA_components.nii.gz echo-2_desc-Rejected_bold.nii.gz +echo-2_desc-TedanaInput.nii.gz echo-3_desc-Accepted_bold.nii.gz echo-3_desc-Denoised_bold.nii.gz echo-3_desc-ICAT2ModelPredictions_components.nii.gz @@ -60,6 +62,7 @@ echo-3_desc-PCAT2ModelPredictions_components.nii.gz echo-3_desc-PCAS0ModelPredictions_components.nii.gz echo-3_desc-PCA_components.nii.gz echo-3_desc-Rejected_bold.nii.gz +echo-3_desc-TedanaInput.nii.gz echo-4_desc-Accepted_bold.nii.gz echo-4_desc-Denoised_bold.nii.gz echo-4_desc-ICAT2ModelPredictions_components.nii.gz @@ -69,6 +72,7 @@ echo-4_desc-PCAT2ModelPredictions_components.nii.gz echo-4_desc-PCAS0ModelPredictions_components.nii.gz echo-4_desc-PCA_components.nii.gz echo-4_desc-Rejected_bold.nii.gz +echo-4_desc-TedanaInput.nii.gz echo-5_desc-Accepted_bold.nii.gz echo-5_desc-Denoised_bold.nii.gz echo-5_desc-ICAT2ModelPredictions_components.nii.gz @@ -78,6 +82,7 @@ echo-5_desc-PCAT2ModelPredictions_components.nii.gz echo-5_desc-PCAS0ModelPredictions_components.nii.gz echo-5_desc-PCA_components.nii.gz echo-5_desc-Rejected_bold.nii.gz +echo-5_desc-TedanaInput.nii.gz references.bib report.txt tedana_report.html diff --git a/tedana/workflows/tedana.py b/tedana/workflows/tedana.py index 2839fa682..75db083f1 100644 --- a/tedana/workflows/tedana.py +++ b/tedana/workflows/tedana.py @@ -488,6 +488,14 @@ def tedana_workflow( "to correct your TR to the value it should be." 
) + # Save input data + for i in range(n_echos): + io_generator.save_file( + io.new_nii_like(ref_img, np.squeeze(catd[:, i, :])), + "input img", + echo=i + 1, + ) + if mixm is not None and op.isfile(mixm): mixm = op.abspath(mixm) # Allow users to re-run on same folder From 7959f8f702b65be0a06406f33fbf65c4f46f6c58 Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Tue, 6 Dec 2022 14:33:19 -0500 Subject: [PATCH 070/177] Revert "Adds input data to registry" This reverts commit c7349bd3eeab6473eaf9173e49e2b71ffc070a6e. --- tedana/resources/config/outputs.json | 4 ---- tedana/tests/data/cornell_three_echo_outputs.txt | 3 --- tedana/tests/data/fiu_four_echo_outputs.txt | 4 ---- tedana/tests/data/nih_five_echo_outputs_verbose.txt | 5 ----- tedana/workflows/tedana.py | 8 -------- 5 files changed, 24 deletions(-) diff --git a/tedana/resources/config/outputs.json b/tedana/resources/config/outputs.json index 4f75aeb77..3e185b887 100644 --- a/tedana/resources/config/outputs.json +++ b/tedana/resources/config/outputs.json @@ -215,10 +215,6 @@ "orig": "ica_orth_mixing", "bidsv1.5.0": "desc-ICAOrth_mixing" }, - "input img": { - "orig": "raw_echo-{echo}", - "bidsv1.5.0": "echo-{echo}_desc-TedanaInput" - }, "registry json": { "orig": "registry", "bidsv1.5.0": "desc-tedana_registry" diff --git a/tedana/tests/data/cornell_three_echo_outputs.txt b/tedana/tests/data/cornell_three_echo_outputs.txt index 11945aefd..821f07b8e 100644 --- a/tedana/tests/data/cornell_three_echo_outputs.txt +++ b/tedana/tests/data/cornell_three_echo_outputs.txt @@ -24,9 +24,6 @@ desc-optcomAccepted_bold.nii.gz desc-optcomDenoised_bold.nii.gz desc-optcomRejected_bold.nii.gz desc-optcom_bold.nii.gz -echo-1_desc-TedanaInput.nii.gz -echo-2_desc-TedanaInput.nii.gz -echo-3_desc-TedanaInput.nii.gz figures figures/carpet_optcom.svg figures/carpet_denoised.svg diff --git a/tedana/tests/data/fiu_four_echo_outputs.txt b/tedana/tests/data/fiu_four_echo_outputs.txt index 62f85d849..77dc5869e 100644 --- a/tedana/tests/data/fiu_four_echo_outputs.txt +++ b/tedana/tests/data/fiu_four_echo_outputs.txt @@ -49,7 +49,6 @@ echo-1_desc-ICA_components.nii.gz echo-1_desc-PCAT2ModelPredictions_components.nii.gz echo-1_desc-PCAS0ModelPredictions_components.nii.gz echo-1_desc-PCA_components.nii.gz -echo-1_desc-TedanaInput.nii.gz echo-1_desc-Rejected_bold.nii.gz echo-2_desc-Accepted_bold.nii.gz echo-2_desc-Denoised_bold.nii.gz @@ -60,7 +59,6 @@ echo-2_desc-PCAT2ModelPredictions_components.nii.gz echo-2_desc-PCAS0ModelPredictions_components.nii.gz echo-2_desc-PCA_components.nii.gz echo-2_desc-Rejected_bold.nii.gz -echo-2_desc-TedanaInput.nii.gz echo-3_desc-Accepted_bold.nii.gz echo-3_desc-Denoised_bold.nii.gz echo-3_desc-ICAT2ModelPredictions_components.nii.gz @@ -70,7 +68,6 @@ echo-3_desc-PCAT2ModelPredictions_components.nii.gz echo-3_desc-PCAS0ModelPredictions_components.nii.gz echo-3_desc-PCA_components.nii.gz echo-3_desc-Rejected_bold.nii.gz -echo-3_desc-TedanaInput.nii.gz echo-4_desc-Accepted_bold.nii.gz echo-4_desc-Denoised_bold.nii.gz echo-4_desc-ICAT2ModelPredictions_components.nii.gz @@ -80,7 +77,6 @@ echo-4_desc-PCAT2ModelPredictions_components.nii.gz echo-4_desc-PCAS0ModelPredictions_components.nii.gz echo-4_desc-PCA_components.nii.gz echo-4_desc-Rejected_bold.nii.gz -echo-4_desc-TedanaInput.nii.gz references.bib report.txt tedana_report.html diff --git a/tedana/tests/data/nih_five_echo_outputs_verbose.txt b/tedana/tests/data/nih_five_echo_outputs_verbose.txt index a8291ed95..948487065 100644 --- a/tedana/tests/data/nih_five_echo_outputs_verbose.txt +++ 
b/tedana/tests/data/nih_five_echo_outputs_verbose.txt @@ -42,7 +42,6 @@ echo-1_desc-PCAT2ModelPredictions_components.nii.gz echo-1_desc-PCAS0ModelPredictions_components.nii.gz echo-1_desc-PCA_components.nii.gz echo-1_desc-Rejected_bold.nii.gz -echo-1_desc-TedanaInput.nii.gz echo-2_desc-Accepted_bold.nii.gz echo-2_desc-Denoised_bold.nii.gz echo-2_desc-ICAT2ModelPredictions_components.nii.gz @@ -52,7 +51,6 @@ echo-2_desc-PCAT2ModelPredictions_components.nii.gz echo-2_desc-PCAS0ModelPredictions_components.nii.gz echo-2_desc-PCA_components.nii.gz echo-2_desc-Rejected_bold.nii.gz -echo-2_desc-TedanaInput.nii.gz echo-3_desc-Accepted_bold.nii.gz echo-3_desc-Denoised_bold.nii.gz echo-3_desc-ICAT2ModelPredictions_components.nii.gz @@ -62,7 +60,6 @@ echo-3_desc-PCAT2ModelPredictions_components.nii.gz echo-3_desc-PCAS0ModelPredictions_components.nii.gz echo-3_desc-PCA_components.nii.gz echo-3_desc-Rejected_bold.nii.gz -echo-3_desc-TedanaInput.nii.gz echo-4_desc-Accepted_bold.nii.gz echo-4_desc-Denoised_bold.nii.gz echo-4_desc-ICAT2ModelPredictions_components.nii.gz @@ -72,7 +69,6 @@ echo-4_desc-PCAT2ModelPredictions_components.nii.gz echo-4_desc-PCAS0ModelPredictions_components.nii.gz echo-4_desc-PCA_components.nii.gz echo-4_desc-Rejected_bold.nii.gz -echo-4_desc-TedanaInput.nii.gz echo-5_desc-Accepted_bold.nii.gz echo-5_desc-Denoised_bold.nii.gz echo-5_desc-ICAT2ModelPredictions_components.nii.gz @@ -82,7 +78,6 @@ echo-5_desc-PCAT2ModelPredictions_components.nii.gz echo-5_desc-PCAS0ModelPredictions_components.nii.gz echo-5_desc-PCA_components.nii.gz echo-5_desc-Rejected_bold.nii.gz -echo-5_desc-TedanaInput.nii.gz references.bib report.txt tedana_report.html diff --git a/tedana/workflows/tedana.py b/tedana/workflows/tedana.py index 75db083f1..2839fa682 100644 --- a/tedana/workflows/tedana.py +++ b/tedana/workflows/tedana.py @@ -488,14 +488,6 @@ def tedana_workflow( "to correct your TR to the value it should be." ) - # Save input data - for i in range(n_echos): - io_generator.save_file( - io.new_nii_like(ref_img, np.squeeze(catd[:, i, :])), - "input img", - echo=i + 1, - ) - if mixm is not None and op.isfile(mixm): mixm = op.abspath(mixm) # Allow users to re-run on same folder From 5ff58839bf7628be0678419884c17343b17dd055 Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Mon, 12 Dec 2022 14:28:12 -0500 Subject: [PATCH 071/177] Adds input registration --- tedana/io.py | 12 ++++++++++++ tedana/workflows/tedana.py | 9 +++++++++ 2 files changed, 21 insertions(+) diff --git a/tedana/io.py b/tedana/io.py index 878378193..1c15942e2 100644 --- a/tedana/io.py +++ b/tedana/io.py @@ -168,6 +168,18 @@ def _determine_extension(self, description, name): return extension + def register_input(self, names): + """Register input filenames. + + Parameters + ---------- + names : list[str] + The list of filenames being input as multi-echo volumes. + """ + self.registry["input img"] = [ + op.relpath(name, start=self.out_dir) for name in names + ] + def get_name(self, description, **kwargs): """Generate a file full path to simplify file output. 
diff --git a/tedana/workflows/tedana.py b/tedana/workflows/tedana.py index 2839fa682..a0c1ff3d6 100644 --- a/tedana/workflows/tedana.py +++ b/tedana/workflows/tedana.py @@ -460,8 +460,13 @@ def tedana_workflow( # a float on [0, 1] or an int >= 1 tedpca = check_tedpca_value(tedpca, is_parser=False) + # For z-catted files, make sure it's a list of size 1 + if isinstance(data, str): + data = [data] + LGR.info("Loading input data: {}".format([f for f in data])) catd, ref_img = io.load_data(data, n_echos=n_echos) + io_generator = io.OutputGenerator( ref_img, convention=convention, @@ -472,6 +477,10 @@ def tedana_workflow( verbose=verbose, ) + # Record inputs to OutputGenerator + # TODO: turn this into an IOManager since this isn't really output + io_generator.register_input(data) + n_samp, n_echos, n_vols = catd.shape LGR.debug("Resulting data shape: {}".format(catd.shape)) From 6fac77edab387cd2610ce72334420ccc97fe3553 Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Mon, 12 Dec 2022 14:29:19 -0500 Subject: [PATCH 072/177] Appease linter --- tedana/io.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/tedana/io.py b/tedana/io.py index 1c15942e2..415979948 100644 --- a/tedana/io.py +++ b/tedana/io.py @@ -176,9 +176,7 @@ def register_input(self, names): names : list[str] The list of filenames being input as multi-echo volumes. """ - self.registry["input img"] = [ - op.relpath(name, start=self.out_dir) for name in names - ] + self.registry["input img"] = [op.relpath(name, start=self.out_dir) for name in names] def get_name(self, description, **kwargs): """Generate a file full path to simplify file output. From 0190f7278ca5f969c62ef8e93a0c96a53f68dca0 Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Mon, 12 Dec 2022 16:02:11 -0500 Subject: [PATCH 073/177] Add class template start --- docs/_templates/class.rst | 52 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) create mode 100644 docs/_templates/class.rst diff --git a/docs/_templates/class.rst b/docs/_templates/class.rst new file mode 100644 index 000000000..0b9ab90cb --- /dev/null +++ b/docs/_templates/class.rst @@ -0,0 +1,52 @@ +{{ fullname }} +{{ underline }} + +.. currentmodule:: {{ module }} + +.. autoclass:: {{ objname }} + :no-members: + :no-inherited-members: + :no-special-members: + + {% block methods %} + {% if methods %} + + .. automethod:: __init__ + + {% if ('__call__' in all_methods) or ('__call__' in inherited_members) %} + + .. automethod:: __call__ + + {% endif %} + + .. rubric:: Methods + + .. autosummary:: + :toctree: + {% for item in all_methods %} + {%- if not item.startswith('_') or item in ['__mul__', '__getitem__', '__len__'] %} + ~{{ name }}.{{ item }} + {%- endif -%} + {%- endfor %} + {% for item in inherited_members %} + {%- if item in ['__mul__', '__getitem__', '__len__'] %} + ~{{ name }}.{{ item }} + {%- endif -%} + {%- endfor %} + {% endif %} + {% endblock %} + + + {% block attributes %} + {% if attributes %} + .. rubric:: Attributes + + .. 
autosummary:: + :toctree: + {% for item in all_attributes %} + {%- if not item.startswith('_') %} + ~{{ name }}.{{ item }} + {%- endif -%} + {%- endfor %} + {% endif %} + {% endblock %} From dafe7034ff67ac391e5fe0348aaa71375c00736f Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Tue, 13 Dec 2022 10:26:31 -0500 Subject: [PATCH 074/177] Add previous workflow registry into new one --- tedana/io.py | 19 +++++++++++++++++++ tedana/workflows/tedana_reclassify.py | 1 + 2 files changed, 20 insertions(+) diff --git a/tedana/io.py b/tedana/io.py index 415979948..a6d138ade 100644 --- a/tedana/io.py +++ b/tedana/io.py @@ -99,6 +99,7 @@ def __init__( make_figures=True, force=False, verbose=False, + old_registry=None ): if config == "auto": @@ -127,6 +128,17 @@ def __init__( self.force = force self.verbose = verbose self.registry = {} + if old_registry: + root = old_registry["root"] + rel_root = op.relpath(root, start=self.out_dir) + del old_registry["root"] + for k, v in old_registry.items(): + if isinstance(v, list): + self.registry[k] = [ + op.join(rel_root, vv) for vv in v + ] + else: + self.registry[k] = op.join(rel_root, v) if not op.isdir(self.out_dir): LGR.info(f"Generating output directory: {self.out_dir}") @@ -354,6 +366,13 @@ def get_file_contents(self, description): # always return. If more types are added, the loaders dict will # need to be updated with an appopriate loader + @property + def registry(self): + """The underlying file registry, including the root directory.""" + d = self._registry + d["root"] = self._base_dir + return d + def get_fields(name): """Identify all fields in an unformatted string. diff --git a/tedana/workflows/tedana_reclassify.py b/tedana/workflows/tedana_reclassify.py index 36e83394e..d0fe8834c 100644 --- a/tedana/workflows/tedana_reclassify.py +++ b/tedana/workflows/tedana_reclassify.py @@ -278,6 +278,7 @@ def post_tedana( force=force, verbose=False, out_dir=out_dir, + old_registry=ioh.registry, ) # Make a new selector with the added files From fc94f2e3a2f571a44ef571914620ede0fd8a1bab Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Tue, 13 Dec 2022 12:05:10 -0500 Subject: [PATCH 075/177] Fix failure to update tags and classifications in manual --- tedana/selection/ComponentSelector.py | 7 ++++++- tedana/selection/selection_utils.py | 10 +++++----- 2 files changed, 11 insertions(+), 6 deletions(-) diff --git a/tedana/selection/ComponentSelector.py b/tedana/selection/ComponentSelector.py index e14c067f2..004bcdd29 100644 --- a/tedana/selection/ComponentSelector.py +++ b/tedana/selection/ComponentSelector.py @@ -323,6 +323,7 @@ def __init__(self, tree, component_table, cross_component_metrics={}, status_tab # Since a status table exists, we need to skip nodes up to the # point where the last tree finished self.start_idx = len(tree_config["nodes"]) + LGR.info(f'Start is {self.start_idx}') self.component_status_table = status_table def select(self): @@ -368,7 +369,9 @@ def select(self): ) # for each node in the decision tree - for self.current_node_idx, node in enumerate(self.tree["nodes"][self.start_idx :]): + for self.current_node_idx, node in enumerate( + self.tree["nodes"][self.start_idx :], start=self.start_idx + ): # parse the variables to use with the function fcn = getattr(selection_nodes, node["functionname"]) @@ -409,6 +412,7 @@ def select(self): self.are_all_components_accepted_or_rejected() + LGR.info(f'Start idx is {self.start_idx}') def add_manual(self, indices, classification): """ Add nodes that will manually classify components @@ -429,6 +433,7 @@ 
def add_manual(self, indices, classification): }, "kwargs": { "dont_warn_reclassify": "true", + "tag": "manual reclassify", }, } ) diff --git a/tedana/selection/selection_utils.py b/tedana/selection/selection_utils.py index 7be51c341..08e2281c3 100644 --- a/tedana/selection/selection_utils.py +++ b/tedana/selection/selection_utils.py @@ -149,7 +149,7 @@ def change_comptable_classifications( True, ifTrue, decision_boolean, - tag_ifTrue, + tag_if=tag_ifTrue, dont_warn_reclassify=dont_warn_reclassify, ) selector = comptable_classification_changer( @@ -157,7 +157,7 @@ def change_comptable_classifications( False, ifFalse, decision_boolean, - tag_ifFalse, + tag_if=tag_ifFalse, dont_warn_reclassify=dont_warn_reclassify, ) @@ -268,11 +268,11 @@ def comptable_classification_changer( if tag_if is not None: # only run if a tag is provided for idx in changeidx: tmpstr = selector.component_table.loc[idx, "classification_tags"] - if tmpstr != "": + if tmpstr == "" or isinstance(tmpstr, float): + tmpset = set([tag_if]) + else: tmpset = set(tmpstr.split(",")) tmpset.update([tag_if]) - else: - tmpset = set([tag_if]) selector.component_table.loc[idx, "classification_tags"] = ",".join( str(s) for s in tmpset ) From f87e0a24d754aab3189d5585bfad674c7eb10ffc Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Tue, 13 Dec 2022 12:12:09 -0500 Subject: [PATCH 076/177] Fix missing less likely BOLD tag --- tedana/resources/decision_trees/kundu.json | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tedana/resources/decision_trees/kundu.json b/tedana/resources/decision_trees/kundu.json index a0f134ec1..a0e29e95c 100644 --- a/tedana/resources/decision_trees/kundu.json +++ b/tedana/resources/decision_trees/kundu.json @@ -263,7 +263,8 @@ "op2": ">", "left2": "variance explained", "right2": "varex_upper_thresh", - "log_extra_info": "If variance and d_table_scores are high, then reject" + "log_extra_info": "If variance and d_table_scores are high, then reject", + "tag_ifTrue": "Less likely BOLD" }, "_comment": "Code I007 in premodularized tedana. 
One of several steps that makes it more likely to reject high variance components" }, @@ -447,4 +448,4 @@ "_comment": "No code in the premodularized tedana" } ] -} \ No newline at end of file +} From 7e0de9f3d8090c70ae1b5dfacdd5297353c78ad6 Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Tue, 13 Dec 2022 12:23:10 -0500 Subject: [PATCH 077/177] Adds more useful reporting for unused metrics --- tedana/selection/ComponentSelector.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tedana/selection/ComponentSelector.py b/tedana/selection/ComponentSelector.py index 004bcdd29..79b1fd302 100644 --- a/tedana/selection/ComponentSelector.py +++ b/tedana/selection/ComponentSelector.py @@ -412,7 +412,6 @@ def select(self): self.are_all_components_accepted_or_rejected() - LGR.info(f'Start idx is {self.start_idx}') def add_manual(self, indices, classification): """ Add nodes that will manually classify components @@ -476,7 +475,7 @@ def are_only_necessary_metrics_used(self): if len(not_declared) > 0: LGR.warning( f"Decision tree {self.tree_name} used the following metrics that were " - "not declared as necessary: {not_declared}" + f"not declared as necessary: {not_declared}" ) if len(not_used) > 0: LGR.warning( From 232e3f7aff862c68dc1e268a8cb396dc38f304ae Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Tue, 13 Dec 2022 12:37:54 -0500 Subject: [PATCH 078/177] Create generated metrics --- tedana/io.py | 6 ++---- tedana/resources/decision_trees/kundu.json | 7 ++++++- tedana/selection/ComponentSelector.py | 10 +++++++--- 3 files changed, 15 insertions(+), 8 deletions(-) diff --git a/tedana/io.py b/tedana/io.py index a6d138ade..045dce22d 100644 --- a/tedana/io.py +++ b/tedana/io.py @@ -99,7 +99,7 @@ def __init__( make_figures=True, force=False, verbose=False, - old_registry=None + old_registry=None, ): if config == "auto": @@ -134,9 +134,7 @@ def __init__( del old_registry["root"] for k, v in old_registry.items(): if isinstance(v, list): - self.registry[k] = [ - op.join(rel_root, vv) for vv in v - ] + self.registry[k] = [op.join(rel_root, vv) for vv in v] else: self.registry[k] = op.join(rel_root, v) diff --git a/tedana/resources/decision_trees/kundu.json b/tedana/resources/decision_trees/kundu.json index a0e29e95c..33c09850a 100644 --- a/tedana/resources/decision_trees/kundu.json +++ b/tedana/resources/decision_trees/kundu.json @@ -12,7 +12,12 @@ "dice_FT2", "signal-noise_t", "variance explained", - "d_table_score" + "d_table_score", + "countnoise" + ], + "generated_metrics": [ + "d_table_score_node19", + "varex kappa ratio" ], "intermediate_classifications": [ "provisionalaccept" diff --git a/tedana/selection/ComponentSelector.py b/tedana/selection/ComponentSelector.py index 79b1fd302..024ea6012 100644 --- a/tedana/selection/ComponentSelector.py +++ b/tedana/selection/ComponentSelector.py @@ -323,7 +323,7 @@ def __init__(self, tree, component_table, cross_component_metrics={}, status_tab # Since a status table exists, we need to skip nodes up to the # point where the last tree finished self.start_idx = len(tree_config["nodes"]) - LGR.info(f'Start is {self.start_idx}') + LGR.info(f"Start is {self.start_idx}") self.component_status_table = status_table def select(self): @@ -470,8 +470,12 @@ def are_only_necessary_metrics_used(self): used and if any used_metrics weren't explicitly declared necessary If either of these happen, a warning is added to the logger """ - not_declared = self.tree["used_metrics"] - self.necessary_metrics - not_used = self.necessary_metrics - 
self.tree["used_metrics"] + if "generated_metrics" in self.tree.keys(): + necessary_metrics = set(self.tree["generated_metrics"]) | self.necessary_metrics + else: + necessary_metrics = self.necessary_metrics + not_declared = self.tree["used_metrics"] - necessary_metrics + not_used = necessary_metrics - self.tree["used_metrics"] if len(not_declared) > 0: LGR.warning( f"Decision tree {self.tree_name} used the following metrics that were " From 8cf697cb42e2cc24eee3ceb8999e7d914f67c166 Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Tue, 13 Dec 2022 12:39:16 -0500 Subject: [PATCH 079/177] Update line terminator --- tedana/io.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tedana/io.py b/tedana/io.py index 045dce22d..222908c01 100644 --- a/tedana/io.py +++ b/tedana/io.py @@ -329,7 +329,7 @@ def save_tsv(self, data, name): raise TypeError(f"data must be pd.Data, not type {data_type}.") # Replace blanks with numpy NaN deblanked = data.replace("", np.nan) - deblanked.to_csv(name, sep="\t", line_terminator="\n", na_rep="n/a", index=False) + deblanked.to_csv(name, sep="\t", lineterminator="\n", na_rep="n/a", index=False) def save_self(self): fname = self.save_file(self.registry, "registry json") From 347aa13c6532018ae297175aa469128e7db30c1b Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Tue, 13 Dec 2022 12:49:55 -0500 Subject: [PATCH 080/177] Force black to run before flake8 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index b26334242..d7cc34e9c 100644 --- a/Makefile +++ b/Makefile @@ -12,8 +12,8 @@ help: @echo " all_tests to run 'lint', 'unittest', and 'integration'" lint: - @flake8 tedana @black --check --diff tedana + @flake8 tedana unittest: @py.test --skipintegration --cov-append --cov-report term-missing --cov=tedana tedana/ From 9d6a4872c3b1223938a7a3019e287032a3931bd8 Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Tue, 13 Dec 2022 12:51:54 -0500 Subject: [PATCH 081/177] Updates percentile call --- tedana/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tedana/utils.py b/tedana/utils.py index 0248f9f2a..131fcb4ad 100644 --- a/tedana/utils.py +++ b/tedana/utils.py @@ -76,7 +76,7 @@ def make_adaptive_mask(data, mask=None, getsum=False, threshold=1): # get 33rd %ile of `first_echo` and find corresponding index # NOTE: percentile is arbitrary - perc = np.percentile(first_echo, 33, interpolation="higher") + perc = np.percentile(first_echo, 33, method="higher") perc_val = echo_means[:, 0] == perc # extract values from all echos at relevant index From 336dfa4a77b00f7dd9e95259bfbaecd8053c012c Mon Sep 17 00:00:00 2001 From: handwerkerd Date: Tue, 13 Dec 2022 14:16:40 -0500 Subject: [PATCH 082/177] more doc updates --- docs/building decision trees.rst | 18 ++++++-- ...=> classification_output_descriptions.rst} | 0 docs/faq.rst | 43 ++++++++++++------- docs/index.rst | 2 +- docs/outputs.rst | 12 +++--- tedana/workflows/tedana.py | 2 + 6 files changed, 50 insertions(+), 27 deletions(-) rename docs/{component_table_descriptions.rst => classification_output_descriptions.rst} (100%) diff --git a/docs/building decision trees.rst b/docs/building decision trees.rst index b3586c6a0..6787e49ab 100644 --- a/docs/building decision trees.rst +++ b/docs/building decision trees.rst @@ -2,10 +2,19 @@ Understanding and building a component selection process ######################################################## -``tedana`` involves transforming data into components via ICA, and then calculating metrics for 
each component. -Each metric has one value per component that is stored in a comptable or component_table dataframe. This structure -is then passed to a "decision tree" through which a series of binary choices categorize each component as **accepted** or -**rejected**. The time series for the rejected components are regressed from the data in the final denoising step. +This guide is designed for users who want to better understand the mechanics +of the component selection process and people who are considering customizing +their own decision tree or contributing to ``tedana`` code. We have tried to +make this accessible with minimal jargon, but it is long. If you just want to +better understand what's in the outputs from ``tedana`` start with +`classification output descriptions`_. + +``tedana`` involves transforming data into components, currently via ICA, and then +calculating metrics for each component. Each metric has one value per component that +is stored in a component_table dataframe. This structure is then passed to a +"decision tree" through which a series of binary choices categorize each component +as **accepted** or **rejected**. The time series for the rejected components are +regressed from the data in the final denoising step. There are several decision trees that are included by default in ``tedana`` but users can also build their own. This might be useful if one of the default decision trees needs to be slightly altered due to the nature @@ -18,6 +27,7 @@ Instructions for how to classify components is called a "decision tree" since ea process branches components into different intermediate or final classifications .. contents:: :local: +.. _classification output descriptions: classification output descriptions.html ****************************************** diff --git a/docs/component_table_descriptions.rst b/docs/classification_output_descriptions.rst similarity index 100% rename from docs/component_table_descriptions.rst rename to docs/classification_output_descriptions.rst diff --git a/docs/faq.rst b/docs/faq.rst index 4dc851ca8..ee065d948 100644 --- a/docs/faq.rst +++ b/docs/faq.rst @@ -47,6 +47,8 @@ Nevertheless, we have some code (thanks to Julio Peraza) that works for version +.. _fMRIPrep: https://fmriprep.readthedocs.io + Warping scanner-space fMRIPrep outputs to standard space ======================================================== @@ -74,6 +76,7 @@ correction, rescaling, nuisance regression). If you are confident that your data have been preprocessed correctly prior to applying tedana, and you encounter this problem, please submit a question to `NeuroStars`_. +.. _NeuroStars: https://neurostars.org .. _manual classification: @@ -136,24 +139,32 @@ can include additional criteria. .. _make their own: building\ decision\ trees.html ************************************************************************************* -[tedana] Why isn't v3.2 of the component selection algorithm supported in ``tedana``? +[tedana] What different versions of this method exist? ************************************************************************************* -There is a lot of solid logic behind the updated version of the TEDICA component -selection algorithm, first added to the original ME-ICA codebase `here`_ by Dr. Prantik Kundu. 
-However, we (the ``tedana`` developers) have encountered certain difficulties -with this method (e.g., misclassified components) and the method itself has yet -to be validated in any papers, posters, etc., which is why we have chosen to archive -the v3.2 code, with the goal of revisiting it when ``tedana`` is more stable. - -Anyone interested in using v3.2 may compile and install an earlier release (<=0.0.4) of ``tedana``. - - -.. _here: https://bitbucket.org/prantikk/me-ica/commits/906bd1f6db7041f88cd0efcac8a74074d673f4f5 - -.. _NeuroStars: https://neurostars.org -.. _fMRIPrep: https://fmriprep.readthedocs.io -.. _afni_proc.py: https://afni.nimh.nih.gov/pub/dist/doc/program_help/afni_proc.py.html +Dr. Prantik Kundu developed a multi-echo ICA (ME-ICA) denoising method and +`shared code on bitbucket`_ to allow others to use the method. A nearly identical +version of this code is `distributed with AFNI as MEICA v2.5 beta 11`_. Most early +publications that validated the MEICA method used variants of this code. That code +runs only on the now defunct python 2.7 and is not under active development. +``tedana``, when run with `--tree kundu --tedpca kundu` (or `--tedpca kundu-stabilize`), +uses the same core algorithm as in MEICA v2.5. Since ICA is a nondeterministic +algorithm and ``tedana`` and MEICA use different PCA and ICA code, the algorithm will +mostly be the same, but the results will not be identical. + +Prantik Kundu also worked on `MEICA v3.2`_ (also for python v2.7). The underlying ICA +step is very similar, but the component selection process was different. While this +new approach has potentially useful ideas, the early ``tedana`` developers experienced +non-trivial component misclassifications and there were no publications that +validated this method. That is why ``tedana`` replicated the established and validated +MEICA v2.5 method and also includes options to integrate additional component selection +methods. Recently Prantik has started to work on `MEICA v3.3`_ (for python >=v3.7) so +that this version of the selection process would again be possible to run. + +.. _shared code on bitbucket: https://bitbucket.org/prantikk/me-ica/src/experimental +.. _distributed with AFNI as MEICA v 2.5 beta 11: https://github.com/afni/afni/tree/master/src/pkundu +.. _MEICA v3.2: https://github.com/ME-ICA/me-ica/tree/53191a7e8838788acf837fdf7cb3026efadf49ac +.. _MEICA v3.3: https://github.com/ME-ICA/me-ica/tree/ME-ICA_v3.3.0 ******************************************************************* diff --git a/docs/index.rst b/docs/index.rst index 3e33daa15..670136bcd 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -190,7 +190,7 @@ tedana is licensed under GNU Lesser General Public License version 2.1. dependence_metrics output_file_descriptions - component_table_descriptions + classification_output_descriptions ****************** diff --git a/docs/outputs.rst b/docs/outputs.rst index c67031cd8..440db7a07 100644 --- a/docs/outputs.rst +++ b/docs/outputs.rst @@ -15,16 +15,16 @@ future processing. `descriptions of these output files are here`_. .. _descriptions of these output files are here: output_file_descriptions.html -**************** -Component tables -**************** +******************************************* +Component tables and classification outputs +******************************************* TEDPCA and TEDICA use component tables to track relevant metrics, component classifications, and rationales behind classifications. -The component tables are stored as tsv files for BIDS-compatibility. 
-`Full descriptions of these outputs are here`_. +The component tables and additional information are stored as tsv and json files. +`A full descriptions of these outputs are here`_. -.. _Full descriptions of these outputs are here: component_table_descriptions.html +.. _Full descriptions of these outputs are here: classification_output_descriptions.html ********************* diff --git a/tedana/workflows/tedana.py b/tedana/workflows/tedana.py index a0c1ff3d6..1b293680d 100644 --- a/tedana/workflows/tedana.py +++ b/tedana/workflows/tedana.py @@ -144,6 +144,8 @@ def _get_parser(): "PCA decomposition with the mdl, kic and aic options " "is based on a Moving Average (stationary Gaussian) " "process and are ordered from most to least aggressive. " + "'kundu' or 'kundu-stabilize' are selection methods that " + "were distributed with MEICA. " "Users may also provide a float from 0 to 1, " "in which case components will be selected based on the " "cumulative variance explained or an integer greater than 1" From 63446dce96711c192f081e452cf6732392d085fe Mon Sep 17 00:00:00 2001 From: handwerkerd Date: Tue, 13 Dec 2022 16:39:28 -0500 Subject: [PATCH 083/177] fixed meica to v2.5 in docstrings --- docs/building decision trees.rst | 23 +++++++++++++---------- docs/faq.rst | 6 +++--- tedana/selection/selection_nodes.py | 8 ++++---- tedana/selection/selection_utils.py | 4 ++-- 4 files changed, 22 insertions(+), 19 deletions(-) diff --git a/docs/building decision trees.rst b/docs/building decision trees.rst index 6787e49ab..9ddd3b42e 100644 --- a/docs/building decision trees.rst +++ b/docs/building decision trees.rst @@ -12,23 +12,26 @@ better understand what's in the outputs from ``tedana`` start with ``tedana`` involves transforming data into components, currently via ICA, and then calculating metrics for each component. Each metric has one value per component that is stored in a component_table dataframe. This structure is then passed to a -"decision tree" through which a series of binary choices categorize each component +"decision tree" through which a series of binary choices categorizes each component as **accepted** or **rejected**. The time series for the rejected components are -regressed from the data in the final denoising step. +regressed from the data in the `final denoising step`_. -There are several decision trees that are included by default in ``tedana`` but users can also build their own. -This might be useful if one of the default decision trees needs to be slightly altered due to the nature -of a specific data set, if one has an idea for a new approach to multi-echo denoising, or if one wants to integrate +There are a couple of decision trees that are included by default in ``tedana`` but +users can also build their own. This might be useful if one of the default decision +trees needs to be slightly altered due to the nature of a specific data set, if one has +an idea for a new approach to multi-echo denoising, or if one wants to integrate non-multi-echo metrics into a single decision tree. -Note: We use two terminologies interchangeably. The whole process is called "component selection" -and much of the code uses variants of that phrase (i.e. the ComponentSelector class, selection_nodes for the functions used in selection). -Instructions for how to classify components is called a "decision tree" since each step in the selection -process branches components into different intermediate or final classifications +Note: We use two terminologies interchangeably. 
The whole process is called "component +selection" and much of the code uses variants of that phrase (i.e. the ComponentSelector +class, selection_nodes for the functions used in selection). The steps for how to +classify components is called a "decision tree" since each step in the selection +process branches components into different intermediate or final classifications. -.. contents:: :local: .. _classification output descriptions: classification output descriptions.html +.. _final denoising step: denoising.html +.. contents:: :local: ****************************************** Expected outputs after component selection diff --git a/docs/faq.rst b/docs/faq.rst index ee065d948..3eaf9a812 100644 --- a/docs/faq.rst +++ b/docs/faq.rst @@ -70,9 +70,9 @@ The standard space template in this example is "MNI152NLin2009cAsym", but will d The TEDICA step may fail to converge if TEDPCA is either too strict (i.e., there are too few components) or too lenient (there are too many). -In our experience, this may happen when preprocessing has not been applied to -the data, or when improper steps have been applied to the data (e.g., distortion -correction, rescaling, nuisance regression). +With updates to the ``tedana`` code, this issue is now rare, but it may happen +when preprocessing has not been applied to the data, or when improper steps have +been applied to the data (e.g. rescaling, nuisance regression). If you are confident that your data have been preprocessed correctly prior to applying tedana, and you encounter this problem, please submit a question to `NeuroStars`_. diff --git a/tedana/selection/selection_nodes.py b/tedana/selection/selection_nodes.py index 48cfbe5af..542d65bae 100644 --- a/tedana/selection/selection_nodes.py +++ b/tedana/selection/selection_nodes.py @@ -769,9 +769,9 @@ def calc_kappa_elbow( Note ---- This function is currently hard coded for a specific way to calculate the kappa elbow - based on the method by Kundu in the MEICA v2.7 code. This uses the minimum of + based on the method by Kundu in the MEICA v2.5 code. This uses the minimum of a kappa elbow calculation on all components and on a subset of kappa values below - a significance threshold. To get the same functionality as in MEICA v2.7, + a significance threshold. To get the same functionality as in MEICA v2.5, decide_comps must be 'all'. """ @@ -881,8 +881,8 @@ def calc_rho_elbow( Note ---- This script is currently hard coded for a specific way to calculate the rho elbow - based on the method by Kundu in the MEICA v2.7 code. To get the same functionality - in MEICA v2.7, decide_comps must be 'all' and subset_decide_comps must be + based on the method by Kundu in the MEICA v2.5 code. To get the same functionality + in MEICA v2.5, decide_comps must be 'all' and subset_decide_comps must be 'unclassified' See :obj:`tedana.selection.selection_utils.rho_elbow_kundu_liberal` for a more detailed explanation of the difference between the kundu and liberal options. 
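As the docstrings above describe it, the kappa elbow is the minimum of an elbow computed on all components and an elbow computed on the subset of kappa values below a significance threshold. A rough sketch of that idea, assuming a simple maximum-distance-to-chord elbow finder and a made-up ``f05`` value standing in for the significance threshold the real function computes (presumably from ``n_echos``, given the signature above); this is an illustration, not the tedana implementation:

    import numpy as np

    def elbow_index(vals):
        # Elbow of a descending-sorted curve: the point farthest from the
        # straight line joining the first and last points.
        curve = np.sort(np.asarray(vals, dtype=float))[::-1]
        pts = np.column_stack([np.arange(curve.size), curve])
        chord = (pts[-1] - pts[0]) / np.linalg.norm(pts[-1] - pts[0])
        rel = pts - pts[0]
        proj = np.outer(rel @ chord, chord)
        return int(np.argmax(np.linalg.norm(rel - proj, axis=1)))

    kappas = np.array([95.0, 80.0, 62.0, 33.0, 24.0, 21.0, 19.0, 18.0])
    f05 = 30.0  # placeholder significance threshold
    elbow_all = np.sort(kappas)[::-1][elbow_index(kappas)]
    low = kappas[kappas < f05]
    elbow_low = np.sort(low)[::-1][elbow_index(low)]
    kappa_elbow = min(elbow_all, elbow_low)  # keep the more conservative of the two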
diff --git a/tedana/selection/selection_utils.py b/tedana/selection/selection_utils.py index 08e2281c3..a947eb16a 100644 --- a/tedana/selection/selection_utils.py +++ b/tedana/selection/selection_utils.py @@ -577,7 +577,7 @@ def getelbow(arr, return_val=False): def kappa_elbow_kundu(component_table, n_echos, comps2use=None): """ Calculate an elbow for kappa using the approach originally in - Prantik Kundu's MEICA v2.7 code + Prantik Kundu's MEICA v2.5 code Parameters ---------- @@ -649,7 +649,7 @@ def rho_elbow_kundu_liberal( ): """ Calculate an elbow for rho using the approach originally in - Prantik Kundu's MEICA v2.7 code and with a slightly more + Prantik Kundu's MEICA v2.5 code and with a slightly more liberal threshold Parameters From 054616a9560da4dc52439b6e9079a51451fca7bd Mon Sep 17 00:00:00 2001 From: handwerkerd Date: Wed, 14 Dec 2022 10:19:37 -0500 Subject: [PATCH 084/177] docs building again --- docs/building decision trees.rst | 17 +++++++++-------- docs/faq.rst | 2 +- docs/outputs.rst | 4 ++-- 3 files changed, 12 insertions(+), 11 deletions(-) diff --git a/docs/building decision trees.rst b/docs/building decision trees.rst index 9ddd3b42e..d490f5b11 100644 --- a/docs/building decision trees.rst +++ b/docs/building decision trees.rst @@ -24,9 +24,9 @@ non-multi-echo metrics into a single decision tree. Note: We use two terminologies interchangeably. The whole process is called "component selection" and much of the code uses variants of that phrase (i.e. the ComponentSelector -class, selection_nodes for the functions used in selection). The steps for how to -classify components is called a "decision tree" since each step in the selection -process branches components into different intermediate or final classifications. +class, selection_nodes for the functions used in selection). We call the steps for how +to classify components a "decision tree" since each step in the selection process +branches components into different intermediate or final classifications. .. _classification output descriptions: classification output descriptions.html .. _final denoising step: denoising.html @@ -85,11 +85,12 @@ New columns in the ``component_table`` (sometimes a stand alone variable ``compt ``used_metrics``: Saved as a field in the ``tree`` json file - A list of the metrics that were used in the decision tree. This should - match ``necessary_metrics`` which was a predefined list of metrics that - a tree uses. If these don't match, a warning should appear. These might - be useful for future work so that a user can input a tree and metrics - would be calculated based on what's needed to execute the tree. + A list of the metrics that were used in the decision tree. Everything in + ``used_metrics`` should be in either ``necessary_metrics`` or + ``generated_metrics``. If a used metric isn't in either, a warning message + will appear. These may have an additional use for future work so that a + user can input a tree and metrics would be calculated based on what's + needed to execute the tree. ``classification_tags``: Saved as a field in the ``tree`` json file diff --git a/docs/faq.rst b/docs/faq.rst index 3eaf9a812..7dd013f9b 100644 --- a/docs/faq.rst +++ b/docs/faq.rst @@ -162,7 +162,7 @@ methods. Recently Prantik has started to work on `MEICA v3.3`_ (for python >=v3.7) so that this version of the selection process would again be possible to run. .. _shared code on bitbucket: https://bitbucket.org/prantikk/me-ica/src/experimental -.. 
_distributed with AFNI as MEICA v 2.5 beta 11: https://github.com/afni/afni/tree/master/src/pkundu +.. _distributed with AFNI as MEICA v2.5 beta 11: https://github.com/afni/afni/tree/master/src/pkundu .. _MEICA v3.2: https://github.com/ME-ICA/me-ica/tree/53191a7e8838788acf837fdf7cb3026efadf49ac .. _MEICA v3.3: https://github.com/ME-ICA/me-ica/tree/ME-ICA_v3.3.0 diff --git a/docs/outputs.rst b/docs/outputs.rst index 440db7a07..bec73edbf 100644 --- a/docs/outputs.rst +++ b/docs/outputs.rst @@ -22,9 +22,9 @@ Component tables and classification outputs TEDPCA and TEDICA use component tables to track relevant metrics, component classifications, and rationales behind classifications. The component tables and additional information are stored as tsv and json files. -`A full descriptions of these outputs are here`_. +`A full description of these outputs is here`_. -.. _Full descriptions of these outputs are here: classification_output_descriptions.html +.. _A full description of these outputs is here: classification_output_descriptions.html ********************* From 14b34001ce50b405df98d069d7b548bf9a35d967 Mon Sep 17 00:00:00 2001 From: handwerkerd Date: Wed, 14 Dec 2022 22:32:23 -0500 Subject: [PATCH 085/177] more updates to building decision trees --- docs/building decision trees.rst | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/docs/building decision trees.rst b/docs/building decision trees.rst index d490f5b11..9cfd388a0 100644 --- a/docs/building decision trees.rst +++ b/docs/building decision trees.rst @@ -146,7 +146,7 @@ Defining a custom decision tree Decision trees are stored in json files. The default trees are stored as part of the tedana code repository in ./resources/decision_trees The minimal tree, minimal.json is a good example highlighting the structure and steps in a tree. It may be helpful -to look at that tree while reading this section. kundu.json should replicate the decision tree used in meica version 2.7, +to look at that tree while reading this section. kundu.json should replicate the decision tree used in MEICA version 2.5, the predecessor to tedana. It is a more complex, but also highlights additional possible functionality in decision trees. A user can specify another decision tree and link to the tree location when tedana is executed with the ``--tree`` option. The format is @@ -156,9 +156,9 @@ if violated, but more will just give a warning. If you are designing or editing A decision tree can include two types of nodes or functions. All functions are currently in selection_nodes.py -- A decision function will use existing metrics and potentially change the classification of the components based on those metrics. 
By convention, all these functions should begin with "dec" -- A calculation function will take existing metrics and calculate a value across components to be used for classification, for example the kappa and rho elbows. By convention, all these functions should begin with "calc" -- Nothing prevents a function from both calculating new cross component values and applying those values in a decision step, but following this convention should hopefully make decision tree specifications easier to follow and interpret. +- A decision function will use existing metrics and potentially change the classification of the components based on those metrics. By convention, all these functions begin with "dec" +- A calculation function will take existing metrics and calculate a value across components to be used for classification, for example the kappa and rho elbows. By convention, all these functions begin with "calc" +- Nothing prevents a function from both calculating new cross component values and applying those values in a decision step, but following this convention should hopefully make decision tree specifications easier to follow and results easier to interpret. **Key expectations** @@ -188,13 +188,19 @@ A decision tree can include two types of nodes or functions. All functions are c **Decision node json structure** -There are 6 initial fields, necessary_metrics, intermediate_classification, and classification_tags, as described in the above section: +There are 7 initial fields, necessary_metrics, intermediate_classification, and classification_tags, as described in the above section, and: - "tree_id": a descriptive name for the tree that will be logged. - "info": A brief description of the tree for info logging - "report": A narrative description of the tree that could be used in report logging - "refs" Publications that should be referenced when this tree is used +"generated_metrics" is an optional initial field. It lists metrics that are calculated as part of the decision tree. +This is used similarly to necessary_metrics except, since the decision tree starts before these metrics exist, it +won't raise an error when these metrics are not found. One might want to calculate a new metric if the metric uses +only a subset of the components based on previous classifications. This does make interpretation of results more +confusing, but, since this functionality was part of the kundu decision tree, it is included. + The "nodes" field is a list of elements where each element defines a node in the decision tree. There are several key fields for each of these nodes: - "functionname": The exact function name in selection_nodes.py that will be called. 
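To make the field descriptions above concrete, here is an illustrative node in the shape this section describes, written as the Python dict the JSON parses to. The function name follows the "dec" convention and the metric names echo ones that appear elsewhere in this series, but treat the specific strings, thresholds, and the exact signature as assumptions rather than an excerpt from a shipped tree:

    node = {
        "functionname": "dec_left_op_right",  # a decision function, per the "dec" convention
        "parameters": {
            "ifTrue": "rejected",  # classification assigned when the comparison is True
            "ifFalse": "nochange",  # leave the component's classification alone if False
            "decide_comps": "unclassified",  # which components this node evaluates
            "op": ">",
            "left": "rho",
            "right": "kappa",
        },
        "kwargs": {
            "log_extra_info": "If rho > kappa, reject",
            "tag_ifTrue": "Unlikely BOLD",  # appended to that component's classification_tags
        },
    }

The ``add_manual`` method shown a few patches earlier appends a node of this shape (with a "manual reclassify" tag) to the tree, which is how manual reclassification flows through the same execution machinery.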
From 8c54a180d54fe5cc18a5936a771d54b1ada09c5a Mon Sep 17 00:00:00 2001 From: Dan Handwerker <7406227+handwerkerd@users.noreply.github.com> Date: Thu, 15 Dec 2022 10:17:16 -0500 Subject: [PATCH 086/177] improved docs (#19) * working on selector init documentation * Breaking up outputs.rst * partially updated output_file_descriptions.rst * changed n_bold_comps to n_accepted_comps * n_bold_comps to n_accepted_comps * ComponentSelector.py API docs cleaned up * selection_nodes decision_docs updated * selection_nodes docstrings cleaned up * Fixed a test for selection_nodes * Updated faq for tedana_reclassify and tree options * docstrings in tedica and other small updates * Updated docstrings in selection_utils.py * Update docs/output_file_descriptions.rst * more doc updates * fixed meica to v2.5 in docstrings * docs building again * more updates to building decision trees Co-authored-by: Joshua Teves --- docs/building decision trees.rst | 66 ++++++++++++------- ...=> classification_output_descriptions.rst} | 0 docs/faq.rst | 49 ++++++++------ docs/index.rst | 2 +- docs/outputs.rst | 12 ++-- tedana/selection/selection_nodes.py | 8 +-- tedana/selection/selection_utils.py | 4 +- tedana/workflows/tedana.py | 2 + 8 files changed, 88 insertions(+), 55 deletions(-) rename docs/{component_table_descriptions.rst => classification_output_descriptions.rst} (100%) diff --git a/docs/building decision trees.rst b/docs/building decision trees.rst index b3586c6a0..9cfd388a0 100644 --- a/docs/building decision trees.rst +++ b/docs/building decision trees.rst @@ -2,23 +2,36 @@ Understanding and building a component selection process ######################################################## -``tedana`` involves transforming data into components via ICA, and then calculating metrics for each component. -Each metric has one value per component that is stored in a comptable or component_table dataframe. This structure -is then passed to a "decision tree" through which a series of binary choices categorize each component as **accepted** or -**rejected**. The time series for the rejected components are regressed from the data in the final denoising step. - -There are several decision trees that are included by default in ``tedana`` but users can also build their own. -This might be useful if one of the default decision trees needs to be slightly altered due to the nature -of a specific data set, if one has an idea for a new approach to multi-echo denoising, or if one wants to integrate +This guide is designed for users who want to better understand the mechanics +of the component selection process and people who are considering customizing +their own decision tree or contributing to ``tedana`` code. We have tried to +make this accessible with minimal jargon, but it is long. If you just want to +better understand what's in the outputs from ``tedana`` start with +`classification output descriptions`_. + +``tedana`` involves transforming data into components, currently via ICA, and then +calculating metrics for each component. Each metric has one value per component that +is stored in a component_table dataframe. This structure is then passed to a +"decision tree" through which a series of binary choices categorizes each component +as **accepted** or **rejected**. The time series for the rejected components are +regressed from the data in the `final denoising step`_. + +There are a couple of decision trees that are included by default in ``tedana`` but +users can also build their own. 
This might be useful if one of the default decision +trees needs to be slightly altered due to the nature of a specific data set, if one has +an idea for a new approach to multi-echo denoising, or if one wants to integrate non-multi-echo metrics into a single decision tree. -Note: We use two terminologies interchangeably. The whole process is called "component selection" -and much of the code uses variants of that phrase (i.e. the ComponentSelector class, selection_nodes for the functions used in selection). -Instructions for how to classify components is called a "decision tree" since each step in the selection -process branches components into different intermediate or final classifications +Note: We use two terminologies interchangeably. The whole process is called "component +selection" and much of the code uses variants of that phrase (i.e. the ComponentSelector +class, selection_nodes for the functions used in selection). We call the steps for how +to classify components a "decision tree" since each step in the selection process +branches components into different intermediate or final classifications. -.. contents:: :local: +.. _classification output descriptions: classification output descriptions.html +.. _final denoising step: denoising.html +.. contents:: :local: ****************************************** Expected outputs after component selection @@ -72,11 +85,12 @@ New columns in the ``component_table`` (sometimes a stand alone variable ``compt ``used_metrics``: Saved as a field in the ``tree`` json file - A list of the metrics that were used in the decision tree. This should - match ``necessary_metrics`` which was a predefined list of metrics that - a tree uses. If these don't match, a warning should appear. These might - be useful for future work so that a user can input a tree and metrics - would be calculated based on what's needed to execute the tree. + A list of the metrics that were used in the decision tree. Everything in + ``used_metrics`` should be in either ``necessary_metrics`` or + ``generated_metrics``. If a used metric isn't in either, a warning message + will appear. These may have an additional use for future work so that a + user can input a tree and metrics would be calculated based on what's + needed to execute the tree. ``classification_tags``: Saved as a field in the ``tree`` json file @@ -132,7 +146,7 @@ Defining a custom decision tree Decision trees are stored in json files. The default trees are stored as part of the tedana code repository in ./resources/decision_trees The minimal tree, minimal.json is a good example highlighting the structure and steps in a tree. It may be helpful -to look at that tree while reading this section. kundu.json should replicate the decision tree used in meica version 2.7, +to look at that tree while reading this section. kundu.json should replicate the decision tree used in MEICA version 2.5, the predecessor to tedana. It is a more complex, but also highlights additional possible functionality in decision trees. A user can specify another decision tree and link to the tree location when tedana is executed with the ``--tree`` option. The format is @@ -142,9 +156,9 @@ if violated, but more will just give a warning. If you are designing or editing A decision tree can include two types of nodes or functions. All functions are currently in selection_nodes.py -- A decision function will use existing metrics and potentially change the classification of the components based on those metrics. 
By convention, all these functions should begin with "dec" -- A calculation function will take existing metrics and calculate a value across components to be used for classification, for example the kappa and rho elbows. By convention, all these functions should begin with "calc" -- Nothing prevents a function from both calculating new cross component values and applying those values in a decision step, but following this convention should hopefully make decision tree specifications easier to follow and interpret. +- A decision function will use existing metrics and potentially change the classification of the components based on those metrics. By convention, all these functions begin with "dec" +- A calculation function will take existing metrics and calculate a value across components to be used for classification, for example the kappa and rho elbows. By convention, all these functions begin with "calc" +- Nothing prevents a function from both calculating new cross component values and applying those values in a decision step, but following this convention should hopefully make decision tree specifications easier to follow and results easier to interpret. **Key expectations** @@ -174,13 +188,19 @@ A decision tree can include two types of nodes or functions. All functions are c **Decision node json structure** -There are 6 initial fields, necessary_metrics, intermediate_classification, and classification_tags, as described in the above section: +There are 7 initial fields, necessary_metrics, intermediate_classification, and classification_tags, as described in the above section, and: - "tree_id": a descriptive name for the tree that will be logged. - "info": A brief description of the tree for info logging - "report": A narrative description of the tree that could be used in report logging - "refs" Publications that should be referenced when this tree is used +"generated_metrics" is an optional initial field. It lists metrics that are calculated as part of the decision tree. +This is used similarly to necessary_metrics except, since the decision tree starts before these metrics exist, it +won't raise an error when these metrics are not found. One might want to calculate a new metric if the metric uses +only a subset of the components based on previous classifications. This does make interpretation of results more +confusing, but, since this functionality was part of the kundu decision tree, it is included. + The "nodes" field is a list of elements where each element defines a node in the decision tree. There are several key fields for each of these nodes: - "functionname": The exact function name in selection_nodes.py that will be called. diff --git a/docs/component_table_descriptions.rst b/docs/classification_output_descriptions.rst similarity index 100% rename from docs/component_table_descriptions.rst rename to docs/classification_output_descriptions.rst diff --git a/docs/faq.rst b/docs/faq.rst index 4dc851ca8..7dd013f9b 100644 --- a/docs/faq.rst +++ b/docs/faq.rst @@ -47,6 +47,8 @@ Nevertheless, we have some code (thanks to Julio Peraza) that works for version +.. _fMRIPrep: https://fmriprep.readthedocs.io + Warping scanner-space fMRIPrep outputs to standard space ======================================================== @@ -68,12 +70,13 @@ The standard space template in this example is "MNI152NLin2009cAsym", but will d The TEDICA step may fail to converge if TEDPCA is either too strict (i.e., there are too few components) or too lenient (there are too many). 
-In our experience, this may happen when preprocessing has not been applied to -the data, or when improper steps have been applied to the data (e.g., distortion -correction, rescaling, nuisance regression). +With updates to the ``tedana`` code, this issue is now rare, but it may happen +when preprocessing has not been applied to the data, or when improper steps have +been applied to the data (e.g. rescaling, nuisance regression). If you are confident that your data have been preprocessed correctly prior to applying tedana, and you encounter this problem, please submit a question to `NeuroStars`_. +.. _NeuroStars: https://neurostars.org .. _manual classification: @@ -136,24 +139,32 @@ can include additional criteria. .. _make their own: building\ decision\ trees.html ************************************************************************************* -[tedana] Why isn't v3.2 of the component selection algorithm supported in ``tedana``? +[tedana] What different versions of this method exist? ************************************************************************************* -There is a lot of solid logic behind the updated version of the TEDICA component -selection algorithm, first added to the original ME-ICA codebase `here`_ by Dr. Prantik Kundu. -However, we (the ``tedana`` developers) have encountered certain difficulties -with this method (e.g., misclassified components) and the method itself has yet -to be validated in any papers, posters, etc., which is why we have chosen to archive -the v3.2 code, with the goal of revisiting it when ``tedana`` is more stable. - -Anyone interested in using v3.2 may compile and install an earlier release (<=0.0.4) of ``tedana``. - - -.. _here: https://bitbucket.org/prantikk/me-ica/commits/906bd1f6db7041f88cd0efcac8a74074d673f4f5 - -.. _NeuroStars: https://neurostars.org -.. _fMRIPrep: https://fmriprep.readthedocs.io -.. _afni_proc.py: https://afni.nimh.nih.gov/pub/dist/doc/program_help/afni_proc.py.html +Dr. Prantik Kundu developed a multi-echo ICA (ME-ICA) denoising method and +`shared code on bitbucket`_ to allow others to use the method. A nearly identical +version of this code is `distributed with AFNI as MEICA v2.5 beta 11`_. Most early +publications that validated the MEICA method used variants of this code. That code +runs only on the now defunct python 2.7 and is not under active development. +``tedana``, when run with `--tree kundu --tedpca kundu` (or `--tedpca kundu-stabilize`), +uses the same core algorithm as in MEICA v2.5. Since ICA is a nondeterministic +algorithm and ``tedana`` and MEICA use different PCA and ICA code, the algorithm will +mostly be the same, but the results will not be identical. + +Prantik Kundu also worked on `MEICA v3.2`_ (also for python v2.7). The underlying ICA +step is very similar, but the component selection process was different. While this +new approach has potentially useful ideas, the early ``tedana`` developers experienced +non-trivial component misclassifications and there were no publications that +validated this method. That is why ``tedana`` replicated the established and validated +MEICA v2.5 method and also includes options to integrate additional component selection +methods. Recently Prantik has started to work on `MEICA v3.3`_ (for python >=v3.7) so +that this version of the selection process would again be possible to run. +.. _shared code on bitbucket: https://bitbucket.org/prantikk/me-ica/src/experimental +.. 
_distributed with AFNI as MEICA v2.5 beta 11: https://github.com/afni/afni/tree/master/src/pkundu +.. _MEICA v3.2: https://github.com/ME-ICA/me-ica/tree/53191a7e8838788acf837fdf7cb3026efadf49ac +.. _MEICA v3.3: https://github.com/ME-ICA/me-ica/tree/ME-ICA_v3.3.0 ******************************************************************* diff --git a/docs/index.rst b/docs/index.rst index 3e33daa15..670136bcd 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -190,7 +190,7 @@ tedana is licensed under GNU Lesser General Public License version 2.1. dependence_metrics output_file_descriptions - component_table_descriptions + classification_output_descriptions ****************** diff --git a/docs/outputs.rst b/docs/outputs.rst index c67031cd8..bec73edbf 100644 --- a/docs/outputs.rst +++ b/docs/outputs.rst @@ -15,16 +15,16 @@ future processing. `descriptions of these output files are here`_. .. _descriptions of these output files are here: output_file_descriptions.html -**************** -Component tables -**************** +******************************************* +Component tables and classification outputs +******************************************* TEDPCA and TEDICA use component tables to track relevant metrics, component classifications, and rationales behind classifications. -The component tables are stored as tsv files for BIDS-compatibility. -`Full descriptions of these outputs are here`_. +The component tables and additional information are stored as tsv and json files. +`A full description of these outputs is here`_. -.. _Full descriptions of these outputs are here: component_table_descriptions.html +.. _A full description of these outputs is here: classification_output_descriptions.html ********************* diff --git a/tedana/selection/selection_nodes.py b/tedana/selection/selection_nodes.py index 48cfbe5af..542d65bae 100644 --- a/tedana/selection/selection_nodes.py +++ b/tedana/selection/selection_nodes.py @@ -769,9 +769,9 @@ Note ---- This function is currently hard coded for a specific way to calculate the kappa elbow - based on the method by Kundu in the MEICA v2.7 code. This uses the minimum of + based on the method by Kundu in the MEICA v2.5 code. This uses the minimum of a kappa elbow calculation on all components and on a subset of kappa values below - a significance threshold. To get the same functionality as in MEICA v2.7, + a significance threshold. To get the same functionality as in MEICA v2.5, decide_comps must be 'all'. """ @@ -881,8 +881,8 @@ Note ---- This script is currently hard coded for a specific way to calculate the rho elbow - based on the method by Kundu in the MEICA v2.7 code. To get the same functionality - in MEICA v2.7, decide_comps must be 'all' and subset_decide_comps must be + based on the method by Kundu in the MEICA v2.5 code. To get the same functionality + in MEICA v2.5, decide_comps must be 'all' and subset_decide_comps must be 'unclassified' See :obj:`tedana.selection.selection_utils.rho_elbow_kundu_liberal` for a more detailed explanation of the difference between the kundu and liberal options. 
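The classification outputs consolidated by the docs changes above (a component table plus a per-node status table, both tsv) are plain tabular files, so tracing why a component was rejected needs nothing more than pandas. A hedged sketch, assuming placeholder file names in place of whatever your run wrote out:

    import pandas as pd

    # Placeholder names; use the metrics and status-table tsv files from
    # your own tedana output directory.
    metrics = pd.read_csv("metrics.tsv", sep="\t")
    status = pd.read_csv("status_table.tsv", sep="\t")

    # Final labels and the human-readable tags explaining them.
    print(metrics[["classification", "classification_tags"]])

    # One row per component: its classification after each node that
    # could have changed it.
    print(status.iloc[0])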
diff --git a/tedana/selection/selection_utils.py b/tedana/selection/selection_utils.py index 08e2281c3..a947eb16a 100644 --- a/tedana/selection/selection_utils.py +++ b/tedana/selection/selection_utils.py @@ -577,7 +577,7 @@ def getelbow(arr, return_val=False): def kappa_elbow_kundu(component_table, n_echos, comps2use=None): """ Calculate an elbow for kappa using the approach originally in - Prantik Kundu's MEICA v2.7 code + Prantik Kundu's MEICA v2.5 code Parameters ---------- @@ -649,7 +649,7 @@ def rho_elbow_kundu_liberal( ): """ Calculate an elbow for rho using the approach originally in - Prantik Kundu's MEICA v2.7 code and with a slightly more + Prantik Kundu's MEICA v2.5 code and with a slightly more liberal threshold Parameters diff --git a/tedana/workflows/tedana.py b/tedana/workflows/tedana.py index a0c1ff3d6..1b293680d 100644 --- a/tedana/workflows/tedana.py +++ b/tedana/workflows/tedana.py @@ -144,6 +144,8 @@ def _get_parser(): "PCA decomposition with the mdl, kic and aic options " "is based on a Moving Average (stationary Gaussian) " "process and are ordered from most to least aggressive. " + "'kundu' or 'kundu-stabilize' are selection methods that " + "were distributed with MEICA. " "Users may also provide a float from 0 to 1, " "in which case components will be selected based on the " "cumulative variance explained or an integer greater than 1" From e5fdec1d068cdc2f689cd6fb2a7d51f1bc72be34 Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Thu, 15 Dec 2022 10:25:16 -0500 Subject: [PATCH 087/177] Get rid of optional method keyword --- tedana/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tedana/utils.py b/tedana/utils.py index 131fcb4ad..1a5fd3329 100644 --- a/tedana/utils.py +++ b/tedana/utils.py @@ -76,7 +76,7 @@ def make_adaptive_mask(data, mask=None, getsum=False, threshold=1): # get 33rd %ile of `first_echo` and find corresponding index # NOTE: percentile is arbitrary - perc = np.percentile(first_echo, 33, method="higher") + perc = np.percentile(first_echo, 33) perc_val = echo_means[:, 0] == perc # extract values from all echos at relevant index From 4937f7666a231cc1a08fda54198cf6736f286af4 Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Thu, 15 Dec 2022 10:28:52 -0500 Subject: [PATCH 088/177] Revert "Get rid of optional method keyword" This reverts commit e5fdec1d068cdc2f689cd6fb2a7d51f1bc72be34. --- tedana/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tedana/utils.py b/tedana/utils.py index 1a5fd3329..131fcb4ad 100644 --- a/tedana/utils.py +++ b/tedana/utils.py @@ -76,7 +76,7 @@ def make_adaptive_mask(data, mask=None, getsum=False, threshold=1): # get 33rd %ile of `first_echo` and find corresponding index # NOTE: percentile is arbitrary - perc = np.percentile(first_echo, 33) + perc = np.percentile(first_echo, 33, method="higher") perc_val = echo_means[:, 0] == perc # extract values from all echos at relevant index From b9e17fba1301c51ed22142a1b750163c89df81ba Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Thu, 15 Dec 2022 10:28:57 -0500 Subject: [PATCH 089/177] Revert "Updates percentile call" This reverts commit 9d6a4872c3b1223938a7a3019e287032a3931bd8. 
--- tedana/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tedana/utils.py b/tedana/utils.py index 131fcb4ad..0248f9f2a 100644 --- a/tedana/utils.py +++ b/tedana/utils.py @@ -76,7 +76,7 @@ def make_adaptive_mask(data, mask=None, getsum=False, threshold=1): # get 33rd %ile of `first_echo` and find corresponding index # NOTE: percentile is arbitrary - perc = np.percentile(first_echo, 33, method="higher") + perc = np.percentile(first_echo, 33, interpolation="higher") perc_val = echo_means[:, 0] == perc # extract values from all echos at relevant index From c96ad838f9e7db87b5234951a58aaee835a0e647 Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Thu, 15 Dec 2022 10:31:37 -0500 Subject: [PATCH 090/177] Revert "Update line terminator" This reverts commit 8cf697cb42e2cc24eee3ceb8999e7d914f67c166. --- tedana/io.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tedana/io.py b/tedana/io.py index 222908c01..045dce22d 100644 --- a/tedana/io.py +++ b/tedana/io.py @@ -329,7 +329,7 @@ def save_tsv(self, data, name): raise TypeError(f"data must be pd.Data, not type {data_type}.") # Replace blanks with numpy NaN deblanked = data.replace("", np.nan) - deblanked.to_csv(name, sep="\t", lineterminator="\n", na_rep="n/a", index=False) + deblanked.to_csv(name, sep="\t", line_terminator="\n", na_rep="n/a", index=False) def save_self(self): fname = self.save_file(self.registry, "registry json") From 0505349ff2c94ceb04d38af9692e04fb5d3a03fe Mon Sep 17 00:00:00 2001 From: Taylor Salo Date: Thu, 15 Dec 2022 12:02:34 -0500 Subject: [PATCH 091/177] Autodocument ComponentSelector methods/attributes (#20) * Rename ComponentSelector module. * Document the ComponentSelector directly. --- docs/api.rst | 8 +++++++- .../{ComponentSelector.py => component_selector.py} | 0 tedana/selection/selection_nodes.py | 4 ++-- tedana/selection/selection_utils.py | 10 +++++----- tedana/selection/tedica.py | 4 ++-- 5 files changed, 16 insertions(+), 10 deletions(-) rename tedana/selection/{ComponentSelector.py => component_selector.py} (100%) diff --git a/docs/api.rst b/docs/api.rst index 6e72d913a..155d7ad71 100644 --- a/docs/api.rst +++ b/docs/api.rst @@ -122,12 +122,18 @@ API :toctree: generated/ :template: module.rst - tedana.selection.ComponentSelector + tedana.selection.component_selector tedana.selection.selection_nodes tedana.selection.selection_utils tedana.selection.tedica tedana.selection.tedpca +.. autosummary:: + :toctree: generated/ + :template: class.rst + + tedana.selection.component_selector.ComponentSelector + .. _api_gscontrol_ref: ********************************************** diff --git a/tedana/selection/ComponentSelector.py b/tedana/selection/component_selector.py similarity index 100% rename from tedana/selection/ComponentSelector.py rename to tedana/selection/component_selector.py diff --git a/tedana/selection/selection_nodes.py b/tedana/selection/selection_nodes.py index 542d65bae..20fc251ca 100644 --- a/tedana/selection/selection_nodes.py +++ b/tedana/selection/selection_nodes.py @@ -27,7 +27,7 @@ decision_docs = { "selector": """\ -selector: :obj:`tedana.selection.ComponentSelector` +selector: :obj:`tedana.selection.component_selector.ComponentSelector` The selector to perform decision tree-based component selection with.""", "ifTrueFalse": """\ ifTrue: :obj:`str` @@ -62,7 +62,7 @@ tag_ifFalse: :obj:`str` The classification tag to apply if a component is classified False. 
Default="".""", "basicreturns": """\ -selector: :obj:`tedana.selection.ComponentSelector` +selector: :obj:`tedana.selection.component_selector.ComponentSelector` If only_used_metrics is False, the updated selector is returned used_metrics: :obj:`set(str)` If only_used_metrics is True, the names of the metrics used in the diff --git a/tedana/selection/selection_utils.py b/tedana/selection/selection_utils.py index a947eb16a..4e946108d 100644 --- a/tedana/selection/selection_utils.py +++ b/tedana/selection/selection_utils.py @@ -24,7 +24,7 @@ def selectcomps2use(selector, decide_comps): Parameters ---------- - selector: :obj:`tedana.selection.ComponentSelector` + selector: :obj:`tedana.selection.component_selector.ComponentSelector` Only uses the component_table in this object decide_comps: :obj:`str` or :obj:`list[str]` or :obj:`list[int]` This is string or a list of strings describing what classifications @@ -104,7 +104,7 @@ def change_comptable_classifications( Parameters ---------- - selector: :obj:`tedana.selection.ComponentSelector` + selector: :obj:`tedana.selection.component_selector.ComponentSelector` The attributes used are component_table, component_status_table, and current_node_idx ifTrue, ifFalse: :obj:`str` @@ -127,7 +127,7 @@ def change_comptable_classifications( Returns ------- - selector: :obj:`tedana.selection.ComponentSelector` + selector: :obj:`tedana.selection.component_selector.ComponentSelector` component_table["classifications"] will reflect any new classifications. component_status_table will have a new column titled @@ -184,7 +184,7 @@ def comptable_classification_changer( Parameters ---------- - selector: :obj:`tedana.selection.ComponentSelector` + selector: :obj:`tedana.selection.component_selector.ComponentSelector` The attributes used are component_table, component_status_table, and current_node_idx boolstate : :obj:`bool` @@ -211,7 +211,7 @@ def comptable_classification_changer( warning is suppressed. default=False Returns ------- - selector: :obj:`tedana.selection.ComponentSelector` + selector: :obj:`tedana.selection.component_selector.ComponentSelector` Operates on the True OR False components depending on boolstate component_table["classifications"] will reflect any new classifications. 
diff --git a/tedana/selection/tedica.py b/tedana/selection/tedica.py index 3331136f4..5388522a9 100644 --- a/tedana/selection/tedica.py +++ b/tedana/selection/tedica.py @@ -4,7 +4,7 @@ import logging from tedana.metrics import collect -from tedana.selection.ComponentSelector import ComponentSelector +from tedana.selection.component_selector import ComponentSelector LGR = logging.getLogger("GENERAL") RepLGR = logging.getLogger("REPORT") @@ -25,7 +25,7 @@ def automatic_selection(component_table, n_echos, n_vols, tree="kundu"): Returns ------- - selector: :obj:`tedana.selection.ComponentSelector` + selector: :obj:`tedana.selection.component_selector.ComponentSelector` Contains component classifications in a component_table and provenance and metadata from the component selection process From 254b26a05bcefbc94cd63a724f1e31a792aad9bb Mon Sep 17 00:00:00 2001 From: handwerkerd Date: Thu, 15 Dec 2022 16:27:59 -0500 Subject: [PATCH 092/177] fixed rename of component_selector --- tedana/selection/component_selector.py | 24 +++++++------- tedana/tests/test_ComponentSelector.py | 44 +++++++++++++------------- tedana/tests/test_selection_utils.py | 2 +- tedana/workflows/tedana_reclassify.py | 2 +- 4 files changed, 36 insertions(+), 36 deletions(-) diff --git a/tedana/selection/component_selector.py b/tedana/selection/component_selector.py index 024ea6012..af0f6c8a1 100644 --- a/tedana/selection/component_selector.py +++ b/tedana/selection/component_selector.py @@ -328,7 +328,7 @@ def __init__(self, tree, component_table, cross_component_metrics={}, status_tab def select(self): """ - Using the validated tree in `ComponentSelector` run the decision + Using the validated tree in `ComponentSelector` to run the decision tree functions to calculate cross_component metrics and classify each component as accepted or rejected. @@ -341,21 +341,21 @@ def select(self): the calculated metrics This can be used on a component_table with no component classifications or to alter - classifications and on a component_table that was already run (i.e. for manual + classifications on a component_table that was already run (i.e. 
for manual classification changes after visual inspection)
     When this is run, multiple elements in `ComponentSelector` will change including:
     - component_table: `classification` column with `accepted` or `rejected labels`
-      and `classification_tags` column with can hold multiple labels explaining why
-      a classification happened
+      and `classification_tags` column which can hold multiple comma-separated labels
+      explaining why a classification happened
     - cross_component_metrics: Any values that were calculated based on the metric
-      values across components or by direct user input
-    - component_status_table: Contains the classification statuses at each node
-      in the decision tree
+      values across components or by direct user input
+    - component_status_table: Contains the classification statuses at each node in
+      the decision tree
     - used_metrics: A list of metrics used in the selection process
-    - nodes: The original tree definition with an added `outputs` key
-      listing everything that changed in each node
+    - nodes: The original tree definition with an added `outputs` key listing
+      everything that changed in each node
     - current_node_idx: The total number of nodes run in `ComponentSelector`
     """
@@ -421,7 +421,7 @@ def add_manual(self, indices, classification):
     indices: :obj:`list[int]`
         The indices to manually classify
     classification: :obj:`str`
-        The classification to set the nodes to
+        The classification to set the nodes to (i.e. accepted or rejected)
     """
     self.tree["nodes"].append(
         {
@@ -467,7 +467,7 @@ def check_null(self, params, fcn):
 def are_only_necessary_metrics_used(self):
     """
     Check if all metrics that are declared as necessary are actually
-    used and if any used_metrics weren't explicitly declared necessary
+    used and if any used_metrics weren't explicitly declared necessary.
     If either of these happen, a warning is added to the logger
     """
     if "generated_metrics" in self.tree.keys():
@@ -490,7 +490,7 @@ def are_all_components_accepted_or_rejected(self):
     """
     After the tree has finished executing, check if all component
-    classifications are either "accepted" or "rejected"
+    classifications are either "accepted" or "rejected".
If any other component classifications remain, log a warning """ component_classifications = set(self.component_table["classification"].to_list()) diff --git a/tedana/tests/test_ComponentSelector.py b/tedana/tests/test_ComponentSelector.py index 957498295..61ff19f45 100644 --- a/tedana/tests/test_ComponentSelector.py +++ b/tedana/tests/test_ComponentSelector.py @@ -7,7 +7,7 @@ import pandas as pd import pytest -from tedana.selection import ComponentSelector +from tedana.selection import component_selector THIS_DIR = os.path.dirname(os.path.abspath(__file__)) @@ -40,7 +40,7 @@ def dicts_to_test(treechoice): Returns ------- - tree: :ojb:`dict` A dict that can be input into ComponentSelector.validate_tree + tree: :ojb:`dict` A dict that can be input into component_selector.validate_tree """ # valid_dict is a simple valid dictionary to test @@ -136,7 +136,7 @@ def dicts_to_test(treechoice): # ---------------------------------------------------------------------- -# ComponentSelector Tests +# component_selector Tests # ---------------------------------------------------------------------- # load_config @@ -146,11 +146,11 @@ def test_load_config_fails(): # We recast to ValueError in the file not found and directory cases with pytest.raises(ValueError): - ComponentSelector.load_config("THIS FILE DOES NOT EXIST.txt") + component_selector.load_config("THIS FILE DOES NOT EXIST.txt") # Raises IsADirectoryError for a directory with pytest.raises(ValueError): - ComponentSelector.load_config(".") + component_selector.load_config(".") # Note: we defer validation errors for validate_tree even though # load_config may raise them @@ -160,7 +160,7 @@ def test_load_config_succeeds(): """Tests to make sure load_config succeeds""" # The minimal tree should have an id of "minimal_decision_tree_test1" - tree = ComponentSelector.load_config("minimal") + tree = component_selector.load_config("minimal") assert tree["tree_id"] == "minimal_decision_tree_test1" @@ -169,7 +169,7 @@ def test_minimal(): xcomp = { "n_echos": 3, } - tree = ComponentSelector.ComponentSelector( + tree = component_selector.ComponentSelector( "minimal", sample_comptable(), cross_component_metrics=xcomp, @@ -198,7 +198,7 @@ def test_validate_tree_succeeds(): for tree_name in default_tree_names: f = open(tree_name) tree = json.load(f) - assert ComponentSelector.validate_tree(tree) + assert component_selector.validate_tree(tree) # Test a few extra possabilities just using the minimal.json tree if "/minimal.json" in tree_name: @@ -206,7 +206,7 @@ def test_validate_tree_succeeds(): tree["reconstruct_from"] = "testinput" # Need to test handling of the tag_ifFalse kwarg somewhere tree["nodes"][1]["kwargs"]["tag_ifFalse"] = "testing tag" - assert ComponentSelector.validate_tree(tree) + assert component_selector.validate_tree(tree) def test_validate_tree_warnings(): @@ -216,7 +216,7 @@ def test_validate_tree_warnings(): """ # A tree that raises all possible warnings in the validator should still be valid - assert ComponentSelector.validate_tree(dicts_to_test("valid")) + assert component_selector.validate_tree(dicts_to_test("valid")) def test_validate_tree_fails(): @@ -227,25 +227,25 @@ def test_validate_tree_fails(): """ # An empty dict should not be valid - with pytest.raises(ComponentSelector.TreeError): - ComponentSelector.validate_tree({}) + with pytest.raises(component_selector.TreeError): + component_selector.validate_tree({}) # A tree that is missing a required key should not be valid - with pytest.raises(ComponentSelector.TreeError): - 
ComponentSelector.validate_tree(dicts_to_test("missing_key")) + with pytest.raises(component_selector.TreeError): + component_selector.validate_tree(dicts_to_test("missing_key")) # Calling a selection node function that does not exist should not be valid - with pytest.raises(ComponentSelector.TreeError): - ComponentSelector.validate_tree(dicts_to_test("missing_function")) + with pytest.raises(component_selector.TreeError): + component_selector.validate_tree(dicts_to_test("missing_function")) # Calling a function with an non-existent required parameter should not be valid - with pytest.raises(ComponentSelector.TreeError): - ComponentSelector.validate_tree(dicts_to_test("extra_req_param")) + with pytest.raises(component_selector.TreeError): + component_selector.validate_tree(dicts_to_test("extra_req_param")) # Calling a function with an non-existent optional parameter should not be valid - with pytest.raises(ComponentSelector.TreeError): - ComponentSelector.validate_tree(dicts_to_test("extra_opt_param")) + with pytest.raises(component_selector.TreeError): + component_selector.validate_tree(dicts_to_test("extra_opt_param")) # Calling a function missing a required parameter should not be valid - with pytest.raises(ComponentSelector.TreeError): - ComponentSelector.validate_tree(dicts_to_test("missing_req_param")) + with pytest.raises(component_selector.TreeError): + component_selector.validate_tree(dicts_to_test("missing_req_param")) diff --git a/tedana/tests/test_selection_utils.py b/tedana/tests/test_selection_utils.py index e4d6f3c1a..9006b6126 100644 --- a/tedana/tests/test_selection_utils.py +++ b/tedana/tests/test_selection_utils.py @@ -6,7 +6,7 @@ import pytest from tedana.selection import selection_utils -from tedana.selection.ComponentSelector import ComponentSelector +from tedana.selection.component_selector import ComponentSelector THIS_DIR = os.path.dirname(os.path.abspath(__file__)) diff --git a/tedana/workflows/tedana_reclassify.py b/tedana/workflows/tedana_reclassify.py index d0fe8834c..7ad092010 100644 --- a/tedana/workflows/tedana_reclassify.py +++ b/tedana/workflows/tedana_reclassify.py @@ -282,7 +282,7 @@ def post_tedana( ) # Make a new selector with the added files - selector = selection.ComponentSelector.ComponentSelector( + selector = selection.component_selector.ComponentSelector( previous_tree_fname, comptable, cross_component_metrics=xcomp, status_table=status_table ) From 6389822d1025f3705b3e703f4783a50e7c6fec15 Mon Sep 17 00:00:00 2001 From: Dan Handwerker <7406227+handwerkerd@users.noreply.github.com> Date: Fri, 16 Dec 2022 12:45:05 -0500 Subject: [PATCH 093/177] Fixed remaining transition to component_selector (#21) * working on selector init documentation * Breaking up outputs.rst * partially updated output_file_descriptions.rst * changed n_bold_comps to n_accepted_comps * n_bold_comps to n_accepted_comps * ComponentSelector.py API docs cleaned up * selection_nodes decision_docs updated * selection_nodes docstrings cleaned up * Fixed a test for selection_nodes * Updated faq for tedana_reclassify and tree options * docstrings in tedica and other small updates * Updated docstrings in selection_utils.py * Update docs/output_file_descriptions.rst * more doc updates * fixed meica to v2.5 in docstrings * docs building again * more updates to building decision trees * fixed rename of component_selector Co-authored-by: Joshua Teves --- tedana/selection/component_selector.py | 24 +++++++------- tedana/tests/test_ComponentSelector.py | 44 +++++++++++++------------- 
tedana/tests/test_selection_utils.py | 2 +- tedana/workflows/tedana_reclassify.py | 2 +- 4 files changed, 36 insertions(+), 36 deletions(-)
diff --git a/tedana/selection/component_selector.py b/tedana/selection/component_selector.py
index 024ea6012..af0f6c8a1 100644
--- a/tedana/selection/component_selector.py
+++ b/tedana/selection/component_selector.py
@@ -328,7 +328,7 @@ def __init__(self, tree, component_table, cross_component_metrics={}, status_tab
 def select(self):
     """
-    Using the validated tree in `ComponentSelector` run the decision
+    Using the validated tree in `ComponentSelector` to run the decision
     tree functions to calculate cross_component metrics and classify
     each component as accepted or rejected.
@@ -341,21 +341,21 @@ def select(self):
     the calculated metrics
     This can be used on a component_table with no component classifications or to alter
-    classifications and on a component_table that was already run (i.e. for manual
+    classifications on a component_table that was already run (i.e. for manual classification changes after visual inspection)
     When this is run, multiple elements in `ComponentSelector` will change including:
     - component_table: `classification` column with `accepted` or `rejected labels`
-      and `classification_tags` column with can hold multiple labels explaining why
-      a classification happened
+      and `classification_tags` column which can hold multiple comma-separated labels
+      explaining why a classification happened
     - cross_component_metrics: Any values that were calculated based on the metric
-      values across components or by direct user input
-    - component_status_table: Contains the classification statuses at each node
-      in the decision tree
+      values across components or by direct user input
+    - component_status_table: Contains the classification statuses at each node in
+      the decision tree
     - used_metrics: A list of metrics used in the selection process
-    - nodes: The original tree definition with an added `outputs` key
-      listing everything that changed in each node
+    - nodes: The original tree definition with an added `outputs` key listing
+      everything that changed in each node
     - current_node_idx: The total number of nodes run in `ComponentSelector`
     """
@@ -421,7 +421,7 @@ def add_manual(self, indices, classification):
     indices: :obj:`list[int]`
         The indices to manually classify
     classification: :obj:`str`
-        The classification to set the nodes to
+        The classification to set the nodes to (i.e. accepted or rejected)
     """
     self.tree["nodes"].append(
         {
@@ -467,7 +467,7 @@ def check_null(self, params, fcn):
 def are_only_necessary_metrics_used(self):
     """
     Check if all metrics that are declared as necessary are actually
-    used and if any used_metrics weren't explicitly declared necessary
+    used and if any used_metrics weren't explicitly declared necessary.
     If either of these happen, a warning is added to the logger
     """
     if "generated_metrics" in self.tree.keys():
@@ -490,7 +490,7 @@ def are_all_components_accepted_or_rejected(self):
     """
     After the tree has finished executing, check if all component
-    classifications are either "accepted" or "rejected"
+    classifications are either "accepted" or "rejected".
If any other component classifications remain, log a warning """ component_classifications = set(self.component_table["classification"].to_list()) diff --git a/tedana/tests/test_ComponentSelector.py b/tedana/tests/test_ComponentSelector.py index 957498295..61ff19f45 100644 --- a/tedana/tests/test_ComponentSelector.py +++ b/tedana/tests/test_ComponentSelector.py @@ -7,7 +7,7 @@ import pandas as pd import pytest -from tedana.selection import ComponentSelector +from tedana.selection import component_selector THIS_DIR = os.path.dirname(os.path.abspath(__file__)) @@ -40,7 +40,7 @@ def dicts_to_test(treechoice): Returns ------- - tree: :ojb:`dict` A dict that can be input into ComponentSelector.validate_tree + tree: :ojb:`dict` A dict that can be input into component_selector.validate_tree """ # valid_dict is a simple valid dictionary to test @@ -136,7 +136,7 @@ def dicts_to_test(treechoice): # ---------------------------------------------------------------------- -# ComponentSelector Tests +# component_selector Tests # ---------------------------------------------------------------------- # load_config @@ -146,11 +146,11 @@ def test_load_config_fails(): # We recast to ValueError in the file not found and directory cases with pytest.raises(ValueError): - ComponentSelector.load_config("THIS FILE DOES NOT EXIST.txt") + component_selector.load_config("THIS FILE DOES NOT EXIST.txt") # Raises IsADirectoryError for a directory with pytest.raises(ValueError): - ComponentSelector.load_config(".") + component_selector.load_config(".") # Note: we defer validation errors for validate_tree even though # load_config may raise them @@ -160,7 +160,7 @@ def test_load_config_succeeds(): """Tests to make sure load_config succeeds""" # The minimal tree should have an id of "minimal_decision_tree_test1" - tree = ComponentSelector.load_config("minimal") + tree = component_selector.load_config("minimal") assert tree["tree_id"] == "minimal_decision_tree_test1" @@ -169,7 +169,7 @@ def test_minimal(): xcomp = { "n_echos": 3, } - tree = ComponentSelector.ComponentSelector( + tree = component_selector.ComponentSelector( "minimal", sample_comptable(), cross_component_metrics=xcomp, @@ -198,7 +198,7 @@ def test_validate_tree_succeeds(): for tree_name in default_tree_names: f = open(tree_name) tree = json.load(f) - assert ComponentSelector.validate_tree(tree) + assert component_selector.validate_tree(tree) # Test a few extra possabilities just using the minimal.json tree if "/minimal.json" in tree_name: @@ -206,7 +206,7 @@ def test_validate_tree_succeeds(): tree["reconstruct_from"] = "testinput" # Need to test handling of the tag_ifFalse kwarg somewhere tree["nodes"][1]["kwargs"]["tag_ifFalse"] = "testing tag" - assert ComponentSelector.validate_tree(tree) + assert component_selector.validate_tree(tree) def test_validate_tree_warnings(): @@ -216,7 +216,7 @@ def test_validate_tree_warnings(): """ # A tree that raises all possible warnings in the validator should still be valid - assert ComponentSelector.validate_tree(dicts_to_test("valid")) + assert component_selector.validate_tree(dicts_to_test("valid")) def test_validate_tree_fails(): @@ -227,25 +227,25 @@ def test_validate_tree_fails(): """ # An empty dict should not be valid - with pytest.raises(ComponentSelector.TreeError): - ComponentSelector.validate_tree({}) + with pytest.raises(component_selector.TreeError): + component_selector.validate_tree({}) # A tree that is missing a required key should not be valid - with pytest.raises(ComponentSelector.TreeError): - 
ComponentSelector.validate_tree(dicts_to_test("missing_key")) + with pytest.raises(component_selector.TreeError): + component_selector.validate_tree(dicts_to_test("missing_key")) # Calling a selection node function that does not exist should not be valid - with pytest.raises(ComponentSelector.TreeError): - ComponentSelector.validate_tree(dicts_to_test("missing_function")) + with pytest.raises(component_selector.TreeError): + component_selector.validate_tree(dicts_to_test("missing_function")) # Calling a function with an non-existent required parameter should not be valid - with pytest.raises(ComponentSelector.TreeError): - ComponentSelector.validate_tree(dicts_to_test("extra_req_param")) + with pytest.raises(component_selector.TreeError): + component_selector.validate_tree(dicts_to_test("extra_req_param")) # Calling a function with an non-existent optional parameter should not be valid - with pytest.raises(ComponentSelector.TreeError): - ComponentSelector.validate_tree(dicts_to_test("extra_opt_param")) + with pytest.raises(component_selector.TreeError): + component_selector.validate_tree(dicts_to_test("extra_opt_param")) # Calling a function missing a required parameter should not be valid - with pytest.raises(ComponentSelector.TreeError): - ComponentSelector.validate_tree(dicts_to_test("missing_req_param")) + with pytest.raises(component_selector.TreeError): + component_selector.validate_tree(dicts_to_test("missing_req_param")) diff --git a/tedana/tests/test_selection_utils.py b/tedana/tests/test_selection_utils.py index e4d6f3c1a..9006b6126 100644 --- a/tedana/tests/test_selection_utils.py +++ b/tedana/tests/test_selection_utils.py @@ -6,7 +6,7 @@ import pytest from tedana.selection import selection_utils -from tedana.selection.ComponentSelector import ComponentSelector +from tedana.selection.component_selector import ComponentSelector THIS_DIR = os.path.dirname(os.path.abspath(__file__)) diff --git a/tedana/workflows/tedana_reclassify.py b/tedana/workflows/tedana_reclassify.py index d0fe8834c..7ad092010 100644 --- a/tedana/workflows/tedana_reclassify.py +++ b/tedana/workflows/tedana_reclassify.py @@ -282,7 +282,7 @@ def post_tedana( ) # Make a new selector with the added files - selector = selection.ComponentSelector.ComponentSelector( + selector = selection.component_selector.ComponentSelector( previous_tree_fname, comptable, cross_component_metrics=xcomp, status_table=status_table ) From 075d6e26f58f80a871c77fcb0ea74ba0f2e74034 Mon Sep 17 00:00:00 2001 From: handwerkerd Date: Mon, 19 Dec 2022 16:04:46 -0500 Subject: [PATCH 094/177] more doc updates --- docs/building decision trees.rst | 7 +++--- docs/classification_output_descriptions.rst | 28 ++++++++++++++++----- 2 files changed, 26 insertions(+), 9 deletions(-) diff --git a/docs/building decision trees.rst b/docs/building decision trees.rst index 9cfd388a0..3496cefc6 100644 --- a/docs/building decision trees.rst +++ b/docs/building decision trees.rst @@ -5,14 +5,14 @@ Understanding and building a component selection process This guide is designed for users who want to better understand the mechanics of the component selection process and people who are considering customizing their own decision tree or contributing to ``tedana`` code. We have tried to -make this accessible with minimal jargon, but it is long. If you just want to -better understand what's in the outputs from ``tedana`` start with +make this accessible, but it is long. 
If you just want to better understand +what's in the outputs from ``tedana`` start with `classification output descriptions`_. ``tedana`` involves transforming data into components, currently via ICA, and then calculating metrics for each component. Each metric has one value per component that is stored in a component_table dataframe. This structure is then passed to a -"decision tree" through which a series of binary choices categorizes each component +"decision tree" through which a series of binary choices categorize each component as **accepted** or **rejected**. The time series for the rejected components are regressed from the data in the `final denoising step`_. @@ -98,6 +98,7 @@ New columns in the ``component_table`` (sometimes a stand alone variable ``compt Any reporting interface should use this field so that the tags that are possible are listed even if a given tag is not used by any component by the end of the selection process. +.. _saved in multiple files: output_file_descriptions.html **Outputs of each decision tree step** diff --git a/docs/classification_output_descriptions.rst b/docs/classification_output_descriptions.rst index b75169dd1..a1c54307f 100644 --- a/docs/classification_output_descriptions.rst +++ b/docs/classification_output_descriptions.rst @@ -1,11 +1,13 @@ -############################# -Component table descriptions -############################# +################################## +Classification output descriptions +################################## +In addition to the denoised time series, tedana outputs multiple files that +can be used to subsequent analyses and to better understand one's denoising +results. `In addition to the descriptions of file names`_ this page explains +the contents of several of those files in more detail. -In order to make sense of the rationale codes in the component tables, -consult the tables below. -TEDPCA rationale codes start with a "P", while TEDICA codes start with an "I". +.. _In addition to the descriptions of file names: output_file_descriptions.html =============== ============================================================= Classification Description @@ -20,6 +22,20 @@ ignored Low-variance components included in denoised, but excluded TEDPCA codes ============ +In ``tedana`` PCA is used to reduce the number of dimensions (components) in the +dataset. Without this steps, the number of components would be one less than +the number of volumes, many of those components would effectively be +Gaussian noise and ICA would not reliably converge. Standard methods for data +reduction use cost functions, like AIC, MDL, and KIC to estimate the variance +that is just noise and remove the lowest variance components under that threshold. + +``Tedana`` includes an addition `kundu` approach that identifies and removes +compnents that don't contain T2* or S0 signal and are more likely to be noise. +If the `--tedpca kundu` option is used, the PCA_metrics tsv file will include +an accepted vs rejected classification column and also a column of codes +documenting why a PCA component removed. These are brief explanations of those +codes. 
+ ===== =============== ========================================================
  Code  Classification  Description
  ===== =============== ========================================================
From f174dacd5ecd8eb2ac2e9da56411c1ed5b6b92c6 Mon Sep 17 00:00:00 2001
From: handwerkerd
Date: Tue, 20 Dec 2022 00:21:36 -0500
Subject: [PATCH 095/177] mostly classification_output_descriptions
--- docs/classification_output_descriptions.rst | 115 ++++++++++++-------- docs/conf.py | 2 +- docs/output_file_descriptions.rst | 11 ++ tedana/workflows/tedana.py | 5 +- 4 files changed, 86 insertions(+), 47 deletions(-)
diff --git a/docs/classification_output_descriptions.rst b/docs/classification_output_descriptions.rst
index a1c54307f..d8adf09f2 100644
--- a/docs/classification_output_descriptions.rst
+++ b/docs/classification_output_descriptions.rst
@@ -2,39 +2,32 @@
Classification output descriptions
##################################
-In addition to the denoised time series, tedana outputs multiple files that
-can be used to subsequent analyses and to better understand one's denoising
-results. `In addition to the descriptions of file names`_ this page explains
-the contents of several of those files in more detail.
-
-.. _In addition to the descriptions of file names: output_file_descriptions.html
-
-=============== =============================================================
-Classification  Description
-=============== =============================================================
-accepted        BOLD-like components included in denoised and high-Kappa data
-rejected        Non-BOLD components excluded from denoised and high-Kappa data
-ignored         Low-variance components included in denoised, but excluded
-                from high-Kappa data
-=============== =============================================================
+Tedana outputs multiple files that can be used in subsequent analyses and to
+better understand one's denoising results.
+In addition to the `descriptions of file names`_, this page explains the
+contents of several of those files in more detail.
TEDPCA codes
============
In ``tedana`` PCA is used to reduce the number of dimensions (components) in the
-dataset. Without this steps, the number of components would be one less than
+dataset. Without this step, the number of components would be one less than
the number of volumes, many of those components would effectively be
Gaussian noise and ICA would not reliably converge. Standard methods for data
-reduction use cost functions, like AIC, MDL, and KIC to estimate the variance
+reduction use cost functions, like MDL, KIC, and AIC to estimate the variance
that is just noise and remove the lowest variance components under that threshold.
+By default, ``tedana`` uses AIC. Of those three, AIC is the least aggressive and
+will retain the most components.
-``Tedana`` includes an addition `kundu` approach that identifies and removes
-compnents that don't contain T2* or S0 signal and are more likely to be noise.
+``Tedana`` includes additional `kundu` and `kundu-stabilize` approaches that
+identify and remove components that don't contain T2* or S0 signal and are more
+likely to be noise.
If the `--tedpca kundu` option is used, the PCA_metrics tsv
+file will include an accepted vs rejected classification column and also a
+rationale column of codes documenting why a PCA component was removed. If MDL, KIC,
+or AIC are used then the classification column will exist, but will
+include the accepted components and the rationale column will contain "n/a".
+When kundu is used, these are brief explanations of the rationale codes:
 ===== =============== ========================================================
 Code  Classification  Description
 ===== =============== ========================================================
@@ -50,24 +43,60 @@
P007  rejected        Rho below fmin (only in stabilized PCA decision tree)
===== =============== ========================================================
-TEDICA codes
-============
+ICA Classification Outputs
+==========================
+
+The component table is stored in ``desc-tedana_metrics.tsv`` or
+``tedana_metrics.tsv``. Each row is a component number. Each column is a metric
+that is calculated for each component. Short descriptions of each column metric
+are in the output log, ``tedana_[date_time].tsv``, and the actual metric
+calculations are in `collect.py`_. The final two columns are `classification`
+and `classification_tags`. `classification` should include `accepted` or
+`rejected` for every component and `rejected` components are removed
+through denoising. `classification_tags` provide more information on why
+components received a specific classification. Each component can receive
+more than one tag. The following tags are included depending on whether ``--tree``
+is minimal or kundu, or whether ``tedana_reclassify`` is run.
+
+===================== ================ ========================================
+Tag                   Included in Tree Explanation
+===================== ================ ========================================
+Likely BOLD           minimal,kundu    Accepted because likely to include some
+                                       BOLD signal
+Unlikely BOLD         minimal,kundu    Rejected because likely to include a
+                                       lot of non-BOLD signal
+Low variance          minimal,kundu    Accepted because too low variance to
+                                       lose a degree-of-freedom by rejecting
+Less likely BOLD      kundu            Rejected based on some edge criteria
+                                       based on relative rankings of components
+Accept borderline     kundu            Accepted based on some edge criteria
+                                       based on relative rankings of components
+No provisional accept kundu            Accepted because the kundu tree did
+                                       not find any components to consider
+                                       accepting so the conservative "failure"
+                                       case is accept everything rather than
+                                       rejecting everything
+manual reclassify     manual_classify  Classification based on user input. If
+                                       done after automatic selection then
+                                       the preceding tag from automatic
+                                       selection is retained and this tag
+                                       notes the classification was manually
+                                       changed
+===================== ================ ========================================
+
+The decision tree is a list of nodes where the classification of each component
+could change. The information on which nodes and how classifications changed is
+in several places:
+
+- The information in the output log includes the name of each
+  node and the count of components that changed classification during execution.
+- The same information is stored in the `ICA decision tree` json file (see
+  `descriptions of file names`_) in the "output" field for each node. That information
+  is organized so that it can be used to generate a visual or text-based summary of
+  what happened when the decision tree was run on a dataset.
+- The `ICA status table` lists the classification status of each component after
+  each node was run. This is particularly useful when trying to understand how a
+  specific component ended up receiving its classification.
-===== ================= ========================================================
-Code  Classification    Description
-===== ================= ========================================================
-I001  rejected|accepted Manual classification
-I002  rejected          Rho greater than Kappa
-I003  rejected          More significant voxels in S0 model than R2 model
-I004  rejected          S0 Dice is higher than R2 Dice and high variance
-                        explained
-I005  rejected          Noise F-value is higher than signal F-value and high
-                        variance explained
-I006  ignored           No good components found
-I007  rejected          Mid-Kappa component
-I008  ignored           Low variance explained
-I009  rejected          Mid-Kappa artifact type A
-I010  rejected          Mid-Kappa artifact type B
-I011  ignored           ign_add0
-I012  ignored           ign_add1
-===== ================= ========================================================
+.. _collect.py: https://github.com/ME-ICA/tedana/blob/main/tedana/metrics/collect.py
+.. _descriptions of file names: output_file_descriptions.html
\ No newline at end of file
diff --git a/docs/conf.py b/docs/conf.py
index eb699bca2..fd67829b9 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -96,7 +96,7 @@
# #
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
-language = 'en'
+language = "en"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
diff --git a/docs/output_file_descriptions.rst b/docs/output_file_descriptions.rst
index ff7c934ca..f2a2dd8a7 100644
--- a/docs/output_file_descriptions.rst
+++ b/docs/output_file_descriptions.rst
@@ -69,6 +69,17 @@ Key: Filename Con
                                                                        decomposition.
"ICA metrics json": desc-tedana_metrics.json                           Metadata about the metrics in ``desc-tedana_metrics.tsv``.
+"ICA cross component metrics json": desc-ICACrossComponent_metrics.json Metric names and values that are each a single number
+                                                                        calculated across components. For example, kappa and
+                                                                        rho elbows.
+"ICA decision tree json": desc-ICA_decision_tree.json                   A copy of the inputted decision tree specification with
+                                                                        an added "output" field for each node. The output field
+                                                                        contains information about what happened during
+                                                                        execution.
+"ICA status table tsv": desc-ICA_status_table.tsv                       A table where each column lists the classification
+                                                                        status of each component after each node was run.
+                                                                        Columns are only added for runs where component
+                                                                        statuses can change.
"ICA accepted components img": desc-ICAAccepted_components.nii.gz      High-kappa ICA coefficient feature set
"z-scored ICA accepted components img": desc-ICAAcceptedZ_components.nii.gz Z-normalized spatial component maps
report.txt                                                              A summary report for the workflow with relevant
diff --git a/tedana/workflows/tedana.py b/tedana/workflows/tedana.py
index 1b293680d..6d627a3a7 100644
--- a/tedana/workflows/tedana.py
+++ b/tedana/workflows/tedana.py
@@ -149,9 +149,8 @@ def _get_parser():
             "Users may also provide a float from 0 to 1, "
             "in which case components will be selected based on the "
             "cumulative variance explained or an integer greater than 1"
-            "in which case the specificed number of components will be"
-            "selected."
-            "Default='aic'."
+            "in which case the specified number of components will be "
+            "selected. Default='aic'."
), default="aic", ) From fc3c2a005159abf7ea251c87f5645db3272a0fe4 Mon Sep 17 00:00:00 2001 From: handwerkerd Date: Tue, 20 Dec 2022 09:57:34 -0500 Subject: [PATCH 096/177] Fixed io API and selector API warnings --- docs/api.rst | 24 ++++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/docs/api.rst b/docs/api.rst index 155d7ad71..dfe62e68c 100644 --- a/docs/api.rst +++ b/docs/api.rst @@ -120,19 +120,26 @@ API .. autosummary:: :toctree: generated/ - :template: module.rst + :template: class.rst + + tedana.selection.component_selector.ComponentSelector + tedana.selection.component_selector.TreeError + + :template: function.rst + + tedana.selection.component_selector.load_config + tedana.selection.component_selector.validate_tree - tedana.selection.component_selector +.. autosummary:: + :toctree: generated/ + :template: module.rst + tedana.selection.selection_nodes tedana.selection.selection_utils tedana.selection.tedica tedana.selection.tedpca -.. autosummary:: - :toctree: generated/ - :template: class.rst - tedana.selection.component_selector.ComponentSelector .. _api_gscontrol_ref: @@ -171,11 +178,16 @@ API :template: class.rst tedana.io.OutputGenerator + tedana.io.InputHarvester + tedana.io.CustomEncoder :template: function.rst tedana.io.load_data + tedana.io.load_json + tedana.io.get_fields tedana.io.new_nii_like + tedana.io.prep_data_for_json tedana.io.add_decomp_prefix tedana.io.denoise_ts tedana.io.split_ts From fd2dd960a35e661b63de5e03beeab40b4206c377 Mon Sep 17 00:00:00 2001 From: handwerkerd Date: Tue, 20 Dec 2022 10:01:41 -0500 Subject: [PATCH 097/177] message message --- docs/classification_output_descriptions.rst | 3 +++ tedana/io.py | 4 ++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/docs/classification_output_descriptions.rst b/docs/classification_output_descriptions.rst index d8adf09f2..c1eb241c2 100644 --- a/docs/classification_output_descriptions.rst +++ b/docs/classification_output_descriptions.rst @@ -6,7 +6,10 @@ Tedana outputs multiple files that can be used to subsequent analyses and to better understand one's denoising results. In addition to the `descriptions of file names`_ this page explains the contents of several of those files in more detail. +`Building decision trees`_ covers the full process, and not just the +descriptions of outputted files, in more detail. +.. 
_Building decision trees: building\ decision\ trees.html TEDPCA codes ============ diff --git a/tedana/io.py b/tedana/io.py index 045dce22d..aca75f377 100644 --- a/tedana/io.py +++ b/tedana/io.py @@ -25,7 +25,7 @@ class CustomEncoder(json.JSONEncoder): - """Convert some types because of JSON serialization and numpy + """Class for converting some types because of JSON serialization and numpy incompatibilities See here: https://stackoverflow.com/q/50916422/2589328 @@ -337,7 +337,7 @@ def save_self(self): class InputHarvester: - """Turns a registry file into a lookup table to get previous data.""" + """Class for turning a registry file into a lookup table to get previous data.""" loaders = { "json": lambda f: load_json(f), From 0c28c686057ab822221bdd51d5a9452e81a29159 Mon Sep 17 00:00:00 2001 From: handwerkerd Date: Tue, 20 Dec 2022 21:52:04 -0500 Subject: [PATCH 098/177] key parts of docs all updated --- docs/building decision trees.rst | 387 ++++++++++++--------- docs/output_file_descriptions.rst | 89 ++--- tedana/resources/config/outputs.json | 4 +- tedana/resources/decision_trees/kundu.json | 4 +- tedana/selection/selection_nodes.py | 2 +- 5 files changed, 282 insertions(+), 204 deletions(-) diff --git a/docs/building decision trees.rst b/docs/building decision trees.rst index 3496cefc6..562944e7a 100644 --- a/docs/building decision trees.rst +++ b/docs/building decision trees.rst @@ -28,7 +28,7 @@ class, selection_nodes for the functions used in selection). We call the steps f to classify components a "decision tree" since each step in the selection process branches components into different intermediate or final classifications. -.. _classification output descriptions: classification output descriptions.html +.. _classification output descriptions: classification_output_descriptions.html .. _final denoising step: denoising.html .. contents:: :local: @@ -37,80 +37,78 @@ branches components into different intermediate or final classifications. Expected outputs after component selection ****************************************** -**All of these are stored in the ComponentSelector object and saved in multiple files** +During processing, everything is stored in a `ComponentSelector object`_ called +``selector``. The elements of that object are then saved to multiple files. +The file key names are used below the full file names in the +`output file descriptions`_. +.. _ComponentSelector object: generated/tedana.selection.component_selector.ComponentSelector.html +.. _output file descriptions: output_file_descriptions.html -New columns in the ``component_table`` (sometimes a stand alone variable ``comptable`` in other parts of the code): - The default file name for the component table is: ``desc-tedana_metrics.tsv`` +**General outputs from component selection** + +New columns in ``selector.component_table`` and the "ICA metrics tsv" file: classification: - In the final table, the only values should be 'accepted' or 'rejected'. While the decision table is running, there may also be intermediate - classification labels. Note: Nothing in the current code requires a tree to - assign one of these two labels to every component. There will be a warning - if other labels remain. + classification labels, but the final labels are expected to be + "accepted" or "rejected". There will be a warning if other labels remain. classification_tags: - Human readable tags that explain why a classification was reached. These can - be things like 'Likely BOLD', 'Unlikely BOLD', 'low variance' (i.e. 
accepted
-        because the variance is too low to justify losing a degree of freedom by
-        regressing it out as noise).
-        Each component can have no tags (an empty string), one tag, or a comma separated
-        list of tags. These tags may be useful parameters for visualizing and reviewing results
-
-``cross_component_metrics``:
-    Default file name: ``desc-ICA_cross_component_metrics.json``
+        Human readable tags that explain why a classification was reached.
+        Each component can have no tags (an empty string or n/a), one tag,
+        or a comma separated list of tags. These tags may be useful parameters
+        for visualizing and reviewing results.
+
+``selector.cross_component_metrics`` and "ICA cross component metrics json":
    A dictionary of metrics that are each a single value calculated across components.
-    For example, kappa and rho elbows.
+    For example, kappa and rho elbows. User or pre-defined scaling factors are
+    also stored here. Any constant that is used in the component classification
+    processes that isn't pre-defined in the decision tree file should be saved here.
-``component_status_table``:
-    Default file name: ``desc-ICA_status_table.tsv``
+``selector.component_status_table`` and "ICA status table tsv":
    A table where each column lists the classification status of
    each component after each node was run. Columns are only added
    for runs where component statuses can change.
    This is useful for understanding the classification
    path of each component through the decision tree
-``tree``:
-    Default file name: ``desc-ICA_decision_tree.json``
+``selector.tree`` and "ICA decision tree json":
    A copy of the inputted decision tree specification with an added "output" field
-    for each node. The output field (see next section) contains information about what happened during
-    execution. Of particular note, each output includes a list of the metrics
-    used within the node, "node_label", which is a (hopefully) human readable brief
-    description of the node's function and, for nodes where component classifications
-    can change, "numFalse" & "numTrue" list what changed. The inputted parameters include
-    "ifTrue" and "ifFalse" which say what changes for each component. These fields can be used
-    to construct a visual flow chart or text-based summary of how classifications changed
-    for each run.
+    for each node. The output field (see next section) contains information about
+    what happened during execution. Of particular note, each output includes a list
+    of the metrics used within the node, "node_label", which is a (hopefully) human
+    readable brief description of the node's function and, for nodes where component
+    classifications can change, "numFalse" & "numTrue" list how many components
+    changed classifications. The inputted parameters include "ifTrue" and "ifFalse"
+    which specify what changes for each component. These fields can be used to
+    construct a visual flow chart or text-based summary of how classifications
+    changed for each run.
-``used_metrics``:
-    Saved as a field in the ``tree`` json file
+``selector.tree["used_metrics"]`` and a field in "ICA decision tree json":
    A list of the metrics that were used in the decision tree. Everything in
    ``used_metrics`` should be in either ``necessary_metrics`` or
    ``generated_metrics`` If a used metric isn't in either, a warning message
-    will appear. These may have an additional use for future work so that a
-    user can input a tree and metrics would be calculated based on what's
-    needed to execute the tree.
+    will appear.
This is a useful check that makes sure every metric used was
+    pre-specified.
-``classification_tags``:
-    Saved as a field in the ``tree`` json file
+``selector.tree["classification_tags"]`` and a field in "ICA decision tree json":
    A list of the pre-specified classification tags that could be used in a decision tree.
-    Any reporting interface should use this field so that the tags that are possible are listed
+    Any reporting interface should use this field so that all possible tags are listed
    even if a given tag is not used by any component by the end of the selection process.
.. _saved in multiple files: output_file_descriptions.html
**Outputs of each decision tree step**
-This includes all the information from the specified decision tree under each "node" or function
-call. For each node, there is also an "outputs" subfield with information from when the tree
-was executed. The tree with the output fields is in the Selector object and
-with default file name: ``desc-ICA_decision_tree.json``
+"ICA decision tree json" includes all the information from the specified decision tree
+for each "node" or function call. For each node, there is an "outputs" subfield with
+information from when the tree was executed. Each outputs field includes:
decison_node_idx:
    The decision tree functions are run as part of an ordered list.
-    This is the positional index for when this function was run
-    as part of this list, starting with index 0.
+    This is the positional index of the function's location in
+    the list, starting with index 0.
used_metrics:
    A list of the metrics used in a node of the decision tree
node_label:
    A brief label for what happens in this node that can be used in a visualization
    of the decision tree
numTrue, numFalse:
    For decision tree (dec) functions, the number of components that were classified
    as true or false respectively in this decision tree step.
calc_cross_comp_metrics:
    For calculation (calc) functions, cross component metrics that were
    calculated in this function. When this is included, the cross
    component metrics and the calculated values are also included
added_component_table_metrics:
    It is possible to add a new metric to the component table during the selection process.
    This is useful if a metric is to be calculated on a subset of components based on what
-happened during previous steps in the selection process. This is **not** recommended, but
-since it was done as part of the original decision tree process defined in meica
-it is possible.
+happened during previous steps in the selection process. This is **not** recommended,
+but since it was done as part of the original kundu decision tree process defined in
+meica it is possible.
*******************************
Defining a custom decision tree
*******************************
-Decision trees are stored in json files. The default trees are stored as part of the tedana code repository in ./resources/decision_trees
-The minimal tree, minimal.json is a good example highlighting the structure and steps in a tree. It may be helpful
-to look at that tree while reading this section. kundu.json should replicate the decision tree used in MEICA version 2.5,
-the predecessor to tedana. It is a more complex, but also highlights additional possible functionality in decision trees.
-
-A user can specify another decision tree and link to the tree location when tedana is executed with the ``--tree`` option. The format is
-flexible to allow for future innovations, but be advised that this also allows you to
-to create something with non-ideal results for the current code. Some criteria will result in an error
-if violated, but more will just give a warning. If you are designing or editing a new tree, look carefully at the warnings.
+Decision trees are stored in json files.
The default trees are stored as part of
+the tedana code repository in `resources/decision_trees`_. The minimal tree,
+minimal.json is a good example highlighting the structure and steps in a tree. It
+may be helpful to look at that tree while reading this section. kundu.json replicates
+the decision tree used in MEICA version 2.5, the predecessor to tedana. It is more
+complex, but also highlights additional possible functionality in decision trees.
+
+A user can specify another decision tree and link to the tree location when tedana is
+executed with the ``--tree`` option. The format is flexible to allow for future
+innovations, but be advised that this also allows you to create something with
+non-ideal results for the current code. Some criteria will result in an error if
+violated, but more will just give a warning. If you are designing or editing a new
+tree, look carefully at the warnings.
+
+A decision tree can include two types of nodes or functions. All functions are currently
+in `selection_nodes.py`_.
+
+- A decision function will use existing metrics and potentially change the
+  classification of the components based on those metrics. By convention, all
+  these functions begin with "dec"
+- A calculation function will take existing metrics and calculate a value across
+  components to be used for classification, for example the kappa and rho elbows.
+  By convention, all these functions begin with "calc"
+- Nothing prevents a function from both calculating new cross component values and
+  applying those values in a decision step, but following this convention should
+  hopefully make decision tree specifications easier to follow and results easier
+  to interpret.
+
+.. _resources/decision_trees: https://github.com/ME-ICA/tedana/tree/main/tedana/resources/decision_trees
+.. _selection_nodes.py: https://github.com/ME-ICA/tedana/tree/main/tedana/selection/selection_nodes.py
**General information fields**
There are several fields with general information. Some of these store general
information that's useful for reporting results and others store information
that is used to check whether results are plausible and can help avoid mistakes.
    tree_id:
        A descriptive name for the tree that will be logged.
There might be special cases where someone might want to violate these rules,
- but depending what else happens in preceding code, other functions will expect both of these columns to exist.
- This manual_classification step will make sure those columns are created and initialized.
-- Every possible path through the tree should result in each component being classified as 'accepted' or 'rejected' by the time the tree is completed.
-- Three initialization variables will help prevent mistakes

     necessary_metrics:
-    Is a list of the necessary metrics in the component table that will be used by the tree. If a metric doesn't exist then this
-    will raise an error instead of executing a tree. (This can eventually be used to call the metric calculation code based on
-    the decision tree specification). If a necessary metric isn't used, there will be a warning. This is just a warning because,
-    if the decision tree code specification is eventually used to execute the code to calculate metrics, one may want to calculate
-    a metric even if it's not being used.
+        Is a list of the necessary metrics in the component table that will be used
+        by the tree. If a metric doesn't exist then this will raise an error instead
+        of executing a tree. (Depending on future code development, this could
+        potentially be used to run ``tedana`` by specifying a decision tree and
+        metrics are calculated based on the contents of this field.) If a necessary
+        metric isn't used, there will be a warning.
+
+    generated_metrics:
+        Is an optional initial field. It lists metrics that are to be calculated as
+        part of the decision tree's execution. This is used similarly to necessary_metrics
+        except, since the decision tree starts before these metrics exist, it won't raise
+        an error when these metrics are not found. One might want to calculate a new metric
+        if the metric uses only a subset of the components based on previous
+        classifications. This does make interpretation of results more confusing, but, since
+        this functionality was part of the kundu decision tree, it is included.

     intermediate_classifications:
-    A list of intermediate classifications (i.e. "provisionalaccept", "provisionalreject"). It is very important to pre-specify these
-    because the code will make sure only the default classifications ("accepted" "rejected" "unclassified") and intermediate classifications
-    are used in a tree. This prevents someone from accidentially losing a component due to a spelling error or other minor variation in a
-    classification label
+        A list of intermediate classifications (i.e. "provisionalaccept",
+        "provisionalreject"). It is very important to pre-specify these because the code
+        will make sure only the default classifications ("accepted" "rejected"
+        "unclassified") and intermediate classifications are used in a tree. This prevents
+        someone from accidentally losing a component due to a spelling error or other
+        minor variation in a classification label.

     classification_tags:
-    A list of acceptable classification tags (i.e. "Likely BOLD", "Unlikely BOLD", "Low variance"). This will both be used to make sure only
-    these tags are used in the tree and allow programs that interact with the results one place to see all potential tags
+        A list of acceptable classification tags (i.e. "Likely BOLD", "Unlikely BOLD",
+        "Low variance"). This will both be used to make sure only these tags are used in
+        the tree and allow programs that interact with the results to see all potential
+        tags in one place. 
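+
+To make the layout of these fields concrete, here is a minimal sketch of how the
+general information fields might sit at the top of a tree specification. This is an
+illustrative example rather than one of the distributed trees: the tree_id, the
+metric list, and the tags below are placeholders, and a real tree would list
+everything it actually uses (see minimal.json and kundu.json in
+`resources/decision_trees`_ for complete, working specifications):
+
+.. code-block:: python
+
+    tree_spec = {
+        "tree_id": "my_custom_tree",  # placeholder name for this example
+        "info": "A brief description of the tree for info logging",
+        "report": "A narrative description of the tree for report logging",
+        "refs": "Publications to reference when this tree is used",
+        "necessary_metrics": ["kappa", "rho"],  # metrics the tree will use
+        "intermediate_classifications": ["provisionalaccept", "provisionalreject"],
+        "classification_tags": ["Likely BOLD", "Unlikely BOLD"],
+        "nodes": [],  # node definitions, described in the next section
+    }
+
+Since the distributed trees are stored as json files, the values in a real
+specification are limited to json-compatible types.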
-**Decision node json structure**
+**Nodes in the decision tree**

-There are 7 initial fields, necessary_metrics, intermediate_classification, and classification_tags, as described in the above section and :
+The "nodes" field is an ordered list of elements where each element defines a
+node in the decision tree. Each node contains the information to call a function.

-- "tree_id": a descriptive name for the tree that will be logged.
-- "info": A brief description of the tree for info logging
-- "report": A narrative description of the tree that could be used in report logging
-- "refs" Publications that should be referenced when this tree is used
+All trees should start with a "manual_classification" node that should set all
+component classifications to "unclassified" and have "clear_classification_tags"
+set to true. There might be special cases where someone might want to violate
+these rules, but depending on what else happens in preceding code, other functions
+will expect both of these columns to exist. This manual_classification step will
+make sure those columns are created and initialized.

-"generated_metrics" is an optional initial field. It lists metrics that are calculated as part of the decision tree.
-This is used similarly to necessary_metrics except, since the decision tree starts before these metrics exist, it
-won't raise an error when these metrics are not found. One might want to calculate a new metric if the metric uses
-only a subset of the components based on previous classifications. This does make interpretation of results more
-confusing, but, since this functionaly was part of the kundu decision tree, it is included.
+Every possible path through the tree should result in each component being
+classified as 'accepted' or 'rejected' by the time the tree is completed.

-The "nodes" field is a list of elements where each element defines a node in the decision tree. There are several key fields for each of these nodes:
+There are several key fields for each node:

-- "functionname": The exact function name in selection_nodes.py that will be called.
+- "functionname": The exact function name in `selection_nodes.py`_ that will be called.
 - "parameters": Specifications of all required parameters for the function in functionname
-- "kwargs": Specification for optional parameters for the function in functionname
-
-The only parameter that is used in all functions is "decidecomps" which is used to identify, based on their classifications,
-the components a function should be applied to. It can be a single classification, or a comma separated string of classifications.
-In addition to the intermediate and default ("accepted" "rejected" "unclassified") component classifications, this can be "all"
-for functions that should be applied to all components regardless of their classifications
-
-Most decision functions also include "ifTrue" and "ifFalse" which specify how to change the classification of each component
-based on whether a the decision criterion is true or false. In addition to the default and intermediate classification options,
-this can also be "nochange" (i.e. For components where a>b is true, "reject". For components where a>b is false, "nochange").
-The optional parameters "tag_ifTrue" and "tag_ifFalse" define the classification tags to be assigned to components.
-Currently, the only exception is "manual_classify" which uses "new_classification" to designate the new component classification
-and "tag" (optional) to designate which classification tag to apply. 
-
-There are several optional parameters (to include within "kwargs") in every decision tree function:
-
-- custom_node_label: A brief label for what happens in this node that can be used in a decision tree summary table or flow chart. If custom_node_label is not not defined, then each function has default descriptive text.
-- log_extra_report, log_extra_info: Text for each function call is automatically placed in the logger output. In addition to that text, the text in these these strings will also be included in the logger with the report or info codes respectively. These might be useful to give a narrative explanation of why a step was parameterized a certain way.
-- only_used_metrics: If true, this function will only return the names of the component table metrics that will be used when this function is fully run. This can be used to identify all used metrics before running the decision tree.
-
-"_comments" can be used to add a longer explanation about what a node is doing. This will not be logged anywhere
-except in the tree, but may be useful to make sure the purpose of a given node is clear.
+- "kwargs": Specifications for optional parameters for the function in functionname
+
+The only parameter that is used in all functions is "decidecomps" which is used to
+identify, based on their classifications, the components a function should be applied
+to. It can be a single classification, or a comma separated string of classifications.
+In addition to the intermediate and default ("accepted" "rejected" "unclassified")
+component classifications, this can be "all" for functions that should be applied to
+all components regardless of their classifications.
+
+Most decision functions also include "ifTrue" and "ifFalse" which specify how to change
+the classification of each component based on whether the decision criterion is true
+or false. In addition to the default and intermediate classification options, this can
+also be "nochange" (i.e. For components where a>b is true, "reject". For components
+where a>b is false, "nochange"). The optional parameters "tag_ifTrue" and "tag_ifFalse"
+define the classification tags to be assigned to components. Currently, the only
+exceptions are "manual_classify" and "dec_classification_doesnt_exist" which use
+"new_classification" to designate the new component classification and "tag" (optional)
+to designate which classification tag to apply.
+
+There are several optional parameters (to include within "kwargs") in every decision
+tree function:
+
+- custom_node_label: A brief label for what happens in this node that can be used in
+  a decision tree summary table or flow chart. If custom_node_label is not defined,
+  then each function has default descriptive text.
+- log_extra_report, log_extra_info: Text for each function call is automatically placed
+  in the logger output. In addition to that text, the text in these strings will
+  also be included in the logger with the report or info codes respectively. These
+  might be useful to give a narrative explanation of why a step was parameterized a
+  certain way.
+- only_used_metrics: If true, this function will only return the names of the component
+  table metrics that will be used when this function is fully run. This can be used to
+  identify all used metrics before running the decision tree.
+
+"_comments" can be used to add a longer explanation about what a node is doing. 
This +will not be logged anywhere except in the tree, but may be useful to help explain the +purpose of a given node. ******************************** Key parts of selection functions ******************************** -There are several expectations for selection functions that are necessary for them to properly execute. -In selection_nodes.py, manual_classify, dec_left_op_right, and calc_kappa_rho_elbows_kundu are good -examples for how to meet these expectations. +There are several expectations for selection functions that are necessary for them to +properly execute. In `selection_nodes.py`_, ``manual_classify``, ``dec_left_op_right``, +and ``calc_kappa_rho_elbows_kundu`` are good examples for how to meet these expectations. Create a dictionary called "outputs" that includes key fields that should be recorded. -The following line should be at the end of each function ``selector.nodes[selector.current_node_idx]["outputs"] = outputs`` -Additional fields can be used to log function-specific information, but the following fields are common and may be used by other parts of the code: - -- "decision_node_idx" (required): the ordered index for the current function in the decision tree. -- "node_label" (required): A decriptive label for what happens in the node. -- "numTrue" & "numFalse" (required for decision functions): For decision functions, the number of components labels true or false within the function call. -- "used_metrics" (required if a function uses metrics): The list of metrics used in the function. This can be hard coded, defined by input parameters, or empty. -- "used_cross_component_metrics" (required if a function uses cross component metrics): A list of cross component metrics used in the function. This can be hard coded, defined by input parameters, or empty. -- "calc_cross_comp_metrics" (required for calculation functions): A list of cross component metrics calculated within the function. The key-value pair for each calculated metric is also included in "outputs" - -Before any data are touched in the function, there should be an ``if only_used_metrics:`` clause that returns ``used_metrics`` for the function call. -This will be useful to gather all metrics a tree will use without requiring a specific dataset. - -Existing functions define ``function_name_idx = f"Step {selector.current_node_idx}: [text of function_name]`` This is used in logging and is cleaner to initialize near the top of each function. +The following line should be at the end of each function to retain the output info: +``selector.nodes[selector.current_node_idx]["outputs"] = outputs`` +Additional fields can be used to log function-specific information, but the following +fields are common and may be used by other parts of the code: -Each function has code that creates a default node label in ``outputs["node_label"]``. The default node label -may be used in decision tree visualization so it should be relatively short. Within this section, if there is -a user-provided custom_node_label, that should be used instead. 
-
-Calculation nodes should check if the value they are calculating was already calculated and output a warning if the function overwrites an existing value
-
-Code that adds the text log_extra_info and log_extra_report into the appropriate logs (if they are provided by the user)
-
-After the above information is included, all functions will call ``selectcomps2use`` which returns the components with classifications included in ``decide_comps``
-and then run ``confirm_metrics_exist`` which is an added check to make sure the metrics used by this function exist in the component table.
+- "decision_node_idx" (required): the ordered index for the current function in the
+  decision tree.
+- "node_label" (required): A descriptive label for what happens in the node.
+- "numTrue" & "numFalse" (required for decision functions): For decision functions,
+  the number of components labeled true or false within the function call.
+- "used_metrics" (required if a function uses metrics): The list of metrics used in
+  the function. This can be hard coded, defined by input parameters, or empty.
+- "used_cross_component_metrics" (required if a function uses cross component metrics):
+  A list of cross component metrics used in the function. This can be hard coded,
+  defined by input parameters, or empty.
+- "calc_cross_comp_metrics" (required for calculation functions): A list of cross
+  component metrics calculated within the function. The key-value pair for each
+  calculated metric is also included in "outputs".
+
+Before any data are touched in the function, there should be an
+``if only_used_metrics:`` clause that returns ``used_metrics`` for the function
+call. This will be useful to gather all metrics a tree will use without requiring a
+specific dataset.
+
+Existing functions define ``function_name_idx = f"Step {selector.current_node_idx}: [text of function_name]"``
+This is used in logging and is cleaner to initialize near the top of each function.
+
+
+Each function has code that creates a default node label in ``outputs["node_label"]``.
+The default node label may be used in decision tree visualization so it should be
+relatively short. Within this section, if there is a user-provided custom_node_label,
+that should be used instead.
+
+Calculation nodes should check if the value they are calculating was already calculated
+and output a warning if the function overwrites an existing value.
+
+Each function should also include code that adds the text ``log_extra_info`` and
+``log_extra_report`` into the appropriate logs (if they are provided by the user).
+
+After the above information is included, all functions will call ``selectcomps2use``
+which returns the components with classifications included in ``decide_comps``
+and then runs ``confirm_metrics_exist`` which is an added check to make sure the metrics
+used by this function exist in the component table.

 Nearly every function has a clause like:

@@ -275,20 +344,25 @@ Nearly every function has a clause like:

     outputs["numFalse"] = 0

 else:

-If there are no components with the classifications in ``decide_comps`` this logs that there's nothing for the function to be run on, else continue.
+If there are no components with the classifications in ``decide_comps``, this logs that
+there's nothing for the function to run on; otherwise the function continues.

-For decision functions the key variable is ``decision_boolean`` which should be a dataframe column which is True or False for the components in ``decide_comps``
-based on the function's criteria. 
That column is an input to ``change_comptable_classifications`` which will update the component_table classifications, -update the classification history in component_status_table, and update the component classification_tags. Components not in ``decide_comps`` retain their -existing classifications and tags. -``change_comptable_classifications`` also returns and should assign values to ``outputs["numTrue"]`` and ``outputs["numFalse"]``. -These log how many components were identified as true or false within each function. +For decision functions the key variable is ``decision_boolean`` which should be a pandas +dataframe column which is True or False for the components in ``decide_comps`` based on +the function's criteria. That column is an input to ``change_comptable_classifications`` +which will update the component_table classifications, update the classification history +in component_status_table, and update the component classification_tags. Components not +in ``decide_comps`` retain their existing classifications and tags. +``change_comptable_classifications`` also returns and should assign values to +``outputs["numTrue"]`` and ``outputs["numFalse"]``. These log how many components were +identified as true or false within each function. -For calculation functions, the calculated values should be added as a value/key pair to both ``selector.cross_component_metrics`` and ``outputs`` +For calculation functions, the calculated values should be added as a value/key pair to +both ``selector.cross_component_metrics`` and ``outputs`` ``log_decision_tree_step`` puts the relevant info from the function call into the program's output log. -Every function should end. +Every function should end with: .. code-block:: python @@ -297,8 +371,11 @@ Every function should end. functionname.__doc__ = (functionname.__doc__.format(**decision_docs)) -This makes sure the outputs from the function are saved in the class structure and the class structure is returned. -The following line should include the function's name and is used to make sure repeated variable names are compiled correctly for the API documentation. +This makes sure the outputs from the function are saved in the class structure and the +class structure is returned. The following line should include the function's name and +is used to make sure repeated variable names are compiled correctly for the API +documentation. -If you have made it this far, congratulations. -If you follow these steps you'll be able to impress your colleagues, friends, and family by designing your very own decision tree functions. +If you have made it this far, congratulations!!! If you follow these steps, you'll be able +to impress your colleagues, friends, and family by designing your very own decision +tree functions. diff --git a/docs/output_file_descriptions.rst b/docs/output_file_descriptions.rst index f2a2dd8a7..1ae12609c 100644 --- a/docs/output_file_descriptions.rst +++ b/docs/output_file_descriptions.rst @@ -69,7 +69,7 @@ Key: Filename Con decomposition. "ICA metrics json": desc-tedana_metrics.json Metadata about the metrics in ``desc-tedana_metrics.tsv``. -"ICA cross component metrics json": desc-ICACrossComponent_metrics.tsv Metric names and values that are each a single number +"ICA cross component metrics json": desc-ICACrossComponent_metrics.json Metric names and values that are each a single number calculated across components. For example, kappa and rho elbows. 
"ICA decision tree json": desc-ICA_decision_tree A copy of the inputted decision tree specification with @@ -91,58 +91,59 @@ tedana_report.html The If ``verbose`` is set to True: -============================================================== ===================================================== -Filename Content -============================================================== ===================================================== -desc-limited_T2starmap.nii.gz Limited T2* map/time series. - Values are in seconds. - The difference between the limited and full maps - is that, for voxels affected by dropout where - only one echo contains good data, the full map uses - the S0 estimate from the first two echoes, while the - limited map has a NaN. -desc-limited_S0map.nii.gz Limited S0 map/time series. - The difference between the limited and full maps - is that, for voxels affected by dropout where - only one echo contains good data, the full map uses - the S0 estimate from the first two echoes, while the - limited map has a NaN. -echo-[echo]_desc-[PCA|ICA]_components.nii.gz Echo-wise PCA/ICA component weight maps. -echo-[echo]_desc-[PCA|ICA]R2ModelPredictions_components.nii.gz Component- and voxel-wise R2-model predictions, - separated by echo. -echo-[echo]_desc-[PCA|ICA]S0ModelPredictions_components.nii.gz Component- and voxel-wise S0-model predictions, - separated by echo. -desc-[PCA|ICA]AveragingWeights_components.nii.gz Component-wise averaging weights for metric - calculation. -desc-[PCA|ICA]S0_stat-F_statmap.nii.gz F-statistic map for each component, for the S0 model. -desc-[PCA|ICA]T2_stat-F_statmap.nii.gz F-statistic map for each component, for the T2 model. -desc-optcomPCAReduced_bold.nii.gz Optimally combined data after dimensionality - reduction with PCA. This is the input to the ICA. -echo-[echo]_desc-Accepted_bold.nii.gz High-Kappa time series for echo number ``echo`` -echo-[echo]_desc-Rejected_bold.nii.gz Low-Kappa time series for echo number ``echo`` -echo-[echo]_desc-Denoised_bold.nii.gz Denoised time series for echo number ``echo`` -============================================================== ===================================================== +============================================================================================= ===================================================== +Key: Filename Content +============================================================================================= ===================================================== +"limited t2star img": desc-limited_T2starmap.nii.gz Limited T2* map/time series. + Values are in seconds. + The difference between the limited and full maps + is that, for voxels affected by dropout where + only one echo contains good data, the full map uses + the S0 estimate from the first two echoes, while the + limited map has a NaN. +"limited s0 img": desc-limited_S0map.nii.gz Limited S0 map/time series. + The difference between the limited and full maps + is that, for voxels affected by dropout where + only one echo contains good data, the full map uses + the S0 estimate from the first two echoes, while the + limited map has a NaN. +"whitened img": desc-optcom_whitened_bold The optimally combined data after whitening +"echo weight [PCA|ICA] maps split img": echo-[echo]_desc-[PCA|ICA]_components.nii.gz Echo-wise PCA/ICA component weight maps. +"echo T2 [PCA|ICA] split img": echo-[echo]_desc-[PCA|ICA]T2ModelPredictions_components.nii.gz Component- and voxel-wise R2-model predictions, + separated by echo. 
+"echo S0 [PCA|ICA] split img": echo-[echo]_desc-[PCA|ICA]S0ModelPredictions_components.nii.gz Component- and voxel-wise S0-model predictions, + separated by echo. +"[PCA|ICA] component weights img": desc-[PCA|ICA]AveragingWeights_components.nii.gz Component-wise averaging weights for metric + calculation. +"[PCA|ICA] component F-S0 img": desc-[PCA|ICA]S0_stat-F_statmap.nii.gz F-statistic map for each component, for the S0 model. +"[PCA|ICA] component F-T2 img": desc-[PCA|ICA]T2_stat-F_statmap.nii.gz F-statistic map for each component, for the T2 model. +"PCA reduced img": desc-optcomPCAReduced_bold.nii.gz Optimally combined data after dimensionality + reduction with PCA. This is the input to the ICA. +"high kappa ts split img": echo-[echo]_desc-Accepted_bold.nii.gz High-Kappa time series for echo number ``echo`` +"low kappa ts split img": echo-[echo]_desc-Rejected_bold.nii.gz Low-Kappa time series for echo number ``echo`` +"denoised ts split img": echo-[echo]_desc-Denoised_bold.nii.gz Denoised time series for echo number ``echo`` +============================================================================================= ===================================================== If ``gscontrol`` includes 'gsr': -================================================ ===================================================== -Filename Content -================================================ ===================================================== -desc-globalSignal_map.nii.gz Spatial global signal -desc-globalSignal_timeseries.tsv Time series of global signal from optimally combined - data. -desc-optcomWithGlobalSignal_bold.nii.gz Optimally combined time series with global signal - retained. -desc-optcomNoGlobalSignal_bold.nii.gz Optimally combined time series with global signal - removed. -================================================ ===================================================== +================================================================= ===================================================== +Filename Content +================================================================= ===================================================== +"gs img": desc-globalSignal_map.nii.gz Spatial global signal +"global signal time series tsv": desc-globalSignal_timeseries.tsv Time series of global signal from optimally combined + data. +"has gs combined img": desc-optcomWithGlobalSignal_bold.nii.gz Optimally combined time series with global signal + retained. +"removed gs combined img": desc-optcomNoGlobalSignal_bold.nii.gz Optimally combined time series with global signal + removed. 
+================================================================= ===================================================== If ``gscontrol`` includes 't1c': ================================================ ===================================================== Filename Content ================================================ ===================================================== -desc-T1likeEffect_min.nii.gz T1-like effect +"t1 like img": desc-T1likeEffect_min.nii.gz T1-like effect desc-optcomAcceptedT1cDenoised_bold.nii.gz T1-corrected high-kappa time series by regression desc-optcomT1cDenoised_bold.nii.gz T1-corrected denoised time series desc-TEDICAAcceptedT1cDenoised_components.nii.gz T1-GS corrected high-kappa components diff --git a/tedana/resources/config/outputs.json b/tedana/resources/config/outputs.json index 3e185b887..622ab3bbe 100644 --- a/tedana/resources/config/outputs.json +++ b/tedana/resources/config/outputs.json @@ -57,7 +57,7 @@ }, "whitened img": { "orig": "ts_OC_whitened", - "bidsv1.5.0": "desc-optcomPCAReduced_bold" + "bidsv1.5.0": "desc-optcom_whitened_bold" }, "echo weight PCA map split img": { "orig": "e{echo}_PCA_comp", @@ -219,4 +219,4 @@ "orig": "registry", "bidsv1.5.0": "desc-tedana_registry" } -} +} \ No newline at end of file diff --git a/tedana/resources/decision_trees/kundu.json b/tedana/resources/decision_trees/kundu.json index 33c09850a..d57ae84c6 100644 --- a/tedana/resources/decision_trees/kundu.json +++ b/tedana/resources/decision_trees/kundu.json @@ -1,7 +1,7 @@ { "tree_id": "kundu_MEICA27_decision_tree", "info": "Following the full decision tree designed by Prantik Kundu", - "report": "This is based on the criteria of the original MEICA decision tree", + "report": "This is based on the criteria of the MEICA v2.5 decision tree", "refs": "Kundu 2013", "necessary_metrics": [ "kappa", @@ -453,4 +453,4 @@ "_comment": "No code in the premodularized tedana" } ] -} +} \ No newline at end of file diff --git a/tedana/selection/selection_nodes.py b/tedana/selection/selection_nodes.py index 20fc251ca..d709c421c 100644 --- a/tedana/selection/selection_nodes.py +++ b/tedana/selection/selection_nodes.py @@ -1000,7 +1000,7 @@ def dec_classification_doesnt_exist( log_extra_info="", custom_node_label="", only_used_metrics=False, - tag="", + tag=None, ): """ If there are no components with a classification specified in class_comp_exists, From bbdd9bb6f036ef23d5bc95ac701760ce30309e6f Mon Sep 17 00:00:00 2001 From: handwerkerd Date: Wed, 21 Dec 2022 11:30:14 -0500 Subject: [PATCH 099/177] output_file_descriptions fully updated --- docs/output_file_descriptions.rst | 93 ++++++++++++++++++------------- 1 file changed, 53 insertions(+), 40 deletions(-) diff --git a/docs/output_file_descriptions.rst b/docs/output_file_descriptions.rst index 1ae12609c..5ddf7d1e7 100644 --- a/docs/output_file_descriptions.rst +++ b/docs/output_file_descriptions.rst @@ -14,30 +14,28 @@ file names. =========================================================================== ===================================================== Key: Filename Content =========================================================================== ===================================================== +"registry json": desc-tedana_registry.json Mapping of file name keys to filename locations "data description json": dataset_description.json Top-level metadata for the workflow. -"t2star img": T2starmap.nii.gz Full estimated T2* 3D map. - Values are in seconds. 
-                                                                            The difference between the limited and full maps
-                                                                            is that, for voxels affected by dropout where
-                                                                            only one echo contains good data, the full map uses
-                                                                            the T2* estimate from the first two echoes, while the
-                                                                            limited map has a NaN.
-"s0 img": S0map.nii.gz                                                      Full S0 3D map.
-                                                                            The difference between the limited and full maps
-                                                                            is that, for voxels affected by dropout where
-                                                                            only one echo contains good data, the full map uses
-                                                                            the S0 estimate from the first two echoes, while the
-                                                                            limited map has a NaN.
+tedana_report.html                                                          The interactive HTML report.
 "combined img": desc-optcom_bold.nii.gz                                     Optimally combined time series.
 "denoised ts img": desc-optcomDenoised_bold.nii.gz                          Denoised optimally combined time series. Recommended
                                                                             dataset for analysis.
-"low kappa ts img": desc-optcomRejected_bold.nii.gz                         Combined time series from rejected components.
-"high kappa ts img": desc-optcomAccepted_bold.nii.gz                        High-kappa time series. This dataset does not
-                                                                            include thermal noise or low variance components.
-                                                                            Not the recommended dataset for analysis.
 "adaptive mask img": desc-adaptiveGoodSignal_mask.nii.gz                    Integer-valued mask used in the workflow, where
                                                                             each voxel's value corresponds to the number of good
-                                                                            echoes to be used for T2\*/S0 estimation.
+                                                                            echoes to be used for T2\*/S0 estimation. Will be
+                                                                            calculated whether the original mask was estimated
+                                                                            within tedana or user-provided. All voxels with 1 good
+                                                                            echo will be included in the outputted time series,
+                                                                            but only voxels with at least 3 good echoes will be
+                                                                            used in ICA and metric calculations.
+"t2star img": T2starmap.nii.gz                                              Full estimated T2* 3D map.
+                                                                            Values are in seconds. If a voxel has at least 1 good
+                                                                            echo then the first two echoes will be used to estimate
+                                                                            a value (an imprecise weighting for optimal combination
+                                                                            is better than fully excluding a voxel).
+"s0 img": S0map.nii.gz                                                      Full S0 3D map. If a voxel has at least 1 good
+                                                                            echo then the first two echoes will be used to estimate
+                                                                            a value.
 "PCA mixing tsv": desc-PCA_mixing.tsv                                       Mixing matrix (component time series) from PCA
                                                                             decomposition in a tab-delimited file. Each column is
                                                                             a different component, and the column name is the
@@ -52,6 +50,13 @@ Key: Filename                                                               Con
                                                                             information for each component from the PCA
                                                                             decomposition.
 "PCA metrics json": desc-PCA_metrics.json                                   Metadata about the metrics in ``desc-PCA_metrics.tsv``.
+"PCA cross component metrics json": desc-PCACrossComponent_metrics.json     Measures calculated across PCA components including
+                                                                            values for the full cost function curves for all
+                                                                            AIC, KIC, and MDL cost functions and the number of
+                                                                            components and variance explained for multiple options.
+                                                                            Figures for the cost functions and variance explained
+                                                                            are also in
+                                                                            ``./figures/pca_[criteria|variance_explained].png``
 "ICA mixing tsv": desc-ICA_mixing.tsv                                       Mixing matrix (component time series) from ICA
                                                                             decomposition in a tab-delimited file. Each column is
                                                                             a different component, and the column name is the
@@ -84,9 +89,13 @@ Key: Filename                                                               Con
 "z-scored ICA accepted components img": desc-ICAAcceptedZ_components.nii.gz Z-normalized spatial component maps
 report.txt                                                                  A summary report for the workflow with relevant
                                                                             citations.
+"low kappa ts img": desc-optcomRejected_bold.nii.gz                         Combined time series from rejected components.
+"high kappa ts img": desc-optcomAccepted_bold.nii.gz                        High-kappa time series. This dataset does not
+                                                                            include thermal noise or low variance components.
+                                                                            Not the recommended dataset for analysis.
 references.bib                                                              The BibTeX entries for references cited in
                                                                             report.txt. 
-tedana_report.html                                                          The interactive HTML report.
+
 =========================================================================== =====================================================

 If ``verbose`` is set to True:

@@ -96,17 +105,11 @@ Key: Filename
 ============================================================================================= =====================================================
 "limited t2star img": desc-limited_T2starmap.nii.gz                                           Limited T2* map/time series.
                                                                                               Values are in seconds.
-                                                                                              The difference between the limited and full maps
-                                                                                              is that, for voxels affected by dropout where
-                                                                                              only one echo contains good data, the full map uses
-                                                                                              the S0 estimate from the first two echoes, while the
-                                                                                              limited map has a NaN.
+                                                                                              Unlike the full T2* maps, if only one echo contains
+                                                                                              good data, the limited map will have NaN.
 "limited s0 img": desc-limited_S0map.nii.gz                                                   Limited S0 map/time series.
-                                                                                              The difference between the limited and full maps
-                                                                                              is that, for voxels affected by dropout where
-                                                                                              only one echo contains good data, the full map uses
-                                                                                              the S0 estimate from the first two echoes, while the
-                                                                                              limited map has a NaN.
+                                                                                              Unlike the full S0 maps, if only one echo contains
+                                                                                              good data, the limited map will have NaN.
 "whitened img": desc-optcom_whitened_bold                                                     The optimally combined data after whitening
 "echo weight [PCA|ICA] maps split img": echo-[echo]_desc-[PCA|ICA]_components.nii.gz          Echo-wise PCA/ICA component weight maps.
 "echo T2 [PCA|ICA] split img": echo-[echo]_desc-[PCA|ICA]T2ModelPredictions_components.nii.gz Component- and voxel-wise R2-model predictions,
                                                                                               separated by echo.
@@ -124,10 +127,19 @@ Key: Filename
 "denoised ts split img": echo-[echo]_desc-Denoised_bold.nii.gz                                Denoised time series for echo number ``echo``
 ============================================================================================= =====================================================

+If ``tedort`` is True:
+
+======================================================== =====================================================
+Key: Filename                                            Content
+======================================================== =====================================================
+"ICA orthogonalized mixing tsv": desc-ICAOrth_mixing.tsv Mixing matrix with rejected components orthogonalized
+                                                         from accepted components
+======================================================== =====================================================
+
 If ``gscontrol`` includes 'gsr':

 ================================================================= =====================================================
-Filename                                                          Content
+Key: Filename                                                     Content
 ================================================================= =====================================================
 "gs img": desc-globalSignal_map.nii.gz                            Spatial global signal
 "global signal time series tsv": desc-globalSignal_timeseries.tsv Time series of global signal from optimally combined
                                                                   data.
 "has gs combined img": desc-optcomWithGlobalSignal_bold.nii.gz    Optimally combined time series with global signal
                                                                   retained.
 "removed gs combined img": desc-optcomNoGlobalSignal_bold.nii.gz  Optimally combined time series with global signal
                                                                   removed. 
================================================================= ===================================================== -If ``gscontrol`` includes 't1c': +If ``gscontrol`` includes 'mir' (Minimal intensity regression, which may help remove some T1 noise and +was an option in the MEICA v2.5 code, but never fully explained or evaluted in a publication): -================================================ ===================================================== -Filename Content -================================================ ===================================================== -"t1 like img": desc-T1likeEffect_min.nii.gz T1-like effect -desc-optcomAcceptedT1cDenoised_bold.nii.gz T1-corrected high-kappa time series by regression -desc-optcomT1cDenoised_bold.nii.gz T1-corrected denoised time series -desc-TEDICAAcceptedT1cDenoised_components.nii.gz T1-GS corrected high-kappa components -desc-TEDICAT1cDenoised_mixing.tsv T1-GS corrected mixing matrix -================================================ ===================================================== \ No newline at end of file +======================================================================================= ===================================================== +Key: Filename Content +======================================================================================= ===================================================== +"t1 like img": desc-T1likeEffect_min.nii.gz T1-like effect +"mir denoised img": desc-optcomMIRDenoised_bold.nii.gz Denoised time series after MIR +"ICA MIR mixing tsv": desc-ICAMIRDenoised_mixing.tsv ICA mixing matrix after MIR +"ICA accepted mir component weights img": desc-ICAAcceptedMIRDenoised_components.nii.gz high-kappa components after MIR +"ICA accepted mir denoised img": desc-optcomAcceptedMIRDenoised_bold.nii.gz high-kappa time series after MIR +======================================================================================= ===================================================== From 682f352a3c5e21c7a1f92a254f9fa50026938b67 Mon Sep 17 00:00:00 2001 From: handwerkerd Date: Wed, 21 Dec 2022 13:53:31 -0500 Subject: [PATCH 100/177] filled testing gaps for component_selector --- tedana/selection/component_selector.py | 28 ++++----- ...Selector.py => test_component_selector.py} | 63 +++++++++++++++++++ 2 files changed, 77 insertions(+), 14 deletions(-) rename tedana/tests/{test_ComponentSelector.py => test_component_selector.py} (79%) diff --git a/tedana/selection/component_selector.py b/tedana/selection/component_selector.py index af0f6c8a1..6730b48c6 100644 --- a/tedana/selection/component_selector.py +++ b/tedana/selection/component_selector.py @@ -524,20 +524,20 @@ def rejected_comps(self): """The indices of components that are rejected.""" return self.component_table["classification"] == "rejected" - @property - def is_final(self): - """Whether the classifications are all acccepted/rejected""" - return (self.accepted_comps.sum() + self.rejected_comps.sum()) > self.n_comps - - @property - def mixing(self): - """The mixing matrix used to generate the components being decided upon.""" - return self.mixing_matrix - - @property - def oc_data(self): - """The optimally combined data being used for this tree.""" - return self.oc_data + # @property + # def is_final(self): + # """Whether the classifications are all acccepted/rejected""" + # return (self.accepted_comps.sum() + self.rejected_comps.sum()) > self.n_comps + + # @property + # def mixing(self): + # """The mixing matrix used to generate the components 
being decided upon.""" + # return self.mixing_matrix + + # @property + # def oc_data(self): + # """The optimally combined data being used for this tree.""" + # return self.oc_data def to_files(self, io_generator): """Convert this selector into component files diff --git a/tedana/tests/test_ComponentSelector.py b/tedana/tests/test_component_selector.py similarity index 79% rename from tedana/tests/test_ComponentSelector.py rename to tedana/tests/test_component_selector.py index 61ff19f45..4e388222f 100644 --- a/tedana/tests/test_ComponentSelector.py +++ b/tedana/tests/test_component_selector.py @@ -129,6 +129,8 @@ def dicts_to_test(treechoice): tree["nodes"][0]["functionname"] = "not_a_function" elif treechoice == "missing_key": tree.pop("refs") + elif treechoice == "null_value": + tree["nodes"][0]["parameters"]["left"] = None else: raise Exception(f"{treechoice} is an invalid option for treechoice") @@ -176,6 +178,15 @@ def test_minimal(): ) tree.select() + # rerun without classification_tags column initialized + tree = component_selector.ComponentSelector( + "minimal", + sample_comptable(), + cross_component_metrics=xcomp, + ) + tree.component_table = tree.component_table.drop(columns="classification_tags") + tree.select() + # validate_tree # ------------- @@ -249,3 +260,55 @@ def test_validate_tree_fails(): # Calling a function missing a required parameter should not be valid with pytest.raises(component_selector.TreeError): component_selector.validate_tree(dicts_to_test("missing_req_param")) + + +def test_check_null_fails(): + """Tests to trigger check_null missing parameter error""" + + selector = component_selector.ComponentSelector("minimal", sample_comptable()) + selector.tree = dicts_to_test("null_value") + + params = selector.tree["nodes"][0]["parameters"] + functionname = selector.tree["nodes"][0]["functionname"] + with pytest.raises(ValueError): + selector.check_null(params, functionname) + + +def test_check_null_succeeds(): + """Tests check_null finds empty parameter in self""" + + # "left" is missing from the function definition in node + # but is found as an initialized cross component metric + xcomp = { + "left": 3, + } + selector = component_selector.ComponentSelector( + "minimal", + sample_comptable(), + cross_component_metrics=xcomp, + ) + selector.tree = dicts_to_test("null_value") + + params = selector.tree["nodes"][0]["parameters"] + functionname = selector.tree["nodes"][0]["functionname"] + selector.check_null(params, functionname) + + +def test_are_only_necessary_metrics_used_warning(): + """Tests a warning that wasn't triggred in other test workflows""" + + selector = component_selector.ComponentSelector("minimal", sample_comptable()) + + # warning when an element of necessary_metrics was not in used_metrics + selector.tree["used_metrics"] = set(["A", "B", "C"]) + selector.necessary_metrics = set(["B", "C", "D"]) + selector.are_only_necessary_metrics_used() + + +def test_are_all_components_accepted_or_rejected(): + """Tests warnings are triggered in are_all_components_accepted_or_rejected""" + + selector = component_selector.ComponentSelector("minimal", sample_comptable()) + selector.component_table.loc[7, "classification"] = "intermediate1" + selector.component_table.loc[[1, 3, 5], "classification"] = "intermediate2" + selector.are_all_components_accepted_or_rejected() From 2a189d88513628466fe783523d316a55b1ebc4a6 Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Wed, 21 Dec 2022 14:53:34 -0500 Subject: [PATCH 101/177] Updates integration test fnames --- 
tedana/tests/data/fiu_four_echo_outputs.txt | 2 +- tedana/tests/data/nih_five_echo_outputs_verbose.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tedana/tests/data/fiu_four_echo_outputs.txt b/tedana/tests/data/fiu_four_echo_outputs.txt index 77dc5869e..3d0a1b346 100644 --- a/tedana/tests/data/fiu_four_echo_outputs.txt +++ b/tedana/tests/data/fiu_four_echo_outputs.txt @@ -37,7 +37,7 @@ desc-optcomAccepted_bold.nii.gz desc-optcomDenoised_bold.nii.gz desc-optcomMIRDenoised_bold.nii.gz desc-optcomNoGlobalSignal_bold.nii.gz -desc-optcomPCAReduced_bold.nii.gz +desc-optcom_whitened_bold.nii.gz desc-optcomRejected_bold.nii.gz desc-optcomWithGlobalSignal_bold.nii.gz desc-optcom_bold.nii.gz diff --git a/tedana/tests/data/nih_five_echo_outputs_verbose.txt b/tedana/tests/data/nih_five_echo_outputs_verbose.txt index 948487065..6203c421b 100644 --- a/tedana/tests/data/nih_five_echo_outputs_verbose.txt +++ b/tedana/tests/data/nih_five_echo_outputs_verbose.txt @@ -30,7 +30,7 @@ desc-limited_S0map.nii.gz desc-limited_T2starmap.nii.gz desc-optcomAccepted_bold.nii.gz desc-optcomDenoised_bold.nii.gz -desc-optcomPCAReduced_bold.nii.gz +desc-optcom_whitened_bold.nii.gz desc-optcomRejected_bold.nii.gz desc-optcom_bold.nii.gz echo-1_desc-Accepted_bold.nii.gz From ee3d517c1b6161c1215c19875853fce1c0e8a96f Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Wed, 21 Dec 2022 15:31:04 -0500 Subject: [PATCH 102/177] Try a numpy fix --- tedana/tests/test_selection_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tedana/tests/test_selection_utils.py b/tedana/tests/test_selection_utils.py index 9006b6126..23b619a7f 100644 --- a/tedana/tests/test_selection_utils.py +++ b/tedana/tests/test_selection_utils.py @@ -311,7 +311,7 @@ def test_getelbow_smoke(): """A smoke test for the getelbow function.""" arr = np.random.random(100) idx = selection_utils.getelbow(arr) - assert isinstance(idx, np.integer) + assert isinstance(idx, np.int32) or isinstance(idx, np.int64) val = selection_utils.getelbow(arr, return_val=True) assert isinstance(val, float) From 2d5b8091156fb60d56040ec0e53d292f43936ba6 Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Wed, 21 Dec 2022 15:38:05 -0500 Subject: [PATCH 103/177] Try again --- tedana/selection/selection_utils.py | 2 +- tedana/tests/test_selection_utils.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tedana/selection/selection_utils.py b/tedana/selection/selection_utils.py index 4e946108d..c1ad69642 100644 --- a/tedana/selection/selection_utils.py +++ b/tedana/selection/selection_utils.py @@ -513,7 +513,7 @@ def getelbow_cons(arr, return_val=False): (arr[nk - 5 - ii - 1] > arr[nk - 5 - ii : nk].mean() + 2 * arr[nk - 5 - ii : nk].std()) for ii in range(nk - 5) ] - ds = np.array(temp1[::-1], dtype=np.int) + ds = np.array(temp1[::-1], dtype=np.integer) dsum = [] c_ = 0 for d_ in ds: diff --git a/tedana/tests/test_selection_utils.py b/tedana/tests/test_selection_utils.py index 23b619a7f..9006b6126 100644 --- a/tedana/tests/test_selection_utils.py +++ b/tedana/tests/test_selection_utils.py @@ -311,7 +311,7 @@ def test_getelbow_smoke(): """A smoke test for the getelbow function.""" arr = np.random.random(100) idx = selection_utils.getelbow(arr) - assert isinstance(idx, np.int32) or isinstance(idx, np.int64) + assert isinstance(idx, np.integer) val = selection_utils.getelbow(arr, return_val=True) assert isinstance(val, float) From a3614176d5ecf06addc7a223ed675d08f62502f5 Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Wed, 21 
Dec 2022 15:47:12 -0500 Subject: [PATCH 104/177] Remove dead code --- tedana/selection/component_selector.py | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/tedana/selection/component_selector.py b/tedana/selection/component_selector.py index af0f6c8a1..3295602af 100644 --- a/tedana/selection/component_selector.py +++ b/tedana/selection/component_selector.py @@ -524,21 +524,6 @@ def rejected_comps(self): """The indices of components that are rejected.""" return self.component_table["classification"] == "rejected" - @property - def is_final(self): - """Whether the classifications are all acccepted/rejected""" - return (self.accepted_comps.sum() + self.rejected_comps.sum()) > self.n_comps - - @property - def mixing(self): - """The mixing matrix used to generate the components being decided upon.""" - return self.mixing_matrix - - @property - def oc_data(self): - """The optimally combined data being used for this tree.""" - return self.oc_data - def to_files(self, io_generator): """Convert this selector into component files From 88721ae684ea92798b58a547dcd47d5fcf57a171 Mon Sep 17 00:00:00 2001 From: Dan Handwerker <7406227+handwerkerd@users.noreply.github.com> Date: Thu, 22 Dec 2022 10:09:37 -0500 Subject: [PATCH 105/177] full selector coverage (#23) --- tedana/tests/test_component_selector.py | 12 ++--- tedana/tests/test_selection_nodes.py | 60 ++++++++++++++++++++++++- tedana/tests/test_selection_utils.py | 4 ++ 3 files changed, 68 insertions(+), 8 deletions(-) diff --git a/tedana/tests/test_component_selector.py b/tedana/tests/test_component_selector.py index 4e388222f..f1d26eabf 100644 --- a/tedana/tests/test_component_selector.py +++ b/tedana/tests/test_component_selector.py @@ -171,21 +171,21 @@ def test_minimal(): xcomp = { "n_echos": 3, } - tree = component_selector.ComponentSelector( + selector = component_selector.ComponentSelector( "minimal", sample_comptable(), cross_component_metrics=xcomp, ) - tree.select() + selector.select() # rerun without classification_tags column initialized - tree = component_selector.ComponentSelector( + selector = component_selector.ComponentSelector( "minimal", sample_comptable(), cross_component_metrics=xcomp, ) - tree.component_table = tree.component_table.drop(columns="classification_tags") - tree.select() + selector.component_table = selector.component_table.drop(columns="classification_tags") + selector.select() # validate_tree @@ -295,7 +295,7 @@ def test_check_null_succeeds(): def test_are_only_necessary_metrics_used_warning(): - """Tests a warning that wasn't triggred in other test workflows""" + """Tests a warning that wasn't triggered in other test workflows""" selector = component_selector.ComponentSelector("minimal", sample_comptable()) diff --git a/tedana/tests/test_selection_nodes.py b/tedana/tests/test_selection_nodes.py index e5e05c1c1..4a3b620aa 100644 --- a/tedana/tests/test_selection_nodes.py +++ b/tedana/tests/test_selection_nodes.py @@ -469,6 +469,23 @@ def test_calc_kappa_elbow(): assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["kappa_allcomps_elbow"] > 0 assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["kappa_nonsig_elbow"] > 0 + # No components with "NotALabel" classification so nothing selected + selector = sample_selector() + decide_comps = "NotALabel" + + # Outputs just the metrics used in this function + selector = selection_nodes.calc_kappa_elbow(selector, decide_comps) + assert ( + selector.tree["nodes"][selector.current_node_idx]["outputs"]["kappa_elbow_kundu"] is 
None + ) + assert ( + selector.tree["nodes"][selector.current_node_idx]["outputs"]["kappa_allcomps_elbow"] + is None + ) + assert ( + selector.tree["nodes"][selector.current_node_idx]["outputs"]["kappa_nonsig_elbow"] is None + ) + def test_calc_rho_elbow(): """Smoke tests for calc_rho_elbow""" @@ -568,6 +585,23 @@ def test_calc_rho_elbow(): with pytest.raises(ValueError): selection_nodes.calc_rho_elbow(selector, decide_comps, rho_elbow_type="perfect") + # No components with "NotALabel" classification so nothing selected + selector = sample_selector() + decide_comps = "NotALabel" + + # Outputs just the metrics used in this function + selector = selection_nodes.calc_rho_elbow(selector, decide_comps) + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["varex_upper_p"] is None + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["rho_elbow_kundu"] is None + assert ( + selector.tree["nodes"][selector.current_node_idx]["outputs"]["rho_allcomps_elbow"] is None + ) + assert ( + selector.tree["nodes"][selector.current_node_idx]["outputs"]["rho_unclassified_elbow"] + is None + ) + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["elbow_f05"] is None + def test_calc_median_smoke(): """Smoke tests for calc_median""" @@ -934,7 +968,7 @@ def test_calc_extend_factor_smoke(): assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["extend_factor"] == 1.2 -def test_max_good_meanmetricrank_smoke(): +def test_calc_max_good_meanmetricrank_smoke(): """Smoke tests for calc_max_good_meanmetricrank""" # Standard use of this function requires some components to be "provisional accept" @@ -966,6 +1000,25 @@ def test_max_good_meanmetricrank_smoke(): selector.tree["nodes"][selector.current_node_idx]["outputs"]["max_good_meanmetricrank"] > 0 ) + # Standard call to this function with a user defined metric_suffix + selector = sample_selector("provclass") + selector.cross_component_metrics["extend_factor"] = 2.0 + selector = selection_nodes.calc_max_good_meanmetricrank( + selector, "provisional accept", metric_suffix="testsfx" + ) + calc_cross_comp_metrics = {"max_good_meanmetricrank_testsfx"} + output_calc_cross_comp_metrics = set( + selector.tree["nodes"][selector.current_node_idx]["outputs"]["calc_cross_comp_metrics"] + ) + # Confirming the intended metrics are added to outputs and they have non-zero values + assert len(output_calc_cross_comp_metrics - calc_cross_comp_metrics) == 0 + assert ( + selector.tree["nodes"][selector.current_node_idx]["outputs"][ + "max_good_meanmetricrank_testsfx" + ] + > 0 + ) + # Run warning logging code for if any of the cross_component_metrics # already existed and would be over-written selector = sample_selector("provclass") @@ -973,7 +1026,10 @@ def test_max_good_meanmetricrank_smoke(): selector.cross_component_metrics["extend_factor"] = 2.0 selector = selection_nodes.calc_max_good_meanmetricrank(selector, "provisional accept") - + calc_cross_comp_metrics = {"max_good_meanmetricrank"} + output_calc_cross_comp_metrics = set( + selector.tree["nodes"][selector.current_node_idx]["outputs"]["calc_cross_comp_metrics"] + ) assert len(output_calc_cross_comp_metrics - calc_cross_comp_metrics) == 0 assert ( selector.tree["nodes"][selector.current_node_idx]["outputs"]["max_good_meanmetricrank"] > 0 diff --git a/tedana/tests/test_selection_utils.py b/tedana/tests/test_selection_utils.py index 9006b6126..a8675595a 100644 --- a/tedana/tests/test_selection_utils.py +++ b/tedana/tests/test_selection_utils.py @@ -111,6 +111,10 @@ def 
test_selectcomps2use_fails():
     with pytest.raises(ValueError):
         selection_utils.selectcomps2use(selector, decide_comps)

+    selector.component_table = selector.component_table.drop(columns="classification")
+    with pytest.raises(ValueError):
+        selection_utils.selectcomps2use(selector, "all")
+

 def test_comptable_classification_changer_succeeds():
     """

From e16c7a17f0b74970a53c99bf6865067cb60e4b54 Mon Sep 17 00:00:00 2001
From: Joshua Teves
Date: Thu, 22 Dec 2022 15:57:09 -0500
Subject: [PATCH 106/177] Add tedana_reclassify tests

---
 .circleci/config.yml                       |  26 +++++
 Makefile                                   |   5 +-
 tedana/tests/data/reclassify_debug_out.txt |  25 +++++
 tedana/tests/data/reclassify_quiet_out.txt |  93 ++++++++++++++++
 tedana/tests/test_integration.py           | 119 +++++++++++++++++++++
 tedana/workflows/tedana.py                 |  12 +--
 tedana/workflows/tedana_reclassify.py      |  21 ++--
 7 files changed, 278 insertions(+), 23 deletions(-)
 create mode 100644 tedana/tests/data/reclassify_debug_out.txt
 create mode 100644 tedana/tests/data/reclassify_quiet_out.txt

diff --git a/.circleci/config.yml b/.circleci/config.yml
index 40cde265c..ebdda4374 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -244,6 +244,32 @@ jobs:
       paths:
         - src/coverage/.coverage.five-echo

+  reclassify:
+    docker:
+      - image: continuumio/miniconda3
+    working_directory: /tmp/src/tedana
+    steps:
+      - checkout
+      - restore_cache:
+          key: conda-py37-v2-{{ checksum "setup.cfg" }}
+      - run:
+          name: Run integration tests
+          no_output_timeout: 40m
+          command: |
+            apt-get update
+            apt-get install -yqq make
+            source activate tedana_py37  # depends on makeenv_37
+            make reclassify
+            mkdir /tmp/src/coverage
+            mv /tmp/src/tedana/.coverage /tmp/src/coverage/.coverage.reclassify
+      - store_artifacts:
+          path: /tmp/data
+      - persist_to_workspace:
+          root: /tmp
+          paths:
+            - src/coverage/.coverage.reclassify
+
+
   t2smap:
     docker:
       - image: continuumio/miniconda3
diff --git a/Makefile b/Makefile
index d7cc34e9c..16fead0d9 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 .PHONY: all lint

-all_tests: lint unittest three-echo four-echo five-echo t2smap
+all_tests: lint unittest three-echo four-echo five-echo reclassify t2smap

 help:
	@echo "Please use 'make <target>' where <target> is one of:"
@@ -27,5 +27,8 @@ four-echo:
 five-echo:
	@py.test --log-cli-level=INFO --cov-append --cov-report term-missing --cov=tedana -k test_integration_five_echo tedana/tests/test_integration.py

+reclassify:
+	@py.test --log-cli-level=INFO --cov-append --cov-report term-missing --cov=tedana -k test_integration_reclassify tedana/tests/test_integration.py
+
 t2smap:
	@py.test --log-cli-level=INFO --cov-append --cov-report term-missing --cov=tedana -k test_integration_t2smap tedana/tests/test_integration.py
diff --git a/tedana/tests/data/reclassify_debug_out.txt b/tedana/tests/data/reclassify_debug_out.txt
new file mode 100644
index 000000000..7de495692
--- /dev/null
+++ b/tedana/tests/data/reclassify_debug_out.txt
@@ -0,0 +1,25 @@
+figures
+references.bib
+report.txt
+sub-testymctestface_betas_OC.nii.gz
+sub-testymctestface_betas_hik_OC.nii.gz
+sub-testymctestface_betas_hik_OC_MIR.nii.gz
+sub-testymctestface_dataset_description.json
+sub-testymctestface_dn_ts_OC.nii.gz
+sub-testymctestface_dn_ts_OC_MIR.nii.gz
+sub-testymctestface_feats_OC2.nii.gz
+sub-testymctestface_hik_ts_OC.nii.gz
+sub-testymctestface_hik_ts_OC_MIR.nii.gz
+sub-testymctestface_ica_components.nii.gz
+sub-testymctestface_ica_cross_component_metrics.json
+sub-testymctestface_ica_decision_tree.json
+sub-testymctestface_ica_decomposition.json 
+sub-testymctestface_ica_metrics.json +sub-testymctestface_ica_metrics.tsv +sub-testymctestface_ica_mir_mixing.tsv +sub-testymctestface_ica_mixing.tsv +sub-testymctestface_ica_orth_mixing.tsv +sub-testymctestface_ica_status_table.tsv +sub-testymctestface_lowk_ts_OC.nii.gz +sub-testymctestface_registry.json +sub-testymctestface_sphis_hik.nii.gz diff --git a/tedana/tests/data/reclassify_quiet_out.txt b/tedana/tests/data/reclassify_quiet_out.txt new file mode 100644 index 000000000..2107755b9 --- /dev/null +++ b/tedana/tests/data/reclassify_quiet_out.txt @@ -0,0 +1,93 @@ +dataset_description.json +desc-ICAAccepted_components.nii.gz +desc-ICAAccepted_stat-z_components.nii.gz +desc-ICACrossComponent_metrics.json +desc-ICA_components.nii.gz +desc-ICA_decision_tree.json +desc-ICA_decomposition.json +desc-ICA_mixing.tsv +desc-ICA_stat-z_components.nii.gz +desc-ICA_status_table.tsv +desc-optcomAccepted_bold.nii.gz +desc-optcomDenoised_bold.nii.gz +desc-optcomRejected_bold.nii.gz +desc-tedana_metrics.json +desc-tedana_metrics.tsv +desc-tedana_registry.json +figures +references.bib +report.txt +tedana_report.html +figures/carpet_accepted.svg +figures/carpet_denoised.svg +figures/carpet_optcom.svg +figures/carpet_rejected.svg +figures/comp_000.png +figures/comp_001.png +figures/comp_002.png +figures/comp_003.png +figures/comp_004.png +figures/comp_005.png +figures/comp_006.png +figures/comp_007.png +figures/comp_008.png +figures/comp_009.png +figures/comp_010.png +figures/comp_011.png +figures/comp_012.png +figures/comp_013.png +figures/comp_014.png +figures/comp_015.png +figures/comp_016.png +figures/comp_017.png +figures/comp_018.png +figures/comp_019.png +figures/comp_020.png +figures/comp_021.png +figures/comp_022.png +figures/comp_023.png +figures/comp_024.png +figures/comp_025.png +figures/comp_026.png +figures/comp_027.png +figures/comp_028.png +figures/comp_029.png +figures/comp_030.png +figures/comp_031.png +figures/comp_032.png +figures/comp_033.png +figures/comp_034.png +figures/comp_035.png +figures/comp_036.png +figures/comp_037.png +figures/comp_038.png +figures/comp_039.png +figures/comp_040.png +figures/comp_041.png +figures/comp_042.png +figures/comp_043.png +figures/comp_044.png +figures/comp_045.png +figures/comp_046.png +figures/comp_047.png +figures/comp_048.png +figures/comp_049.png +figures/comp_050.png +figures/comp_051.png +figures/comp_052.png +figures/comp_053.png +figures/comp_054.png +figures/comp_055.png +figures/comp_056.png +figures/comp_057.png +figures/comp_058.png +figures/comp_059.png +figures/comp_060.png +figures/comp_061.png +figures/comp_062.png +figures/comp_063.png +figures/comp_064.png +figures/comp_065.png +figures/comp_066.png +figures/comp_067.png +figures/comp_068.png diff --git a/tedana/tests/test_integration.py b/tedana/tests/test_integration.py index 5b16f075f..0e35cfda7 100644 --- a/tedana/tests/test_integration.py +++ b/tedana/tests/test_integration.py @@ -7,6 +7,7 @@ import os.path as op import re import shutil +import subprocess import tarfile from gzip import GzipFile from io import BytesIO @@ -16,6 +17,7 @@ import requests from pkg_resources import resource_filename +from tedana.io import InputHarvester from tedana.workflows import t2smap as t2smap_cli from tedana.workflows import tedana as tedana_cli from tedana.workflows.tedana_reclassify import post_tedana @@ -73,6 +75,43 @@ def download_test_data(osf, outpath): t.extractall(outpath) +def reclassify_path() -> str: + """Get the path to the reclassify test data.""" + return 
"/tmp/data/reclassify/" + + +def reclassify_raw() -> str: + return os.path.join(reclassify_path(), "TED.three-echo-previous") + + +def reclassify_raw_registry() -> str: + return os.path.join(reclassify_raw(), "desc-tedana_registry.json") + + +def reclassify_url() -> str: + """Get the URL to reclassify test data.""" + return "https://osf.io/mt59n/download" + + +def guarantee_reclassify_data() -> None: + """Ensures that the reclassify data exists at the expected path.""" + if not os.path.exists(reclassify_raw_registry()): + download_test_data(reclassify_url(), reclassify_path()) + else: + # Path exists, be sure that everything in registry exists + ioh = InputHarvester(os.path.join(reclassify_raw(), "desc-tedana_registry.json")) + all_present = True + for _, v in ioh.registry.items(): + if not isinstance(v, list): + if not os.path.exists(os.path.join(reclassify_raw(), v)): + all_present = False + break + if not all_present: + # Something was removed, need to re-download + shutil.rmtree(reclassify_raw()) + guarantee_reclassify_data() + + def test_integration_five_echo(skip_integration): """Integration test of the full tedana workflow using five-echo test data.""" @@ -209,6 +248,86 @@ def test_integration_three_echo(skip_integration): check_integration_outputs(fn, out_dir) +def test_integration_reclassify_insufficient_args(skip_integration): + if skip_integration: + pytest.skip("Skipping reclassify insufficient args") + + guarantee_reclassify_data() + + args = [ + "tedana_reclassify", + os.path.join(reclassify_raw(), "desc-tedana_registry.json"), + ] + + result = subprocess.run(args, capture_output=True) + assert b"ValueError: Must manually accept or reject" in result.stderr + assert result.returncode != 0 + + +def test_integration_reclassify_quiet(skip_integration): + if skip_integration: + pytest.skip("Skip reclassify quiet") + + guarantee_reclassify_data() + out_dir = os.path.join(reclassify_path(), "quiet") + if os.path.exists(out_dir): + shutil.rmtree(out_dir) + + args = [ + "tedana_reclassify", + "--manacc", + "1", + "2", + "3", + "--manrej", + "4", + "5", + "6", + "-o", + out_dir, + os.path.join(reclassify_raw(), "desc-tedana_registry.json"), + ] + + results = subprocess.run(args, capture_output=True) + assert results.returncode == 0 + fn = resource_filename("tedana", "tests/data/reclassify_quiet_out.txt") + check_integration_outputs(fn, out_dir) + + +def test_integration_reclassify_debug(skip_integration): + if skip_integration: + pytest.skip("Skip reclassify debug") + + guarantee_reclassify_data() + out_dir = os.path.join(reclassify_path(), "debug") + if os.path.exists(out_dir): + shutil.rmtree(out_dir) + + args = [ + "tedana_reclassify", + "--manacc", + "1", + "2", + "3", + "--prefix", + "sub-testymctestface", + "--convention", + "orig", + "--tedort", + "--mir", + "--noreports", + "-o", + out_dir, + "--debug", + os.path.join(reclassify_raw(), "desc-tedana_registry.json"), + ] + + results = subprocess.run(args, capture_output=True) + assert results.returncode == 0 + fn = resource_filename("tedana", "tests/data/reclassify_debug_out.txt") + check_integration_outputs(fn, out_dir) + + def test_integration_t2smap(skip_integration): """Integration test of the full t2smap workflow using five-echo test data""" if skip_integration: diff --git a/tedana/workflows/tedana.py b/tedana/workflows/tedana.py index 6d627a3a7..bb1773bbc 100644 --- a/tedana/workflows/tedana.py +++ b/tedana/workflows/tedana.py @@ -8,7 +8,6 @@ import os import os.path as op import shutil -import sys from glob import glob 
import numpy as np @@ -849,15 +848,8 @@ def tedana_workflow( png_cmap=png_cmap, ) - if sys.version_info.major == 3 and sys.version_info.minor < 6: - warn_msg = ( - "Reports requested but Python version is less than " - "3.6.0. Dynamic reports will not be generated." - ) - LGR.warn(warn_msg) - else: - LGR.info("Generating dynamic report") - reporting.generate_report(io_generator, tr=img_t_r) + LGR.info("Generating dynamic report") + reporting.generate_report(io_generator, tr=img_t_r) LGR.info("Workflow completed") utils.teardown_loggers() diff --git a/tedana/workflows/tedana_reclassify.py b/tedana/workflows/tedana_reclassify.py index 7ad092010..2ce861b15 100644 --- a/tedana/workflows/tedana_reclassify.py +++ b/tedana/workflows/tedana_reclassify.py @@ -6,7 +6,6 @@ import logging import os import os.path as op -import sys from glob import glob import numpy as np @@ -50,6 +49,7 @@ def main(): default="auto", ) parser.add_argument( + "-o", "--out-dir", dest="out_dir", type=str, @@ -82,7 +82,7 @@ def main(): help="Run minimum image regression.", ) parser.add_argument( - "--no-reports", + "--noreports", dest="no_reports", action="store_true", help=( @@ -128,6 +128,7 @@ def main(): reject=args.manual_reject, out_dir=args.out_dir, config=args.config, + prefix=args.prefix, convention=args.convention, tedort=args.tedort, mir=args.mir, @@ -219,6 +220,9 @@ def post_tedana( rej = () if (not accept) and (not reject): + # TODO: remove + print(accept) + print(reject) raise ValueError("Must manually accept or reject at least one component") in_both = [] @@ -328,7 +332,7 @@ def post_tedana( betas = np.linalg.lstsq(acc_ts, rej_ts, rcond=None)[0] pred_rej_ts = np.dot(acc_ts, betas) resid = rej_ts - pred_rej_ts - rej_idx = comps_accepted[comps_accepted].index + rej_idx = comps_rejected[comps_rejected].index mmix[:, rej_idx] = resid comp_names = [ io.add_decomp_prefix(comp, prefix="ica", max_value=comptable.index.max()) @@ -429,15 +433,8 @@ def post_tedana( png_cmap=png_cmap, ) - if sys.version_info.major == 3 and sys.version_info.minor < 6: - warn_msg = ( - "Reports requested but Python version is less than " - "3.6.0. Dynamic reports will not be generated." 
- ) - LGR.warn(warn_msg) - else: - LGR.info("Generating dynamic report") - reporting.generate_report(io_generator, tr=img_t_r) + LGR.info("Generating dynamic report") + reporting.generate_report(io_generator, tr=img_t_r) io_generator.save_self() LGR.info("Workflow completed") From 00bbb9b44e0a6f4e0a371f41b00e150afb9d32a5 Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Thu, 22 Dec 2022 15:59:44 -0500 Subject: [PATCH 107/177] Actually add test to circle workflow --- .circleci/config.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.circleci/config.yml b/.circleci/config.yml index ebdda4374..64f507d55 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -339,6 +339,9 @@ workflows: - five-echo: requires: - makeenv_37 + - reclassify: + requires: + - makeenv_37 - t2smap: requires: - makeenv_37 From 9711aef0587c552bb445ef55efc54ae03d752636 Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Thu, 22 Dec 2022 16:00:57 -0500 Subject: [PATCH 108/177] Maybe actually add it --- .circleci/config.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.circleci/config.yml b/.circleci/config.yml index 64f507d55..0ee8e2f97 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -357,4 +357,5 @@ workflows: - three-echo - four-echo - five-echo + - reclassify - t2smap From 460a4532e2a0f1579dc1bb37a2b7e100a609b782 Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Thu, 22 Dec 2022 16:11:02 -0500 Subject: [PATCH 109/177] Change o to outdir --- tedana/tests/test_integration.py | 4 ++-- tedana/workflows/tedana_reclassify.py | 1 - 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/tedana/tests/test_integration.py b/tedana/tests/test_integration.py index 0e35cfda7..5373a3d22 100644 --- a/tedana/tests/test_integration.py +++ b/tedana/tests/test_integration.py @@ -283,7 +283,7 @@ def test_integration_reclassify_quiet(skip_integration): "4", "5", "6", - "-o", + "--out-dir", out_dir, os.path.join(reclassify_raw(), "desc-tedana_registry.json"), ] @@ -316,7 +316,7 @@ def test_integration_reclassify_debug(skip_integration): "--tedort", "--mir", "--noreports", - "-o", + "--out-dir", out_dir, "--debug", os.path.join(reclassify_raw(), "desc-tedana_registry.json"), diff --git a/tedana/workflows/tedana_reclassify.py b/tedana/workflows/tedana_reclassify.py index 2ce861b15..b58683009 100644 --- a/tedana/workflows/tedana_reclassify.py +++ b/tedana/workflows/tedana_reclassify.py @@ -49,7 +49,6 @@ def main(): default="auto", ) parser.add_argument( - "-o", "--out-dir", dest="out_dir", type=str, From 691d9d50b938db7118804d40695cb8feb2d6854a Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Thu, 22 Dec 2022 16:16:02 -0500 Subject: [PATCH 110/177] Fix noreports maybe --- tedana/tests/test_integration.py | 2 +- tedana/workflows/tedana_reclassify.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tedana/tests/test_integration.py b/tedana/tests/test_integration.py index 5373a3d22..fd6975e50 100644 --- a/tedana/tests/test_integration.py +++ b/tedana/tests/test_integration.py @@ -315,7 +315,7 @@ def test_integration_reclassify_debug(skip_integration): "orig", "--tedort", "--mir", - "--noreports", + "--no-reports", "--out-dir", out_dir, "--debug", diff --git a/tedana/workflows/tedana_reclassify.py b/tedana/workflows/tedana_reclassify.py index b58683009..d19b1ad19 100644 --- a/tedana/workflows/tedana_reclassify.py +++ b/tedana/workflows/tedana_reclassify.py @@ -81,7 +81,7 @@ def main(): help="Run minimum image regression.", ) parser.add_argument( - "--noreports", + "--no-reports", 
dest="no_reports", action="store_true", help=( From 989e10edbf032fee55c943e1fc26f43a0618d464 Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Thu, 22 Dec 2022 16:21:16 -0500 Subject: [PATCH 111/177] Fix tedort --- tedana/workflows/tedana_reclassify.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tedana/workflows/tedana_reclassify.py b/tedana/workflows/tedana_reclassify.py index d19b1ad19..6cca99d04 100644 --- a/tedana/workflows/tedana_reclassify.py +++ b/tedana/workflows/tedana_reclassify.py @@ -331,8 +331,7 @@ def post_tedana( betas = np.linalg.lstsq(acc_ts, rej_ts, rcond=None)[0] pred_rej_ts = np.dot(acc_ts, betas) resid = rej_ts - pred_rej_ts - rej_idx = comps_rejected[comps_rejected].index - mmix[:, rej_idx] = resid + mmix[:, comps_rejected] = resid comp_names = [ io.add_decomp_prefix(comp, prefix="ica", max_value=comptable.index.max()) for comp in range(selector.n_comps) From 3c2d76fc24e6fa749bb9b0662b9dfcb0bd1a5bb3 Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Thu, 22 Dec 2022 17:27:15 -0500 Subject: [PATCH 112/177] CircleCI are you okay? From ad29c0d26bfdcb8687e7446bc96ce6dd39f1dec9 Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Thu, 22 Dec 2022 17:39:54 -0500 Subject: [PATCH 113/177] Circle if you keep this up I will switch to Actions --- tedana/workflows/tedana_reclassify.py | 443 -------------------------- 1 file changed, 443 deletions(-) delete mode 100644 tedana/workflows/tedana_reclassify.py diff --git a/tedana/workflows/tedana_reclassify.py b/tedana/workflows/tedana_reclassify.py deleted file mode 100644 index 6cca99d04..000000000 --- a/tedana/workflows/tedana_reclassify.py +++ /dev/null @@ -1,443 +0,0 @@ -""" -Run the reclassification workflow for a previous tedana run -""" -import argparse -import datetime -import logging -import os -import os.path as op -from glob import glob - -import numpy as np -import pandas as pd - -import tedana.gscontrol as gsc -from tedana import __version__, io, reporting, selection, utils -from tedana.bibtex import get_description_references - -LGR = logging.getLogger("GENERAL") -RepLGR = logging.getLogger("REPORT") - - -def main(): - from tedana import __version__ - - verstr = "tedana_reclassify v{}".format(__version__) - parser = argparse.ArgumentParser() - parser.add_argument( - "registry", - help="File registry from a previous tedana run", - ) - parser.add_argument( - "--manacc", - dest="manual_accept", - nargs="+", - type=int, - help="Component indices to accept (zero-indexed).", - ) - parser.add_argument( - "--manrej", - dest="manual_reject", - nargs="+", - type=int, - help="Component indices to reject (zero-indexed).", - ) - parser.add_argument( - "--config", - dest="config", - help="File naming configuration. Default auto (prepackaged).", - default="auto", - ) - parser.add_argument( - "--out-dir", - dest="out_dir", - type=str, - metavar="PATH", - help="Output directory.", - default=".", - ) - parser.add_argument( - "--prefix", dest="prefix", type=str, help="Prefix for filenames generated.", default="" - ) - parser.add_argument( - "--convention", - dest="convention", - action="store", - choices=["orig", "bids"], - help=("Filenaming convention. bids will use the latest BIDS derivatives version."), - default="bids", - ) - parser.add_argument( - "--tedort", - dest="tedort", - action="store_true", - help=("Orthogonalize rejected components w.r.t. 
accepted components prior to denoising."), - default=False, - ) - parser.add_argument( - "--mir", - dest="mir", - action="store_true", - help="Run minimum image regression.", - ) - parser.add_argument( - "--no-reports", - dest="no_reports", - action="store_true", - help=( - "Creates a figures folder with static component " - "maps, timecourse plots and other diagnostic " - "images and displays these in an interactive " - "reporting framework" - ), - default=False, - ) - parser.add_argument( - "--png-cmap", dest="png_cmap", type=str, help="Colormap for figures", default="coolwarm" - ) - parser.add_argument( - "--debug", - dest="debug", - action="store_true", - help=( - "Logs in the terminal will have increased " - "verbosity, and will also be written into " - "a .tsv file in the output directory." - ), - default=False, - ) - parser.add_argument( - "--force", - "-f", - dest="force", - action="store_true", - help="Force overwriting of files. Default False.", - ) - parser.add_argument( - "--quiet", dest="quiet", help=argparse.SUPPRESS, action="store_true", default=False - ) - parser.add_argument("-v", "--version", action="version", version=verstr) - - args = parser.parse_args() - - # Run post-tedana - post_tedana( - args.registry, - accept=args.manual_accept, - reject=args.manual_reject, - out_dir=args.out_dir, - config=args.config, - prefix=args.prefix, - convention=args.convention, - tedort=args.tedort, - mir=args.mir, - no_reports=args.no_reports, - png_cmap=args.png_cmap, - force=args.force, - debug=args.debug, - quiet=args.quiet, - ) - - -def post_tedana( - registry, - accept=[], - reject=[], - out_dir=".", - config="auto", - convention="bids", - prefix="", - tedort=False, - mir=False, - no_reports=False, - png_cmap="coolwarm", - force=False, - debug=False, - quiet=False, -): - """ - Run the post-tedana manual classification workflow. - - Please remember to cite [1]_. - - Parameters - ---------- - registry: :obj:`str` - The previously run registry as a JSON file. - accept: :obj: `list` - A list of integer values of components to accept in this workflow. - reject: :obj: `list` - A list of integer values of components to reject in this workflow. - out_dir : :obj:`str`, optional - Output directory. - tedort : :obj:`bool`, optional - Orthogonalize rejected components w.r.t. accepted ones prior to - denoising. Default is False. - mir : :obj:`bool`, optional - Run minimum image regression after denoising. Default is False. - no_reports : obj:'bool', optional - Do not generate .html reports and .png plots. Default is false such - that reports are generated. - png_cmap : obj:'str', optional - Name of a matplotlib colormap to be used when generating figures. - Cannot be used with --no-png. Default is 'coolwarm'. - debug : :obj:`bool`, optional - Whether to run in debugging mode or not. Default is False. - force : :obj:`bool`, optional - Whether to force file overwrites. Default is False. - quiet : :obj:`bool`, optional - If True, suppresses logging/printing of messages. Default is False. - - Notes - ----- - This workflow writes out several files. For a complete list of the files - generated by this workflow, please visit - https://tedana.readthedocs.io/en/latest/outputs.html - - References - ---------- - .. [1] DuPre, E. M., Salo, T., Ahmed, Z., Bandettini, P. A., Bottenhorn, K. L., - Caballero-Gaudes, C., Dowdle, L. T., Gonzalez-Castillo, J., Heunis, S., - Kundu, P., Laird, A. R., Markello, R., Markiewicz, C. J., Moia, S., - Staden, I., Teves, J. 
B., Uruñuela, E., Vaziri-Pashkam, M., - Whitaker, K., & Handwerker, D. A. (2021). - TE-dependent analysis of multi-echo fMRI with tedana. - Journal of Open Source Software, 6(66), 3669. doi:10.21105/joss.03669. - """ - out_dir = op.abspath(out_dir) - if not op.isdir(out_dir): - os.mkdir(out_dir) - - # Check that there is no overlap in accepted/rejected components - if accept: - acc = set(accept) - else: - acc = () - if reject: - rej = set(reject) - else: - rej = () - - if (not accept) and (not reject): - # TODO: remove - print(accept) - print(reject) - raise ValueError("Must manually accept or reject at least one component") - - in_both = [] - for a in acc: - if a in rej: - in_both.append(a) - for r in rej: - if r in acc and r not in rej: - in_both.append(r) - if len(in_both) != 0: - raise ValueError("The following components were both accepted and rejected: " f"{in_both}") - - # boilerplate - basename = "report" - extension = "txt" - repname = op.join(out_dir, (basename + "." + extension)) - bibtex_file = op.join(out_dir, "references.bib") - repex = op.join(out_dir, (basename + "*")) - previousreps = glob(repex) - previousreps.sort(reverse=True) - for f in previousreps: - previousparts = op.splitext(f) - newname = previousparts[0] + "_old" + previousparts[1] - os.rename(f, newname) - - # create logfile name - basename = "tedana_" - extension = "tsv" - start_time = datetime.datetime.now().strftime("%Y-%m-%dT%H%M%S") - logname = op.join(out_dir, (basename + start_time + "." + extension)) - utils.setup_loggers(logname=logname, repname=repname, quiet=quiet, debug=debug) - - LGR.info("Using output directory: {}".format(out_dir)) - - ioh = io.InputHarvester(registry) - comptable = ioh.get_file_contents("ICA metrics tsv") - xcomp = ioh.get_file_contents("ICA cross component metrics json") - status_table = ioh.get_file_contents("ICA status table tsv") - previous_tree_fname = ioh.get_file_path("ICA decision tree json") - mmix = np.asarray(ioh.get_file_contents("ICA mixing tsv")) - mask_denoise = ioh.get_file_contents("adaptive mask img") - # If global signal was removed in the previous run, we can assume that - # the user wants to use that file again. If not, use the default of - # optimally combined data. - gskey = "removed gs combined img" - if ioh.get_file_path(gskey): - data_oc = ioh.get_file_contents(gskey) - used_gs = True - else: - data_oc = ioh.get_file_contents("combined img") - used_gs = False - io_generator = io.OutputGenerator( - data_oc, - convention=convention, - prefix=prefix, - config=config, - force=force, - verbose=False, - out_dir=out_dir, - old_registry=ioh.registry, - ) - - # Make a new selector with the added files - selector = selection.component_selector.ComponentSelector( - previous_tree_fname, comptable, cross_component_metrics=xcomp, status_table=status_table - ) - - if accept: - selector.add_manual(accept, "accepted") - if reject: - selector.add_manual(reject, "rejected") - selector.select() - comptable = selector.component_table - - # NOTE: most of these will be identical to previous, but this makes - # things easier for programs which will view the data after running. 
- # First, make the output generator - comp_names = comptable["Component"].values - mixing_df = pd.DataFrame(data=mmix, columns=comp_names) - to_copy = [ - "z-scored ICA components img", - "ICA mixing tsv", - "ICA decomposition json", - "ICA metrics json", - ] - if used_gs: - to_copy.append(gskey) - to_copy.append("has gs combined img") - - for tc in to_copy: - print(tc) - io_generator.save_file(ioh.get_file_contents(tc), tc) - - # Save component selector and tree - selector.to_files(io_generator) - - if selector.n_accepted_comps == 0: - LGR.warning("No BOLD components detected! Please check data and results!") - - mmix_orig = mmix.copy() - # TODO: make this a function - if tedort: - comps_accepted = selector.accepted_comps - comps_rejected = selector.rejected_comps - acc_ts = mmix[:, comps_accepted] - rej_ts = mmix[:, comps_rejected] - betas = np.linalg.lstsq(acc_ts, rej_ts, rcond=None)[0] - pred_rej_ts = np.dot(acc_ts, betas) - resid = rej_ts - pred_rej_ts - mmix[:, comps_rejected] = resid - comp_names = [ - io.add_decomp_prefix(comp, prefix="ica", max_value=comptable.index.max()) - for comp in range(selector.n_comps) - ] - mixing_df = pd.DataFrame(data=mmix, columns=comp_names) - io_generator.save_file(mixing_df, "ICA orthogonalized mixing tsv") - RepLGR.info( - "Rejected components' time series were then " - "orthogonalized with respect to accepted components' time " - "series." - ) - - n_vols = data_oc.shape[3] - img_t_r = io_generator.reference_img.header.get_zooms()[-1] - mask_denoise = utils.reshape_niimg(mask_denoise).astype(bool) - data_oc = utils.reshape_niimg(data_oc) - - # TODO: make a better result-writing function - # #############################################!!!! - # TODO: make a better time series creation function - # - get_ts_fit_tag(include=[], exclude=[]) - # - get_ts_regress/residual_tag(include=[], exclude=[]) - # How to handle [acc/rej] + tag ? - io.writeresults( - data_oc, - mask=mask_denoise, - comptable=comptable, - mmix=mmix, - n_vols=n_vols, - io_generator=io_generator, - ) - - if mir: - io_generator.force = True - gsc.minimum_image_regression(data_oc, mmix, mask_denoise, comptable, io_generator) - io_generator.force = False - - # Write out BIDS-compatible description file - derivative_metadata = { - "Name": "tedana Outputs", - "BIDSVersion": "1.5.0", - "DatasetType": "derivative", - "GeneratedBy": [ - { - "Name": "tedana_reclassify", - "Version": __version__, - "Description": ( - "A denoising pipeline for the identification and removal " - "of non-BOLD noise from multi-echo fMRI data." 
- ), - "CodeURL": "https://github.com/ME-ICA/tedana", - } - ], - } - io_generator.save_file(derivative_metadata, "data description json") - - with open(repname, "r") as fo: - report = [line.rstrip() for line in fo.readlines()] - report = " ".join(report) - with open(repname, "w") as fo: - fo.write(report) - - # Collect BibTeX entries for cited papers - references = get_description_references(report) - - with open(bibtex_file, "w") as fo: - fo.write(references) - - if not no_reports: - LGR.info("Making figures folder with static component maps and timecourse plots.") - - dn_ts, hikts, lowkts = io.denoise_ts(data_oc, mmix, mask_denoise, comptable) - - # Figure out which control methods were used - gscontrol = [] - if used_gs: - gscontrol.append("gsr") - if mir: - gscontrol.append("mir") - gscontrol = None if gscontrol is [] else gscontrol - - reporting.static_figures.carpet_plot( - optcom_ts=data_oc, - denoised_ts=dn_ts, - hikts=hikts, - lowkts=lowkts, - mask=mask_denoise, - io_generator=io_generator, - gscontrol=gscontrol, - ) - reporting.static_figures.comp_figures( - data_oc, - mask=mask_denoise, - comptable=comptable, - mmix=mmix_orig, - io_generator=io_generator, - png_cmap=png_cmap, - ) - - LGR.info("Generating dynamic report") - reporting.generate_report(io_generator, tr=img_t_r) - - io_generator.save_self() - LGR.info("Workflow completed") - utils.teardown_loggers() - - -if __name__ == "__main__": - main() From ff1d6319a35b310298200cfa6cb7623360891a40 Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Thu, 22 Dec 2022 17:42:31 -0500 Subject: [PATCH 114/177] Revert "Circle if you keep this up I will switch to Actions" This reverts commit ad29c0d26bfdcb8687e7446bc96ce6dd39f1dec9. --- tedana/workflows/tedana_reclassify.py | 443 ++++++++++++++++++++++++++ 1 file changed, 443 insertions(+) create mode 100644 tedana/workflows/tedana_reclassify.py diff --git a/tedana/workflows/tedana_reclassify.py b/tedana/workflows/tedana_reclassify.py new file mode 100644 index 000000000..6cca99d04 --- /dev/null +++ b/tedana/workflows/tedana_reclassify.py @@ -0,0 +1,443 @@ +""" +Run the reclassification workflow for a previous tedana run +""" +import argparse +import datetime +import logging +import os +import os.path as op +from glob import glob + +import numpy as np +import pandas as pd + +import tedana.gscontrol as gsc +from tedana import __version__, io, reporting, selection, utils +from tedana.bibtex import get_description_references + +LGR = logging.getLogger("GENERAL") +RepLGR = logging.getLogger("REPORT") + + +def main(): + from tedana import __version__ + + verstr = "tedana_reclassify v{}".format(__version__) + parser = argparse.ArgumentParser() + parser.add_argument( + "registry", + help="File registry from a previous tedana run", + ) + parser.add_argument( + "--manacc", + dest="manual_accept", + nargs="+", + type=int, + help="Component indices to accept (zero-indexed).", + ) + parser.add_argument( + "--manrej", + dest="manual_reject", + nargs="+", + type=int, + help="Component indices to reject (zero-indexed).", + ) + parser.add_argument( + "--config", + dest="config", + help="File naming configuration. 
Default auto (prepackaged).", + default="auto", + ) + parser.add_argument( + "--out-dir", + dest="out_dir", + type=str, + metavar="PATH", + help="Output directory.", + default=".", + ) + parser.add_argument( + "--prefix", dest="prefix", type=str, help="Prefix for filenames generated.", default="" + ) + parser.add_argument( + "--convention", + dest="convention", + action="store", + choices=["orig", "bids"], + help=("Filenaming convention. bids will use the latest BIDS derivatives version."), + default="bids", + ) + parser.add_argument( + "--tedort", + dest="tedort", + action="store_true", + help=("Orthogonalize rejected components w.r.t. accepted components prior to denoising."), + default=False, + ) + parser.add_argument( + "--mir", + dest="mir", + action="store_true", + help="Run minimum image regression.", + ) + parser.add_argument( + "--no-reports", + dest="no_reports", + action="store_true", + help=( + "Creates a figures folder with static component " + "maps, timecourse plots and other diagnostic " + "images and displays these in an interactive " + "reporting framework" + ), + default=False, + ) + parser.add_argument( + "--png-cmap", dest="png_cmap", type=str, help="Colormap for figures", default="coolwarm" + ) + parser.add_argument( + "--debug", + dest="debug", + action="store_true", + help=( + "Logs in the terminal will have increased " + "verbosity, and will also be written into " + "a .tsv file in the output directory." + ), + default=False, + ) + parser.add_argument( + "--force", + "-f", + dest="force", + action="store_true", + help="Force overwriting of files. Default False.", + ) + parser.add_argument( + "--quiet", dest="quiet", help=argparse.SUPPRESS, action="store_true", default=False + ) + parser.add_argument("-v", "--version", action="version", version=verstr) + + args = parser.parse_args() + + # Run post-tedana + post_tedana( + args.registry, + accept=args.manual_accept, + reject=args.manual_reject, + out_dir=args.out_dir, + config=args.config, + prefix=args.prefix, + convention=args.convention, + tedort=args.tedort, + mir=args.mir, + no_reports=args.no_reports, + png_cmap=args.png_cmap, + force=args.force, + debug=args.debug, + quiet=args.quiet, + ) + + +def post_tedana( + registry, + accept=[], + reject=[], + out_dir=".", + config="auto", + convention="bids", + prefix="", + tedort=False, + mir=False, + no_reports=False, + png_cmap="coolwarm", + force=False, + debug=False, + quiet=False, +): + """ + Run the post-tedana manual classification workflow. + + Please remember to cite [1]_. + + Parameters + ---------- + registry: :obj:`str` + The previously run registry as a JSON file. + accept: :obj: `list` + A list of integer values of components to accept in this workflow. + reject: :obj: `list` + A list of integer values of components to reject in this workflow. + out_dir : :obj:`str`, optional + Output directory. + tedort : :obj:`bool`, optional + Orthogonalize rejected components w.r.t. accepted ones prior to + denoising. Default is False. + mir : :obj:`bool`, optional + Run minimum image regression after denoising. Default is False. + no_reports : obj:'bool', optional + Do not generate .html reports and .png plots. Default is false such + that reports are generated. + png_cmap : obj:'str', optional + Name of a matplotlib colormap to be used when generating figures. + Cannot be used with --no-png. Default is 'coolwarm'. + debug : :obj:`bool`, optional + Whether to run in debugging mode or not. Default is False. 
+ force : :obj:`bool`, optional + Whether to force file overwrites. Default is False. + quiet : :obj:`bool`, optional + If True, suppresses logging/printing of messages. Default is False. + + Notes + ----- + This workflow writes out several files. For a complete list of the files + generated by this workflow, please visit + https://tedana.readthedocs.io/en/latest/outputs.html + + References + ---------- + .. [1] DuPre, E. M., Salo, T., Ahmed, Z., Bandettini, P. A., Bottenhorn, K. L., + Caballero-Gaudes, C., Dowdle, L. T., Gonzalez-Castillo, J., Heunis, S., + Kundu, P., Laird, A. R., Markello, R., Markiewicz, C. J., Moia, S., + Staden, I., Teves, J. B., Uruñuela, E., Vaziri-Pashkam, M., + Whitaker, K., & Handwerker, D. A. (2021). + TE-dependent analysis of multi-echo fMRI with tedana. + Journal of Open Source Software, 6(66), 3669. doi:10.21105/joss.03669. + """ + out_dir = op.abspath(out_dir) + if not op.isdir(out_dir): + os.mkdir(out_dir) + + # Check that there is no overlap in accepted/rejected components + if accept: + acc = set(accept) + else: + acc = () + if reject: + rej = set(reject) + else: + rej = () + + if (not accept) and (not reject): + # TODO: remove + print(accept) + print(reject) + raise ValueError("Must manually accept or reject at least one component") + + in_both = [] + for a in acc: + if a in rej: + in_both.append(a) + for r in rej: + if r in acc and r not in rej: + in_both.append(r) + if len(in_both) != 0: + raise ValueError("The following components were both accepted and rejected: " f"{in_both}") + + # boilerplate + basename = "report" + extension = "txt" + repname = op.join(out_dir, (basename + "." + extension)) + bibtex_file = op.join(out_dir, "references.bib") + repex = op.join(out_dir, (basename + "*")) + previousreps = glob(repex) + previousreps.sort(reverse=True) + for f in previousreps: + previousparts = op.splitext(f) + newname = previousparts[0] + "_old" + previousparts[1] + os.rename(f, newname) + + # create logfile name + basename = "tedana_" + extension = "tsv" + start_time = datetime.datetime.now().strftime("%Y-%m-%dT%H%M%S") + logname = op.join(out_dir, (basename + start_time + "." + extension)) + utils.setup_loggers(logname=logname, repname=repname, quiet=quiet, debug=debug) + + LGR.info("Using output directory: {}".format(out_dir)) + + ioh = io.InputHarvester(registry) + comptable = ioh.get_file_contents("ICA metrics tsv") + xcomp = ioh.get_file_contents("ICA cross component metrics json") + status_table = ioh.get_file_contents("ICA status table tsv") + previous_tree_fname = ioh.get_file_path("ICA decision tree json") + mmix = np.asarray(ioh.get_file_contents("ICA mixing tsv")) + mask_denoise = ioh.get_file_contents("adaptive mask img") + # If global signal was removed in the previous run, we can assume that + # the user wants to use that file again. If not, use the default of + # optimally combined data. 
+ gskey = "removed gs combined img" + if ioh.get_file_path(gskey): + data_oc = ioh.get_file_contents(gskey) + used_gs = True + else: + data_oc = ioh.get_file_contents("combined img") + used_gs = False + io_generator = io.OutputGenerator( + data_oc, + convention=convention, + prefix=prefix, + config=config, + force=force, + verbose=False, + out_dir=out_dir, + old_registry=ioh.registry, + ) + + # Make a new selector with the added files + selector = selection.component_selector.ComponentSelector( + previous_tree_fname, comptable, cross_component_metrics=xcomp, status_table=status_table + ) + + if accept: + selector.add_manual(accept, "accepted") + if reject: + selector.add_manual(reject, "rejected") + selector.select() + comptable = selector.component_table + + # NOTE: most of these will be identical to previous, but this makes + # things easier for programs which will view the data after running. + # First, make the output generator + comp_names = comptable["Component"].values + mixing_df = pd.DataFrame(data=mmix, columns=comp_names) + to_copy = [ + "z-scored ICA components img", + "ICA mixing tsv", + "ICA decomposition json", + "ICA metrics json", + ] + if used_gs: + to_copy.append(gskey) + to_copy.append("has gs combined img") + + for tc in to_copy: + print(tc) + io_generator.save_file(ioh.get_file_contents(tc), tc) + + # Save component selector and tree + selector.to_files(io_generator) + + if selector.n_accepted_comps == 0: + LGR.warning("No BOLD components detected! Please check data and results!") + + mmix_orig = mmix.copy() + # TODO: make this a function + if tedort: + comps_accepted = selector.accepted_comps + comps_rejected = selector.rejected_comps + acc_ts = mmix[:, comps_accepted] + rej_ts = mmix[:, comps_rejected] + betas = np.linalg.lstsq(acc_ts, rej_ts, rcond=None)[0] + pred_rej_ts = np.dot(acc_ts, betas) + resid = rej_ts - pred_rej_ts + mmix[:, comps_rejected] = resid + comp_names = [ + io.add_decomp_prefix(comp, prefix="ica", max_value=comptable.index.max()) + for comp in range(selector.n_comps) + ] + mixing_df = pd.DataFrame(data=mmix, columns=comp_names) + io_generator.save_file(mixing_df, "ICA orthogonalized mixing tsv") + RepLGR.info( + "Rejected components' time series were then " + "orthogonalized with respect to accepted components' time " + "series." + ) + + n_vols = data_oc.shape[3] + img_t_r = io_generator.reference_img.header.get_zooms()[-1] + mask_denoise = utils.reshape_niimg(mask_denoise).astype(bool) + data_oc = utils.reshape_niimg(data_oc) + + # TODO: make a better result-writing function + # #############################################!!!! + # TODO: make a better time series creation function + # - get_ts_fit_tag(include=[], exclude=[]) + # - get_ts_regress/residual_tag(include=[], exclude=[]) + # How to handle [acc/rej] + tag ? + io.writeresults( + data_oc, + mask=mask_denoise, + comptable=comptable, + mmix=mmix, + n_vols=n_vols, + io_generator=io_generator, + ) + + if mir: + io_generator.force = True + gsc.minimum_image_regression(data_oc, mmix, mask_denoise, comptable, io_generator) + io_generator.force = False + + # Write out BIDS-compatible description file + derivative_metadata = { + "Name": "tedana Outputs", + "BIDSVersion": "1.5.0", + "DatasetType": "derivative", + "GeneratedBy": [ + { + "Name": "tedana_reclassify", + "Version": __version__, + "Description": ( + "A denoising pipeline for the identification and removal " + "of non-BOLD noise from multi-echo fMRI data." 
+ ), + "CodeURL": "https://github.com/ME-ICA/tedana", + } + ], + } + io_generator.save_file(derivative_metadata, "data description json") + + with open(repname, "r") as fo: + report = [line.rstrip() for line in fo.readlines()] + report = " ".join(report) + with open(repname, "w") as fo: + fo.write(report) + + # Collect BibTeX entries for cited papers + references = get_description_references(report) + + with open(bibtex_file, "w") as fo: + fo.write(references) + + if not no_reports: + LGR.info("Making figures folder with static component maps and timecourse plots.") + + dn_ts, hikts, lowkts = io.denoise_ts(data_oc, mmix, mask_denoise, comptable) + + # Figure out which control methods were used + gscontrol = [] + if used_gs: + gscontrol.append("gsr") + if mir: + gscontrol.append("mir") + gscontrol = None if gscontrol is [] else gscontrol + + reporting.static_figures.carpet_plot( + optcom_ts=data_oc, + denoised_ts=dn_ts, + hikts=hikts, + lowkts=lowkts, + mask=mask_denoise, + io_generator=io_generator, + gscontrol=gscontrol, + ) + reporting.static_figures.comp_figures( + data_oc, + mask=mask_denoise, + comptable=comptable, + mmix=mmix_orig, + io_generator=io_generator, + png_cmap=png_cmap, + ) + + LGR.info("Generating dynamic report") + reporting.generate_report(io_generator, tr=img_t_r) + + io_generator.save_self() + LGR.info("Workflow completed") + utils.teardown_loggers() + + +if __name__ == "__main__": + main() From 4325c32458d6c1c1129ebde25dfb63963d13b597 Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Thu, 22 Dec 2022 17:47:46 -0500 Subject: [PATCH 115/177] Maybe silence duecredit and re-trigger Circle --- tedana/__init__.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tedana/__init__.py b/tedana/__init__.py index a26b9ef10..01d2f97c2 100644 --- a/tedana/__init__.py +++ b/tedana/__init__.py @@ -13,5 +13,6 @@ # cmp is not used, so ignore nipype-generated warnings warnings.filterwarnings("ignore", r"cmp not installed") +warnings.filterwarnings("ignore", r"Failed to import duecredit due to No module named 'duecredit'") del get_versions From 003666b22db27172b1bfcc4990dc25b8688ea0b2 Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Thu, 22 Dec 2022 17:56:26 -0500 Subject: [PATCH 116/177] Try something else --- tedana/__init__.py | 2 +- tedana/workflows/tedana_reclassify.py | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/tedana/__init__.py b/tedana/__init__.py index 01d2f97c2..c31209973 100644 --- a/tedana/__init__.py +++ b/tedana/__init__.py @@ -13,6 +13,6 @@ # cmp is not used, so ignore nipype-generated warnings warnings.filterwarnings("ignore", r"cmp not installed") -warnings.filterwarnings("ignore", r"Failed to import duecredit due to No module named 'duecredit'") +warnings.filterwarnings("ignore", r"*duecredit*") del get_versions diff --git a/tedana/workflows/tedana_reclassify.py b/tedana/workflows/tedana_reclassify.py index 6cca99d04..ff5475245 100644 --- a/tedana/workflows/tedana_reclassify.py +++ b/tedana/workflows/tedana_reclassify.py @@ -324,6 +324,7 @@ def post_tedana( mmix_orig = mmix.copy() # TODO: make this a function if tedort: + raise ValueError('CircleCI sanity check') comps_accepted = selector.accepted_comps comps_rejected = selector.rejected_comps acc_ts = mmix[:, comps_accepted] From 69356a79f7835540041a3d3a8942127805a4df2a Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Thu, 22 Dec 2022 17:58:38 -0500 Subject: [PATCH 117/177] Guess that wasn't legal --- tedana/__init__.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tedana/__init__.py 
b/tedana/__init__.py index c31209973..a26b9ef10 100644 --- a/tedana/__init__.py +++ b/tedana/__init__.py @@ -13,6 +13,5 @@ # cmp is not used, so ignore nipype-generated warnings warnings.filterwarnings("ignore", r"cmp not installed") -warnings.filterwarnings("ignore", r"*duecredit*") del get_versions From 953d020e36714a83ad244f085073484c16bbf0b5 Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Thu, 22 Dec 2022 18:04:02 -0500 Subject: [PATCH 118/177] Switch main to _main --- setup.cfg | 2 +- tedana/workflows/tedana_reclassify.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/setup.cfg b/setup.cfg index d0e610db9..975db0b94 100644 --- a/setup.cfg +++ b/setup.cfg @@ -60,7 +60,7 @@ all = console_scripts = t2smap = tedana.workflows.t2smap:_main tedana = tedana.workflows.tedana:_main - tedana_reclassify = tedana.workflows.tedana_reclassify:main + tedana_reclassify = tedana.workflows.tedana_reclassify:_main [options.package_data] * = diff --git a/tedana/workflows/tedana_reclassify.py b/tedana/workflows/tedana_reclassify.py index ff5475245..3419044c8 100644 --- a/tedana/workflows/tedana_reclassify.py +++ b/tedana/workflows/tedana_reclassify.py @@ -19,7 +19,7 @@ RepLGR = logging.getLogger("REPORT") -def main(): +def _main(): from tedana import __version__ verstr = "tedana_reclassify v{}".format(__version__) @@ -441,4 +441,4 @@ def post_tedana( if __name__ == "__main__": - main() + _main() From 12b562987c420a7815e23227ef49dea707d23e5c Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Thu, 22 Dec 2022 18:07:30 -0500 Subject: [PATCH 119/177] Add to pyproject.toml --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 2c751976d..2da1d630b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ version = "0.0.12" [project.scripts] tedana = "tedana.workflows.tedana:_main" -tedana_reclassify = "tedana.workflows.tedana_reclassify:main" +tedana_reclassify = "tedana.workflows.tedana_reclassify:_main" [build-system] requires = ["setuptools>=64", "wheel"] From a1adaa3b6927fcc3587a08506d296378000a3f23 Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Thu, 22 Dec 2022 18:15:11 -0500 Subject: [PATCH 120/177] Force it to be editable --- .circleci/config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 0ee8e2f97..1437ece74 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -21,7 +21,7 @@ jobs: if [ ! 
-d /opt/conda/envs/tedana_py37 ]; then conda create -yq -n tedana_py37 python=3.7 source activate tedana_py37 - pip install .[tests] + pip install -e .[tests] fi - save_cache: key: conda-py37-v2-{{ checksum "setup.cfg" }} From dfe6720965726816b0f6a7bcfd2f2da56e9e5d9c Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Thu, 22 Dec 2022 18:20:44 -0500 Subject: [PATCH 121/177] Add references to resources package --- setup.cfg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.cfg b/setup.cfg index 975db0b94..4473fa5cd 100644 --- a/setup.cfg +++ b/setup.cfg @@ -60,10 +60,10 @@ all = console_scripts = t2smap = tedana.workflows.t2smap:_main tedana = tedana.workflows.tedana:_main - tedana_reclassify = tedana.workflows.tedana_reclassify:_main [options.package_data] * = + resources/references.bib resources/config/* reporting/data/* reporting/data/html/* From bc9ec11386371dbd671adae8ed58b6159f62a7da Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Thu, 22 Dec 2022 19:01:43 -0500 Subject: [PATCH 122/177] Dispose of sanity check --- tedana/workflows/tedana_reclassify.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tedana/workflows/tedana_reclassify.py b/tedana/workflows/tedana_reclassify.py index 3419044c8..e9660a201 100644 --- a/tedana/workflows/tedana_reclassify.py +++ b/tedana/workflows/tedana_reclassify.py @@ -324,7 +324,6 @@ def post_tedana( mmix_orig = mmix.copy() # TODO: make this a function if tedort: - raise ValueError('CircleCI sanity check') comps_accepted = selector.accepted_comps comps_rejected = selector.rejected_comps acc_ts = mmix[:, comps_accepted] From 2748fd904bf9cc5a0c82e488285c75d89ef125a7 Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Thu, 22 Dec 2022 19:47:29 -0500 Subject: [PATCH 123/177] Add more reclassify tests --- tedana/tests/test_integration.py | 85 +++++++++++++++++++++++++-- tedana/workflows/tedana_reclassify.py | 3 - 2 files changed, 81 insertions(+), 7 deletions(-) diff --git a/tedana/tests/test_integration.py b/tedana/tests/test_integration.py index fd6975e50..bfe259dae 100644 --- a/tedana/tests/test_integration.py +++ b/tedana/tests/test_integration.py @@ -3,6 +3,7 @@ """ import glob +import logging import os import os.path as op import re @@ -22,8 +23,11 @@ from tedana.workflows import tedana as tedana_cli from tedana.workflows.tedana_reclassify import post_tedana +# Need to see if a no BOLD warning occurred +LOGGER = logging.getLogger(__name__) -def check_integration_outputs(fname, outpath): + +def check_integration_outputs(fname, outpath, n_logs=1): """ Checks outputs of integration tests @@ -44,10 +48,11 @@ def check_integration_outputs(fname, outpath): # Checks for log file log_regex = "^tedana_[12][0-9]{3}-[0-9]{2}-[0-9]{2}T[0-9]{2}[0-9]{2}[0-9]{2}.tsv$" logfiles = [out for out in existing if re.match(log_regex, out)] - assert len(logfiles) == 1 + assert len(logfiles) == n_logs - # Removes logfile from list of existing files - existing.remove(logfiles[0]) + # Removes logfiles from list of existing files + for log in logfiles: + existing.remove(log) # Compares remaining files with those expected with open(fname, "r") as f: @@ -328,6 +333,78 @@ def test_integration_reclassify_debug(skip_integration): check_integration_outputs(fn, out_dir) +def test_integration_reclassify_both_rej_acc(skip_integration): + if skip_integration: + pytest.skip("Skip reclassify both rejected and accepted") + + guarantee_reclassify_data() + out_dir = os.path.join(reclassify_path(), "both_rej_acc") + if os.path.exists(out_dir): + shutil.rmtree(out_dir) + + 
with pytest.raises( + ValueError, + match=r"The following components were both accepted and", + ): + post_tedana( + reclassify_raw_registry(), + accept=[1, 2, 3], + reject=[1, 2, 3], + out_dir=out_dir, + ) + + +def test_integration_reclassify_run_twice(skip_integration): + if skip_integration: + pytest.skip("Skip reclassify both rejected and accepted") + + guarantee_reclassify_data() + out_dir = os.path.join(reclassify_path(), "run_twice") + if os.path.exists(out_dir): + shutil.rmtree(out_dir) + + post_tedana( + reclassify_raw_registry(), + accept=[1, 2, 3], + out_dir=out_dir, + no_reports=True, + ) + post_tedana( + reclassify_raw_registry(), + accept=[1, 2, 3], + out_dir=out_dir, + force=True, + no_reports=True, + ) + fn = resource_filename("tedana", "tests/data/reclassify_run_twice.txt") + check_integration_outputs(fn, out_dir, n_logs=2) + + +def test_integration_reclassify_no_bold(skip_integration, caplog): + if skip_integration: + pytest.skip("Skip reclassify both rejected and accepted") + + guarantee_reclassify_data() + out_dir = os.path.join(reclassify_path(), "no_bold") + if os.path.exists(out_dir): + shutil.rmtree(out_dir) + + ioh = InputHarvester(reclassify_raw_registry()) + comptable = ioh.get_file_contents("ICA metrics tsv") + to_accept = [i for i in range(len(comptable))] + + post_tedana( + reclassify_raw_registry(), + reject=to_accept, + out_dir=out_dir, + no_reports=True, + ) + assert "No BOLD components detected!" in caplog.text + + fn = resource_filename("tedana", "tests/data/reclassify_no_bold.txt") + check_integration_outputs(fn, out_dir) + + def test_integration_t2smap(skip_integration): """Integration test of the full t2smap workflow using five-echo test data""" if skip_integration: diff --git a/tedana/workflows/tedana_reclassify.py b/tedana/workflows/tedana_reclassify.py index e9660a201..b53109115 100644 --- a/tedana/workflows/tedana_reclassify.py +++ b/tedana/workflows/tedana_reclassify.py @@ -228,9 +228,6 @@ def post_tedana( for a in acc: if a in rej: in_both.append(a) - for r in rej: - if r in acc and r not in rej: - in_both.append(r) if len(in_both) != 0: raise ValueError("The following components were both accepted and rejected: " f"{in_both}") From f7db360e61c837fa3ba34416501bc50422e9f480 Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Thu, 22 Dec 2022 20:03:34 -0500 Subject: [PATCH 124/177] Adaptive mask is not a bool --- tedana/workflows/tedana_reclassify.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tedana/workflows/tedana_reclassify.py b/tedana/workflows/tedana_reclassify.py index b53109115..836925db6 100644 --- a/tedana/workflows/tedana_reclassify.py +++ b/tedana/workflows/tedana_reclassify.py @@ -343,7 +343,7 @@ def post_tedana( n_vols = data_oc.shape[3] img_t_r = io_generator.reference_img.header.get_zooms()[-1] - mask_denoise = utils.reshape_niimg(mask_denoise).astype(bool) + mask_denoise = utils.reshape_niimg(mask_denoise).astype(int) data_oc = utils.reshape_niimg(data_oc) # TODO: make a better result-writing function From a842c60bb6c9f8fa3a8ae812a7305aacec3d17d4 Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Thu, 22 Dec 2022 20:07:26 -0500 Subject: [PATCH 125/177] Add label for setup.cfg --- setup.cfg | 1 + 1 file changed, 1 insertion(+) diff --git a/setup.cfg b/setup.cfg index 4473fa5cd..2caa6c325 100644 --- a/setup.cfg +++ b/setup.cfg @@ -65,6 +65,7 @@ console_scripts = * = resources/references.bib resources/config/* + # Includes all integration test output text files reporting/data/* reporting/data/html/* From 
ecee71d7aab0cec8650e05154e3da6838129db10 Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Fri, 23 Dec 2022 10:19:45 -0500 Subject: [PATCH 126/177] Revert "Adaptive mask is not a bool" This reverts commit f7db360e61c837fa3ba34416501bc50422e9f480. --- tedana/workflows/tedana_reclassify.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tedana/workflows/tedana_reclassify.py b/tedana/workflows/tedana_reclassify.py index 836925db6..b53109115 100644 --- a/tedana/workflows/tedana_reclassify.py +++ b/tedana/workflows/tedana_reclassify.py @@ -343,7 +343,7 @@ def post_tedana( n_vols = data_oc.shape[3] img_t_r = io_generator.reference_img.header.get_zooms()[-1] - mask_denoise = utils.reshape_niimg(mask_denoise).astype(int) + mask_denoise = utils.reshape_niimg(mask_denoise).astype(bool) data_oc = utils.reshape_niimg(data_oc) # TODO: make a better result-writing function From 71d905c9604c672f1e167025d13b9d71bb2ecdbb Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Fri, 23 Dec 2022 10:22:43 -0500 Subject: [PATCH 127/177] Add resource files --- tedana/tests/data/reclassify_no_bold.txt | 16 ++++++++++++++++ tedana/tests/data/reclassify_run_twice.txt | 20 ++++++++++++++++++++ 2 files changed, 36 insertions(+) create mode 100644 tedana/tests/data/reclassify_no_bold.txt create mode 100644 tedana/tests/data/reclassify_run_twice.txt diff --git a/tedana/tests/data/reclassify_no_bold.txt b/tedana/tests/data/reclassify_no_bold.txt new file mode 100644 index 000000000..277e94cc4 --- /dev/null +++ b/tedana/tests/data/reclassify_no_bold.txt @@ -0,0 +1,16 @@ +dataset_description.json +desc-ICACrossComponent_metrics.json +desc-ICA_components.nii.gz +desc-ICA_decision_tree.json +desc-ICA_decomposition.json +desc-ICA_mixing.tsv +desc-ICA_stat-z_components.nii.gz +desc-ICA_status_table.tsv +desc-optcomDenoised_bold.nii.gz +desc-optcomRejected_bold.nii.gz +desc-tedana_metrics.json +desc-tedana_metrics.tsv +desc-tedana_registry.json +figures +references.bib +report.txt diff --git a/tedana/tests/data/reclassify_run_twice.txt b/tedana/tests/data/reclassify_run_twice.txt new file mode 100644 index 000000000..9609c85ba --- /dev/null +++ b/tedana/tests/data/reclassify_run_twice.txt @@ -0,0 +1,20 @@ +dataset_description.json +desc-ICAAccepted_components.nii.gz +desc-ICAAccepted_stat-z_components.nii.gz +desc-ICACrossComponent_metrics.json +desc-ICA_components.nii.gz +desc-ICA_decision_tree.json +desc-ICA_decomposition.json +desc-ICA_mixing.tsv +desc-ICA_stat-z_components.nii.gz +desc-ICA_status_table.tsv +desc-optcomAccepted_bold.nii.gz +desc-optcomDenoised_bold.nii.gz +desc-optcomRejected_bold.nii.gz +desc-tedana_metrics.json +desc-tedana_metrics.tsv +desc-tedana_registry.json +figures +references.bib +report.txt +report_old.txt From e9ac5f21ebb5dbb629f9eed8ae6133f1eb42a10d Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Fri, 23 Dec 2022 11:30:34 -0500 Subject: [PATCH 128/177] Clarify variables --- tedana/workflows/tedana_reclassify.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tedana/workflows/tedana_reclassify.py b/tedana/workflows/tedana_reclassify.py index b53109115..ee2024e83 100644 --- a/tedana/workflows/tedana_reclassify.py +++ b/tedana/workflows/tedana_reclassify.py @@ -259,7 +259,7 @@ def post_tedana( status_table = ioh.get_file_contents("ICA status table tsv") previous_tree_fname = ioh.get_file_path("ICA decision tree json") mmix = np.asarray(ioh.get_file_contents("ICA mixing tsv")) - mask_denoise = ioh.get_file_contents("adaptive mask img") + adaptive_mask = 
ioh.get_file_contents("adaptive mask img") # If global signal was removed in the previous run, we can assume that # the user wants to use that file again. If not, use the default of # optimally combined data. @@ -343,7 +343,8 @@ def post_tedana( n_vols = data_oc.shape[3] img_t_r = io_generator.reference_img.header.get_zooms()[-1] - mask_denoise = utils.reshape_niimg(mask_denoise).astype(bool) + adaptive_mask = utils.reshape_niimg(adaptive_mask) + mask_denoise = adaptive_mask >= 1 data_oc = utils.reshape_niimg(data_oc) # TODO: make a better result-writing function From 52a6bc8a4bc2868d982efeddce4a5779515f7306 Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Fri, 23 Dec 2022 13:06:30 -0500 Subject: [PATCH 129/177] Update date and weep --- tedana/metrics/collect.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tedana/metrics/collect.py b/tedana/metrics/collect.py index 1a73689ca..c8cd8b076 100644 --- a/tedana/metrics/collect.py +++ b/tedana/metrics/collect.py @@ -559,7 +559,7 @@ def get_metadata(comptable): "LongName": "Rationale for component classification", "Description": ( "The reason for the original classification. " - "This column label was replaced with classification_tags in late 2021" + "This column label was replaced with classification_tags in late 2022" ), } if "kappa ratio" in comptable: From 7628d13a523e72f7218131e7bd919ca423ce3351 Mon Sep 17 00:00:00 2001 From: Dan Handwerker <7406227+handwerkerd@users.noreply.github.com> Date: Wed, 4 Jan 2023 10:37:22 -0500 Subject: [PATCH 130/177] Fixed NoLikelyBOLDBug (#24) * Fixed NoLikelyBOLDBug * Updated docs for Likely BOLD * Added note for when ICA will rerun * updated message * New verbose tag for more detailed logging. * at_least_num_exist to classification_doesnt_exist * Cleaned up selector logging output --- docs/building decision trees.rst | 4 +- tedana/resources/decision_trees/kundu.json | 44 ++++--------- tedana/resources/decision_trees/minimal.json | 7 -- tedana/selection/component_selector.py | 39 ++++++++--- tedana/selection/selection_nodes.py | 69 +++++++++++++------- tedana/selection/tedica.py | 8 ++- tedana/tests/test_component_selector.py | 15 +++++ tedana/tests/test_integration.py | 2 +- tedana/tests/test_selection_nodes.py | 20 ++++++ tedana/utils.py | 6 ++ tedana/workflows/tedana.py | 12 ++-- tedana/workflows/tedana_reclassify.py | 10 ++- 12 files changed, 156 insertions(+), 80 deletions(-) diff --git a/docs/building decision trees.rst b/docs/building decision trees.rst index 562944e7a..b81a57fbc 100644 --- a/docs/building decision trees.rst +++ b/docs/building decision trees.rst @@ -221,7 +221,9 @@ that Are used to checks whether results are plausible & can help avoid mistakes A list of acceptable classification tags (i.e. "Likely BOLD", "Unlikely BOLD", "Low variance"). This will both be used to make sure only these tags are used in the tree and allow programs that interact with the results to see all potential - tags in one place. + tags in one place. Note: "Likely BOLD" is a required tag. If tedana is run and + none of the components include the "Likely BOLD" tag, then ICA will be repeated + with a different seed and then the selection process will repeat. 
**Nodes in the decision tree** diff --git a/tedana/resources/decision_trees/kundu.json b/tedana/resources/decision_trees/kundu.json index d57ae84c6..8bb0538d4 100644 --- a/tedana/resources/decision_trees/kundu.json +++ b/tedana/resources/decision_trees/kundu.json @@ -38,7 +38,6 @@ "decide_comps": "all" }, "kwargs": { - "log_extra_info": "Initializing all classifications as unclassified and all classification tags as blank", "log_extra_report": "", "clear_classification_tags": true, "dont_warn_reclassify": true @@ -55,8 +54,6 @@ "right": "kappa" }, "kwargs": { - "log_extra_info": "Reject if Kappa>Rho", - "log_extra_report": "", "tag_ifTrue": "Unlikely BOLD" }, "_comment": "Code I002 in premodularized tedana" @@ -75,8 +72,6 @@ "left2": "countsigFT2", "op2": ">", "right2": 0, - "log_extra_info": "Reject if countsig_in S0clusters > T2clusters & countsig_in_T2clusters>0", - "log_extra_report": "", "tag_ifTrue": "Unlikely BOLD" }, "_comment": "Code I003 in premodularized tedana" @@ -103,8 +98,6 @@ "left2": "variance explained", "op2": ">", "right2": "median_varex", - "log_extra_info": "Reject if DICE S0>T2 & varex>median", - "log_extra_report": "", "tag_ifTrue": "Unlikely BOLD" }, "_comment": "Code I004 in premodularized tedana" @@ -123,8 +116,6 @@ "left2": "variance explained", "op2": ">", "right2": "median_varex", - "log_extra_info": "Reject if T2fitdiff_invsout_ICAmap_Tstat<0 & varex>median", - "log_extra_report": "", "tag_ifTrue": "Unlikely BOLD" }, "_comment": "Code I005 in premodularized tedana" @@ -164,7 +155,6 @@ "right": "kappa_elbow_kundu" }, "kwargs": { - "log_extra_info": "Provisionally accept if kappa>elbow", "log_extra_report": "" } }, @@ -181,7 +171,6 @@ "right": "rho_elbow_kundu" }, "kwargs": { - "log_extra_info": "Move any provisionally accepted components back to unclassified if rho>elbow", "log_extra_report": "" } }, @@ -196,8 +185,9 @@ "class_comp_exists": "provisionalaccept" }, "kwargs": { + "at_least_num_exist": 2, "tag": "No provisional accept", - "log_extra_info": "If nothing is provisionally accepted by this point, be conservative and accept everything", + "log_extra_info": "If nothing is provisionally accepted by this point, then rerun ICA & selection. If max iterations of rerunning done, then accept everything not already rejected", "log_extra_report": "" }, "_comment": "Code I006 in premodularized tedana" @@ -209,9 +199,7 @@ "thresh_label": "upper", "percentile_thresh": 90 }, - "kwargs": { - "log_extra_info": "Calculuate a high variance threshold based on the 90th percentile variance component" - } + "kwargs": {} }, { "functionname": "calc_varex_thresh", @@ -220,35 +208,28 @@ "thresh_label": "lower", "percentile_thresh": 25 }, - "kwargs": { - "log_extra_info": "Calculuate a low variance threshold based on the 25th percentile variance component" - } + "kwargs": {} }, { "functionname": "calc_extend_factor", "parameters": {}, - "kwargs": { - "log_extra_info": "2 if fewer than 90 fMRI volumes, 3 if more than 110 and linear in-between" - }, - "_comment": "This is a scaling number that is used for a few thresholds" + "kwargs": {}, + "_comment": "This is a scaling number that is used for a few thresholds. 
2 if fewer than 90 fMRI volumes, 3 if more than 110 and linear in-between" }, { "functionname": "calc_max_good_meanmetricrank", "parameters": { "decide_comps": "provisionalaccept" }, - "kwargs": { - "log_extra_info": "Number of provisionalaccept components * extend_factor" - } + "kwargs": {}, + "_comment": "Number of provisionalaccept components * extend_factor" }, { "functionname": "calc_varex_kappa_ratio", "parameters": { "decide_comps": "provisionalaccept" }, - "kwargs": { - "log_extra_info": "Scaled ratio of variance/kappa" - }, + "kwargs": {}, "_comment": "This is used to calculate the new 'varex kappa ratio' column in the component_table" }, { @@ -388,8 +369,7 @@ "percentile_thresh": 25 }, "kwargs": { - "num_lowest_var_comps": "num_acc_guess", - "log_extra_info": "Calculuate a low variance threshold based on the 25th percentile variance component" + "num_lowest_var_comps": "num_acc_guess" } }, { @@ -432,9 +412,9 @@ "op2": ">", "left2": "variance explained", "right2": "varex_new_lower_thresh", - "log_extra_info": "Accept components above the kappa elbow, but are at the higher end of the remaining variance so more cautious to not remove" + "log_extra_info": "For not already rejected components, accept ones below the kappa elbow, but at the higher end of the remaining variance so more cautious to not remove" }, - "_comment": "Code I012 in premodularized tedana. Yet another quirky criterion, but this one to keep components. In the original tree, varex_new_lower_thresh would be lower than it is here. If there are differences in results, might be worth adding a scaling factor" + "_comment": "Code I012 in premodularized tedana. Yet another quirky criterion, but this one to keep components. In the original tree, varex_new_lower_thresh might be lower than it is here. 
If there are differences in results, might be worth adding a scaling factor" }, { "functionname": "manual_classify", diff --git a/tedana/resources/decision_trees/minimal.json b/tedana/resources/decision_trees/minimal.json index b596e9293..97ee0b217 100644 --- a/tedana/resources/decision_trees/minimal.json +++ b/tedana/resources/decision_trees/minimal.json @@ -30,7 +30,6 @@ "decide_comps": "all" }, "kwargs": { - "log_extra_info": "Initializing all classifications as unclassified and all classification tags as blank", "log_extra_report": "", "clear_classification_tags": true, "dont_warn_reclassify": true @@ -47,7 +46,6 @@ "right": "kappa" }, "kwargs": { - "log_extra_info": "Reject if Kappa", "right2": 0, - "log_extra_info": "Reject if countsig_in S0clusters > T2clusters & countsig_in_T2clusters>0", "log_extra_report": "", "tag_ifTrue": "Unlikely BOLD" } @@ -93,7 +90,6 @@ "left2": "variance explained", "op2": ">", "right2": "median_varex", - "log_extra_info": "Reject if DICE S0>T2 & varex>median", "log_extra_report": "", "tag_ifTrue": "Unlikely BOLD" } @@ -112,7 +108,6 @@ "left2": "variance explained", "op2": ">", "right2": "median_varex", - "log_extra_info": "Reject if T2fitdiff_invsout_ICAmap_Tstat<0 & varex>median", "log_extra_report": "", "tag_ifTrue": "Unlikely BOLD" } @@ -152,7 +147,6 @@ "right": "kappa_elbow_kundu" }, "kwargs": { - "log_extra_info": "kappa>elbow", "log_extra_report": "" } }, @@ -187,7 +181,6 @@ "right": "rho_elbow_liberal" }, "kwargs": { - "log_extra_info": "rho>elbow", "log_extra_report": "" } }, diff --git a/tedana/selection/component_selector.py b/tedana/selection/component_selector.py index 3295602af..bb8bf8517 100644 --- a/tedana/selection/component_selector.py +++ b/tedana/selection/component_selector.py @@ -229,7 +229,9 @@ class ComponentSelector: a specified `tree` """ - def __init__(self, tree, component_table, cross_component_metrics={}, status_table=None): + def __init__( + self, tree, component_table, verbose=False, cross_component_metrics={}, status_table=None + ): """ Initialize the class using the info specified in the json file `tree` @@ -284,6 +286,7 @@ def __init__(self, tree, component_table, cross_component_metrics={}, status_tab self.__dict__.update(cross_component_metrics) self.cross_component_metrics = cross_component_metrics + self.verbose = verbose # Construct an un-executed selector self.component_table = component_table.copy() @@ -386,12 +389,16 @@ def select(self): else: kwargs = None all_params = {**params} - # log the function name and parameters used - LGR.info( - "Step {}: Running function {} with parameters: {}".format( - self.current_node_idx, node["functionname"], all_params + + if self.verbose: + # If verbose outputs requested, log the function name and parameters used + # This info is already saved in the tree json output files, but adding + # to the screen log output is useful for debugging + LGR.info( + "Step {}: Running function {} with parameters: {}".format( + self.current_node_idx, node["functionname"], all_params + ) ) - ) # run the decision node function if kwargs is not None: self = fcn(self, **params, **kwargs) @@ -509,9 +516,25 @@ def n_comps(self): """The number of components in the component table.""" return len(self.component_table) + @property + def LikelyBOLD_comps(self): + """A boolean pd.DataSeries of components that are tagged "Likely BOLD".""" + LikelyBOLD_comps = self.component_table["classification_tags"].copy() + for idx in range(len(LikelyBOLD_comps)): + if "Likely BOLD" in LikelyBOLD_comps.loc[idx]: + 
LikelyBOLD_comps.loc[idx] = True + else: + LikelyBOLD_comps.loc[idx] = False + return LikelyBOLD_comps + + @property + def n_LikelyBOLD_comps(self): + """The number of components that are tagged "Likely BOLD".""" + return self.LikelyBOLD_comps.sum() + @property def accepted_comps(self): - """The indices of components that are accepted.""" + """A boolean pd.DataSeries of components that are accepted.""" return self.component_table["classification"] == "accepted" @property @@ -521,7 +544,7 @@ def n_accepted_comps(self): @property def rejected_comps(self): - """The indices of components that are rejected.""" + """A boolean pd.DataSeries of components that are rejected.""" return self.component_table["classification"] == "rejected" def to_files(self, io_generator): diff --git a/tedana/selection/selection_nodes.py b/tedana/selection/selection_nodes.py index d709c421c..853d48bb7 100644 --- a/tedana/selection/selection_nodes.py +++ b/tedana/selection/selection_nodes.py @@ -154,8 +154,9 @@ def manual_classify( else: outputs["node_label"] = "Set " + str(decide_comps) + " to " + new_classification + LGR.info(f"{function_name_idx}: {outputs['node_label']} ") if log_extra_info: - LGR.info(log_extra_info) + LGR.info(f"{function_name_idx} {log_extra_info}") if log_extra_report: RepLGR.info(log_extra_report) @@ -442,8 +443,9 @@ def operator_scale_descript(val_scale, val): # Might want to add additional default logging to functions here # The function input will be logged before the function call + LGR.info(f"{function_name_idx}: {ifTrue} if {outputs['node_label']}, else {ifFalse}") if log_extra_info: - LGR.info(log_extra_info) + LGR.info(f"{function_name_idx} {log_extra_info}") if log_extra_report: RepLGR.info(log_extra_report) @@ -581,12 +583,13 @@ def dec_variance_lessthan_thresholds( if custom_node_label: outputs["node_label"] = custom_node_label else: - outputs["node_label"] = ("{}<{}. All variance<{}").format( - outputs["used_metrics"], single_comp_threshold, all_comp_threshold - ) + outputs[ + "node_label" + ] = f"{var_metric}<{single_comp_threshold}. 
All variance<{all_comp_threshold}" + LGR.info(f"{function_name_idx}: {ifTrue} if {outputs['node_label']}, else {ifFalse}") if log_extra_info: - LGR.info(log_extra_info) + LGR.info(f"{function_name_idx} {log_extra_info}") if log_extra_report: RepLGR.info(log_extra_report) @@ -708,10 +711,11 @@ def calc_median( if custom_node_label: outputs["node_label"] = custom_node_label else: - outputs["node_label"] = f"Calc {label_name}" + outputs["node_label"] = f"Median({label_name})" + LGR.info(f"{function_name_idx}: {outputs['node_label']}") if log_extra_info: - LGR.info(log_extra_info) + LGR.info(f"{function_name_idx} {log_extra_info}") if log_extra_report: RepLGR.info(log_extra_report) @@ -808,8 +812,9 @@ def calc_kappa_elbow( else: outputs["node_label"] = "Calc Kappa Elbow" + LGR.info(f"{function_name_idx}: {outputs['node_label']}") if log_extra_info: - LGR.info(log_extra_info) + LGR.info(f"{function_name_idx} {log_extra_info}") if log_extra_report: RepLGR.info(log_extra_report) @@ -941,8 +946,9 @@ def calc_rho_elbow( else: outputs["node_label"] = "Calc Rho Elbow" + LGR.info(f"{function_name_idx}: {outputs['node_label']}") if log_extra_info: - LGR.info(log_extra_info) + LGR.info(f"{function_name_idx} {log_extra_info}") if log_extra_report: RepLGR.info(log_extra_report) @@ -996,6 +1002,7 @@ def dec_classification_doesnt_exist( new_classification, decide_comps, class_comp_exists, + at_least_num_exist=1, log_extra_report="", log_extra_info="", custom_node_label="", @@ -1017,6 +1024,9 @@ def dec_classification_doesnt_exist( This has the same structure options as decide_comps. This function tests whether any components in decide_comps have the classifications defined in this variable. + at_least_num_exist: :obj:`int` + Instead of just testing whether a classification exists, test whether at least + this number of components have that classification. Default=1 {log_extra_info} {log_extra_report} {custom_node_label} @@ -1034,7 +1044,7 @@ def dec_classification_doesnt_exist( ---- This function is useful to end the component selection process early even if there are additional nodes. For example, in the original - kundu tree, if no components are identified with kappa>elbow and + kundu tree, if 0 or 1 components are identified with kappa>elbow and rho>elbow then, instead of removing everything, it effectively says something's wrong and conservatively keeps everything. 
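    As a concrete illustration, a node specification that calls this function
    could look roughly like the following, shown here as the equivalent Python
    dictionary (the parameter values are illustrative rather than copied from a
    shipped tree)::

        node = {
            "functionname": "dec_classification_doesnt_exist",
            "parameters": {
                "new_classification": "accepted",
                "decide_comps": "all",
                "class_comp_exists": "provisionalaccept",
            },
            "kwargs": {"at_least_num_exist": 2, "tag": "No provisional accept"},
        }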
Similarly, later in the kundu tree, there are several steps deciding how to @@ -1047,7 +1057,7 @@ def dec_classification_doesnt_exist( outputs = { "decision_node_idx": selector.current_node_idx, "used_metrics": set(), - "used_cross_component_metrics": set(), + "used_cross_comp_metrics": set(), "node_label": None, "numTrue": None, "numFalse": None, @@ -1059,11 +1069,18 @@ def dec_classification_doesnt_exist( function_name_idx = "Step {}: classification_doesnt_exist".format((selector.current_node_idx)) if custom_node_label: outputs["node_label"] = custom_node_label + elif at_least_num_exist == 1: + outputs[ + "node_label" + ] = f"Change {decide_comps} to {new_classification} if {class_comp_exists} doesn't exist" else: - outputs["node_label"] = f"Change {decide_comps} if {class_comp_exists} doesn't exist" + outputs[ + "node_label" + ] = f"Change {decide_comps} to {new_classification} if less than {at_least_num_exist} components with {class_comp_exists} exist" + LGR.info(f"{function_name_idx}: {outputs['node_label']}") if log_extra_info: - LGR.info(log_extra_info) + LGR.info(f"{function_name_idx} {log_extra_info}") if log_extra_report: RepLGR.info(log_extra_report) @@ -1074,7 +1091,7 @@ def dec_classification_doesnt_exist( do_comps_exist = selectcomps2use(selector, class_comp_exists) - if (not comps2use) or (do_comps_exist): + if (not comps2use) or (len(do_comps_exist) >= at_least_num_exist): outputs["numTrue"] = 0 # If nothing chanages, then assign the number of components in comps2use to numFalse outputs["numFalse"] = len(comps2use) @@ -1232,10 +1249,11 @@ def calc_varex_thresh( if custom_node_label: outputs["node_label"] = custom_node_label else: - outputs["node_label"] = f"Calc {varex_name}" + outputs["node_label"] = f"Calc {varex_name}, {percentile_thresh}th percentile threshold" + LGR.info(f"{function_name_idx}: {outputs['node_label']}") if log_extra_info: - LGR.info(log_extra_info) + LGR.info(f"{function_name_idx} {log_extra_info}") if log_extra_report: RepLGR.info(log_extra_report) @@ -1287,6 +1305,7 @@ def calc_extend_factor( ): """ Calculate the scalar used to set a threshold for d_table_score. + 2 if fewer than 90 fMRI volumes, 3 if more than 110 and linear in-between The explanation for the calculation is in :obj:`tedana.selection.selection_utils.get_extend_factor` @@ -1331,7 +1350,7 @@ def calc_extend_factor( outputs["node_label"] = "Calc extend_factor" if log_extra_info: - LGR.info(log_extra_info) + LGR.info(f"{function_name_idx} {log_extra_info}") if log_extra_report: RepLGR.info(log_extra_report) @@ -1422,7 +1441,7 @@ def calc_max_good_meanmetricrank( outputs["node_label"] = f"Calc {metric_name}" if log_extra_info: - LGR.info(log_extra_info) + LGR.info(f"{function_name_idx} {log_extra_info}") if log_extra_report: RepLGR.info(log_extra_report) @@ -1529,7 +1548,7 @@ def calc_varex_kappa_ratio( outputs["node_label"] = "Calc varex kappa ratio" if log_extra_info: - LGR.info(log_extra_info) + LGR.info(f"{function_name_idx}: {log_extra_info}") if log_extra_report: RepLGR.info(log_extra_report) @@ -1553,7 +1572,12 @@ def calc_varex_kappa_ratio( - np.nanmin(selector.component_table.loc[comps2use, "variance explained"]) ) outputs["kappa_rate"] = kappa_rate - LGR.info(f"Kappa rate found to be {kappa_rate} from components " f"{comps2use}") + # TODO Was useful for debugging, but unnessary for typical outputs. 
Maybe add + # back in when verbose tag is used + # LGR.info( + # f"{function_name_idx} Kappa rate found to be {kappa_rate} from components " + # f"{comps2use}" + # ) # NOTE: kappa_rate is calculated on a subset of components while # "varex kappa ratio" is calculated for all compnents selector.component_table["varex kappa ratio"] = ( @@ -1703,8 +1727,9 @@ def calc_revised_meanmetricrank_guesses( else: outputs["node_label"] = "Calc revised d_table_score & num accepted component guesses" + LGR.info(f"{function_name_idx}: {outputs['node_label']}") if log_extra_info: - LGR.info(log_extra_info) + LGR.info(f"{function_name_idx}: {log_extra_info}") if log_extra_report: RepLGR.info(log_extra_report) diff --git a/tedana/selection/tedica.py b/tedana/selection/tedica.py index 5388522a9..6641ea34d 100644 --- a/tedana/selection/tedica.py +++ b/tedana/selection/tedica.py @@ -10,7 +10,7 @@ RepLGR = logging.getLogger("REPORT") -def automatic_selection(component_table, n_echos, n_vols, tree="kundu"): +def automatic_selection(component_table, n_echos, n_vols, tree="kundu", verbose=False): """Classify components based on component table and decision tree type. Parameters @@ -21,6 +21,8 @@ def automatic_selection(component_table, n_echos, n_vols, tree="kundu"): The number of echoes in this dataset tree: :obj:`str` The type of tree to use for the ComponentSelector object. Default="kundu" + verbose: :obj:`bool` + More verbose logging output if True. Default=False Returns ------- @@ -67,7 +69,9 @@ def automatic_selection(component_table, n_echos, n_vols, tree="kundu"): "n_echos": n_echos, "n_vols": n_vols, } - selector = ComponentSelector(tree, component_table, cross_component_metrics=xcomp) + selector = ComponentSelector( + tree, component_table, verbose=verbose, cross_component_metrics=xcomp + ) selector.select() selector.metadata = collect.get_metadata(selector.component_table) diff --git a/tedana/tests/test_component_selector.py b/tedana/tests/test_component_selector.py index f1d26eabf..371b4848f 100644 --- a/tedana/tests/test_component_selector.py +++ b/tedana/tests/test_component_selector.py @@ -312,3 +312,18 @@ def test_are_all_components_accepted_or_rejected(): selector.component_table.loc[7, "classification"] = "intermediate1" selector.component_table.loc[[1, 3, 5], "classification"] = "intermediate2" selector.are_all_components_accepted_or_rejected() + + +def test_selector_properties_smoke(): + """Tests to confirm properties match expected results""" + + selector = component_selector.ComponentSelector("minimal", sample_comptable()) + + assert selector.n_comps == 21 + + # Also runs selector.LikelyBOLD_comps and should need to deal with sets in each field + assert selector.n_LikelyBOLD_comps == 17 + + assert selector.n_accepted_comps == 17 + + assert selector.rejected_comps.sum() == 4 diff --git a/tedana/tests/test_integration.py b/tedana/tests/test_integration.py index bfe259dae..f62e319b2 100644 --- a/tedana/tests/test_integration.py +++ b/tedana/tests/test_integration.py @@ -399,7 +399,7 @@ def test_integration_reclassify_no_bold(skip_integration, caplog): out_dir=out_dir, no_reports=True, ) - assert "No BOLD components detected!" in caplog.text + assert "No accepted components remaining after manual classification!" 
in caplog.text fn = resource_filename("tedana", "tests/data/reclassify_no_bold.txt") check_integration_outputs(fn, out_dir) diff --git a/tedana/tests/test_selection_nodes.py b/tedana/tests/test_selection_nodes.py index 4a3b620aa..eca37c041 100644 --- a/tedana/tests/test_selection_nodes.py +++ b/tedana/tests/test_selection_nodes.py @@ -701,6 +701,7 @@ def test_dec_classification_doesnt_exist_smoke(): selector, "accepted", decide_comps, + at_least_num_exist=1, class_comp_exists="provisional accept", log_extra_report="report log", log_extra_info="info log", @@ -745,6 +746,25 @@ def test_dec_classification_doesnt_exist_smoke(): assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numFalse"] == 0 assert f"Node {selector.current_node_idx}" in selector.component_status_table + # Standard execution with at_least_num_exist=5 which should trigger the + # components don't exist output + selector = sample_selector(options="unclass") + selector = selection_nodes.dec_classification_doesnt_exist( + selector, + "accepted", + decide_comps=["unclassified", "provisional accept"], + at_least_num_exist=5, + class_comp_exists="provisional accept", + log_extra_report="report log", + log_extra_info="info log", + custom_node_label="custom label", + tag="test true tag", + ) + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numTrue"] == 17 + # Lists the number of components in decide_comps in numFalse + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numFalse"] == 0 + assert f"Node {selector.current_node_idx}" in selector.component_status_table + def test_calc_varex_thresh_smoke(): """Smoke tests for calc_varex_thresh""" diff --git a/tedana/utils.py b/tedana/utils.py index 0248f9f2a..8a4b7e44a 100644 --- a/tedana/utils.py +++ b/tedana/utils.py @@ -76,6 +76,12 @@ def make_adaptive_mask(data, mask=None, getsum=False, threshold=1): # get 33rd %ile of `first_echo` and find corresponding index # NOTE: percentile is arbitrary + # TODO: "interpolation" param changed to "method" in numpy 1.22.0 + # confirm method="higher" is the same as interpolation="higher" + # Current minimum version for numpy in tedana is 1.16 where + # there is no "method" parameter. Either wait until we bump + # our minimum numpy version to 1.22 or add a version check + # or try/catch statement. perc = np.percentile(first_echo, 33, interpolation="higher") perc_val = echo_means[:, 0] == perc diff --git a/tedana/workflows/tedana.py b/tedana/workflows/tedana.py index bb1773bbc..222ffe3ae 100644 --- a/tedana/workflows/tedana.py +++ b/tedana/workflows/tedana.py @@ -657,11 +657,13 @@ def tedana_workflow( "ICA", metrics=required_metrics, ) - ica_selection = selection.automatic_selection(comptable, n_echos, n_vols, tree=tree) - n_accepted_comps = ica_selection.n_accepted_comps - if (n_restarts < maxrestart) and (n_accepted_comps == 0): + ica_selection = selection.automatic_selection( + comptable, n_echos, n_vols, tree=tree, verbose=verbose + ) + n_LikelyBOLD_comps = ica_selection.n_LikelyBOLD_comps + if (n_restarts < maxrestart) and (n_LikelyBOLD_comps == 0): LGR.warning("No BOLD components found. 
Re-attempting ICA.") - elif n_accepted_comps == 0: + elif n_LikelyBOLD_comps == 0: LGR.warning("No BOLD components found, but maximum number of restarts reached.") keep_restarting = False else: @@ -732,7 +734,7 @@ def tedana_workflow( } io_generator.save_file(decomp_metadata, "ICA decomposition json") - if ica_selection.n_accepted_comps == 0: + if ica_selection.n_LikelyBOLD_comps == 0: LGR.warning("No BOLD components detected! Please check data and results!") # TODO: un-hack separate comptable diff --git a/tedana/workflows/tedana_reclassify.py b/tedana/workflows/tedana_reclassify.py index ee2024e83..6c89e71d2 100644 --- a/tedana/workflows/tedana_reclassify.py +++ b/tedana/workflows/tedana_reclassify.py @@ -283,7 +283,11 @@ def post_tedana( # Make a new selector with the added files selector = selection.component_selector.ComponentSelector( - previous_tree_fname, comptable, cross_component_metrics=xcomp, status_table=status_table + previous_tree_fname, + comptable, + cross_component_metrics=xcomp, + status_table=status_table, + verbose=debug, ) if accept: @@ -316,7 +320,9 @@ def post_tedana( selector.to_files(io_generator) if selector.n_accepted_comps == 0: - LGR.warning("No BOLD components detected! Please check data and results!") + LGR.warning( + "No accepted components remaining after manual classification! Please check data and results!" + ) mmix_orig = mmix.copy() # TODO: make this a function From 133947ffc2d7463d146988695685888d3e5c47e5 Mon Sep 17 00:00:00 2001 From: handwerkerd Date: Wed, 4 Jan 2023 14:32:12 -0500 Subject: [PATCH 131/177] fixed debug logging --- tedana/selection/component_selector.py | 21 +++++++-------------- tedana/selection/selection_nodes.py | 10 ++++------ tedana/selection/tedica.py | 4 +--- tedana/workflows/tedana.py | 4 +--- tedana/workflows/tedana_reclassify.py | 1 - 5 files changed, 13 insertions(+), 27 deletions(-) diff --git a/tedana/selection/component_selector.py b/tedana/selection/component_selector.py index bb8bf8517..b832a42bf 100644 --- a/tedana/selection/component_selector.py +++ b/tedana/selection/component_selector.py @@ -229,9 +229,7 @@ class ComponentSelector: a specified `tree` """ - def __init__( - self, tree, component_table, verbose=False, cross_component_metrics={}, status_table=None - ): + def __init__(self, tree, component_table, cross_component_metrics={}, status_table=None): """ Initialize the class using the info specified in the json file `tree` @@ -286,7 +284,6 @@ def __init__( self.__dict__.update(cross_component_metrics) self.cross_component_metrics = cross_component_metrics - self.verbose = verbose # Construct an un-executed selector self.component_table = component_table.copy() @@ -390,15 +387,9 @@ def select(self): kwargs = None all_params = {**params} - if self.verbose: - # If verbose outputs requested, log the function name and parameters used - # This info is already saved in the tree json output files, but adding - # to the screen log output is useful for debugging - LGR.info( - "Step {}: Running function {} with parameters: {}".format( - self.current_node_idx, node["functionname"], all_params - ) - ) + LGR.debug( + f"Step {self.current_node_idx}: Running function {node['functionname']} with parameters: {all_params}" + ) # run the decision node function if kwargs is not None: self = fcn(self, **params, **kwargs) @@ -410,7 +401,9 @@ def select(self): # log the current counts for all classification labels log_classification_counts(self.current_node_idx, self.component_table) - + LGR.debug( + f"Step {self.current_node_idx} 
Full outputs: {self.tree['nodes'][self.current_node_idx]['outputs']}" + ) # move decision columns to end self.component_table = clean_dataframe(self.component_table) # warning anything called a necessary metric wasn't used and if diff --git a/tedana/selection/selection_nodes.py b/tedana/selection/selection_nodes.py index 853d48bb7..56ee41c4c 100644 --- a/tedana/selection/selection_nodes.py +++ b/tedana/selection/selection_nodes.py @@ -1572,12 +1572,10 @@ def calc_varex_kappa_ratio( - np.nanmin(selector.component_table.loc[comps2use, "variance explained"]) ) outputs["kappa_rate"] = kappa_rate - # TODO Was useful for debugging, but unnessary for typical outputs. Maybe add - # back in when verbose tag is used - # LGR.info( - # f"{function_name_idx} Kappa rate found to be {kappa_rate} from components " - # f"{comps2use}" - # ) + LGR.debug( + f"{function_name_idx} Kappa rate found to be {kappa_rate} from components " + f"{comps2use}" + ) # NOTE: kappa_rate is calculated on a subset of components while # "varex kappa ratio" is calculated for all compnents selector.component_table["varex kappa ratio"] = ( diff --git a/tedana/selection/tedica.py b/tedana/selection/tedica.py index 6641ea34d..fa7420b04 100644 --- a/tedana/selection/tedica.py +++ b/tedana/selection/tedica.py @@ -69,9 +69,7 @@ def automatic_selection(component_table, n_echos, n_vols, tree="kundu", verbose= "n_echos": n_echos, "n_vols": n_vols, } - selector = ComponentSelector( - tree, component_table, verbose=verbose, cross_component_metrics=xcomp - ) + selector = ComponentSelector(tree, component_table, cross_component_metrics=xcomp) selector.select() selector.metadata = collect.get_metadata(selector.component_table) diff --git a/tedana/workflows/tedana.py b/tedana/workflows/tedana.py index 222ffe3ae..723124f9d 100644 --- a/tedana/workflows/tedana.py +++ b/tedana/workflows/tedana.py @@ -657,9 +657,7 @@ def tedana_workflow( "ICA", metrics=required_metrics, ) - ica_selection = selection.automatic_selection( - comptable, n_echos, n_vols, tree=tree, verbose=verbose - ) + ica_selection = selection.automatic_selection(comptable, n_echos, n_vols, tree=tree) n_LikelyBOLD_comps = ica_selection.n_LikelyBOLD_comps if (n_restarts < maxrestart) and (n_LikelyBOLD_comps == 0): LGR.warning("No BOLD components found. 
Re-attempting ICA.") diff --git a/tedana/workflows/tedana_reclassify.py b/tedana/workflows/tedana_reclassify.py index 6c89e71d2..a20fba172 100644 --- a/tedana/workflows/tedana_reclassify.py +++ b/tedana/workflows/tedana_reclassify.py @@ -287,7 +287,6 @@ def post_tedana( comptable, cross_component_metrics=xcomp, status_table=status_table, - verbose=debug, ) if accept: From 1273718bc7e6265e6fe3e6c3466cbceed7c78e02 Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Wed, 4 Jan 2023 14:57:40 -0500 Subject: [PATCH 132/177] Temporarily turn on force overwrite for redo ICA --- tedana/workflows/tedana.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tedana/workflows/tedana.py b/tedana/workflows/tedana.py index 723124f9d..b8791abef 100644 --- a/tedana/workflows/tedana.py +++ b/tedana/workflows/tedana.py @@ -667,8 +667,12 @@ def tedana_workflow( else: keep_restarting = False + # If we're going to restart, temporarily allow force overwrite + if keep_restarting: + io_generator.force = True RepLGR.disabled = True # Disable the report to avoid duplicate text RepLGR.disabled = False # Re-enable the report after the while loop is escaped + io_generator.force = force # Re-enable original overwrite behavior else: LGR.info("Using supplied mixing matrix from ICA") mixing_file = io_generator.get_name("ICA mixing tsv") From 05e3fc3b0dfbc6f7cf567fa37ffbc5e819e64b25 Mon Sep 17 00:00:00 2001 From: handwerkerd Date: Wed, 4 Jan 2023 17:11:11 -0500 Subject: [PATCH 133/177] Fixed I007 divergence --- tedana/resources/decision_trees/kundu.json | 1 + 1 file changed, 1 insertion(+) diff --git a/tedana/resources/decision_trees/kundu.json b/tedana/resources/decision_trees/kundu.json index 8bb0538d4..47fdb1d5a 100644 --- a/tedana/resources/decision_trees/kundu.json +++ b/tedana/resources/decision_trees/kundu.json @@ -249,6 +249,7 @@ "op2": ">", "left2": "variance explained", "right2": "varex_upper_thresh", + "right2_scale": "extend_factor", "log_extra_info": "If variance and d_table_scores are high, then reject", "tag_ifTrue": "Less likely BOLD" }, From 6b19cfc6cb0f44c53bd17f00d44ccb2ab6788a4a Mon Sep 17 00:00:00 2001 From: handwerkerd Date: Wed, 4 Jan 2023 21:30:12 -0500 Subject: [PATCH 134/177] calc_varex_thresh now has num_highest_var_comps --- tedana/resources/decision_trees/kundu.json | 2 +- tedana/selection/selection_nodes.py | 42 +++++++++++----------- tedana/tests/test_selection_nodes.py | 24 ++++++------- 3 files changed, 35 insertions(+), 33 deletions(-) diff --git a/tedana/resources/decision_trees/kundu.json b/tedana/resources/decision_trees/kundu.json index 47fdb1d5a..7d39eeb84 100644 --- a/tedana/resources/decision_trees/kundu.json +++ b/tedana/resources/decision_trees/kundu.json @@ -370,7 +370,7 @@ "percentile_thresh": 25 }, "kwargs": { - "num_lowest_var_comps": "num_acc_guess" + "num_highest_var_comps": "num_acc_guess" } }, { diff --git a/tedana/selection/selection_nodes.py b/tedana/selection/selection_nodes.py index 56ee41c4c..cb9aeab66 100644 --- a/tedana/selection/selection_nodes.py +++ b/tedana/selection/selection_nodes.py @@ -1137,7 +1137,7 @@ def calc_varex_thresh( decide_comps, thresh_label, percentile_thresh, - num_lowest_var_comps=None, + num_highest_var_comps=None, log_extra_report="", log_extra_info="", custom_node_label="", @@ -1159,8 +1159,8 @@ def calc_varex_thresh( A percentile threshold to apply to components to set the variance threshold. 
In the original kundu decision tree this was 90 for varex_upper_thresh and 25 for varex_lower_thresh - num_lowest_var_comps: :obj:`str` :obj:`int` - percentile can be calculated on the num_lowest_var_comps components with the + num_highest_var_comps: :obj:`str` :obj:`int` + percentile can be calculated on the num_highest_var_comps components with the lowest variance. Either input an integer directly or input a string that is a parameter stored in selector.cross_component_metrics ("num_acc_guess" in original decision tree). Default=None @@ -1188,7 +1188,7 @@ def calc_varex_thresh( "decision_node_idx": selector.current_node_idx, "node_label": None, varex_name: None, - "num_lowest_var_comps": num_lowest_var_comps, + "num_highest_var_comps": num_highest_var_comps, "used_metrics": set(["variance explained"]), } if ( @@ -1221,28 +1221,28 @@ def calc_varex_thresh( selector.component_table, outputs["used_metrics"], function_name=function_name_idx ) - if num_lowest_var_comps is not None: - if isinstance(num_lowest_var_comps, str): - if num_lowest_var_comps in selector.cross_component_metrics: - num_lowest_var_comps = selector.cross_component_metrics[num_lowest_var_comps] + if num_highest_var_comps is not None: + if isinstance(num_highest_var_comps, str): + if num_highest_var_comps in selector.cross_component_metrics: + num_highest_var_comps = selector.cross_component_metrics[num_highest_var_comps] elif not comps2use: # Note: It is possible the comps2use requested for this function # is not empty, but the comps2use requested to calculate - # {num_lowest_var_comps} was empty. Given the way this node is + # {num_highest_var_comps} was empty. Given the way this node is # used, that's unlikely, but worth a comment. LGR.info( - f"{function_name_idx}: num_lowest_var_comps ( {num_lowest_var_comps}) " + f"{function_name_idx}: num_highest_var_comps ( {num_highest_var_comps}) " "is not in selector.cross_component_metrics, but no components with " f"{decide_comps} remain by this node so nothing happens" ) else: raise ValueError( - f"{function_name_idx}: num_lowest_var_comps ( {num_lowest_var_comps}) " + f"{function_name_idx}: num_highest_var_comps ( {num_highest_var_comps}) " "is not in selector.cross_component_metrics" ) - if not isinstance(num_lowest_var_comps, int) and comps2use: + if not isinstance(num_highest_var_comps, int) and comps2use: raise ValueError( - f"{function_name_idx}: num_lowest_var_comps ( {num_lowest_var_comps}) " + f"{function_name_idx}: num_highest_var_comps ( {num_highest_var_comps}) " "is used as an array index and should be an integer" ) @@ -1264,23 +1264,25 @@ def calc_varex_thresh( decide_comps=decide_comps, ) else: - if num_lowest_var_comps is None: + if num_highest_var_comps is None: outputs[varex_name] = scoreatpercentile( selector.component_table.loc[comps2use, "variance explained"], percentile_thresh ) else: - # Using only the first num_lowest_var_comps components sorted to include + # Using only the first num_highest_var_comps components sorted to include # lowest variance - if num_lowest_var_comps <= len(comps2use): - sorted_varex = np.sort( - (selector.component_table.loc[comps2use, "variance explained"]).to_numpy() + if num_highest_var_comps <= len(comps2use): + sorted_varex = np.flip( + np.sort( + (selector.component_table.loc[comps2use, "variance explained"]).to_numpy() + ) ) outputs[varex_name] = scoreatpercentile( - sorted_varex[:num_lowest_var_comps], percentile_thresh + sorted_varex[:num_highest_var_comps], percentile_thresh ) else: raise ValueError( - 
f"{function_name_idx}: num_lowest_var_comps ({num_lowest_var_comps})" + f"{function_name_idx}: num_highest_var_comps ({num_highest_var_comps})" f"needs to be <= len(comps2use) ({len(comps2use)})" ) selector.cross_component_metrics[varex_name] = outputs[varex_name] diff --git a/tedana/tests/test_selection_nodes.py b/tedana/tests/test_selection_nodes.py index eca37c041..7182f04ea 100644 --- a/tedana/tests/test_selection_nodes.py +++ b/tedana/tests/test_selection_nodes.py @@ -817,13 +817,13 @@ def test_calc_varex_thresh_smoke(): assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["varex_thresh"] > 0 assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["perc"] == 90 - # Standard call using num_lowest_var_comps as an integer + # Standard call using num_highest_var_comps as an integer selector = selection_nodes.calc_varex_thresh( selector, decide_comps, thresh_label="new_lower", percentile_thresh=25, - num_lowest_var_comps=8, + num_highest_var_comps=8, ) calc_cross_comp_metrics = {"varex_new_lower_thresh", "new_lower_perc"} output_calc_cross_comp_metrics = set( @@ -836,14 +836,14 @@ def test_calc_varex_thresh_smoke(): ) assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["new_lower_perc"] == 25 - # Standard call using num_lowest_var_comps as a value in cross_component_metrics + # Standard call using num_highest_var_comps as a value in cross_component_metrics selector.cross_component_metrics["num_acc_guess"] = 10 selector = selection_nodes.calc_varex_thresh( selector, decide_comps, thresh_label="new_lower", percentile_thresh=25, - num_lowest_var_comps="num_acc_guess", + num_highest_var_comps="num_acc_guess", ) calc_cross_comp_metrics = {"varex_new_lower_thresh", "new_lower_perc"} output_calc_cross_comp_metrics = set( @@ -856,24 +856,24 @@ def test_calc_varex_thresh_smoke(): ) assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["new_lower_perc"] == 25 - # Raise error if num_lowest_var_comps is a string, but not in cross_component_metrics + # Raise error if num_highest_var_comps is a string, but not in cross_component_metrics with pytest.raises(ValueError): selector = selection_nodes.calc_varex_thresh( selector, decide_comps, thresh_label="new_lower", percentile_thresh=25, - num_lowest_var_comps="NotACrossCompMetric", + num_highest_var_comps="NotACrossCompMetric", ) - # Do not raise error if num_lowest_var_comps is a string & not in cross_component_metrics, + # Do not raise error if num_highest_var_comps is a string & not in cross_component_metrics, # but decide_comps doesn't select any components selector = selection_nodes.calc_varex_thresh( selector, decide_comps="NoComponents", thresh_label="new_lower", percentile_thresh=25, - num_lowest_var_comps="NotACrossCompMetric", + num_highest_var_comps="NotACrossCompMetric", ) assert ( selector.tree["nodes"][selector.current_node_idx]["outputs"]["varex_new_lower_thresh"] @@ -882,24 +882,24 @@ def test_calc_varex_thresh_smoke(): # percentile_thresh doesn't depend on components and is assigned assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["new_lower_perc"] == 25 - # Raise error if num_lowest_var_comps is not an integer + # Raise error if num_highest_var_comps is not an integer with pytest.raises(ValueError): selector = selection_nodes.calc_varex_thresh( selector, decide_comps, thresh_label="new_lower", percentile_thresh=25, - num_lowest_var_comps=9.5, + num_highest_var_comps=9.5, ) - # Raise error if num_lowest_var_comps is larger than the number of selected components + # 
Raise error if num_highest_var_comps is larger than the number of selected components with pytest.raises(ValueError): selector = selection_nodes.calc_varex_thresh( selector, decide_comps, thresh_label="new_lower", percentile_thresh=25, - num_lowest_var_comps=55, + num_highest_var_comps=55, ) # Run warning logging code to see if any of the cross_component_metrics From 0b3d1b8829001765022e4aebb371c48169027e5a Mon Sep 17 00:00:00 2001 From: handwerkerd Date: Thu, 5 Jan 2023 10:13:23 -0500 Subject: [PATCH 135/177] fixed linting errors --- tedana/selection/component_selector.py | 6 ++++-- tedana/selection/selection_nodes.py | 7 ++++--- tedana/workflows/tedana.py | 2 +- tedana/workflows/tedana_reclassify.py | 3 ++- 4 files changed, 11 insertions(+), 7 deletions(-) diff --git a/tedana/selection/component_selector.py b/tedana/selection/component_selector.py index b832a42bf..6805d3d33 100644 --- a/tedana/selection/component_selector.py +++ b/tedana/selection/component_selector.py @@ -388,7 +388,8 @@ def select(self): all_params = {**params} LGR.debug( - f"Step {self.current_node_idx}: Running function {node['functionname']} with parameters: {all_params}" + f"Step {self.current_node_idx}: Running function {node['functionname']} " + f"with parameters: {all_params}" ) # run the decision node function if kwargs is not None: @@ -402,7 +403,8 @@ def select(self): # log the current counts for all classification labels log_classification_counts(self.current_node_idx, self.component_table) LGR.debug( - f"Step {self.current_node_idx} Full outputs: {self.tree['nodes'][self.current_node_idx]['outputs']}" + f"Step {self.current_node_idx} Full outputs: " + f"{self.tree['nodes'][self.current_node_idx]['outputs']}" ) # move decision columns to end self.component_table = clean_dataframe(self.component_table) diff --git a/tedana/selection/selection_nodes.py b/tedana/selection/selection_nodes.py index cb9aeab66..bf4fa85c6 100644 --- a/tedana/selection/selection_nodes.py +++ b/tedana/selection/selection_nodes.py @@ -1074,9 +1074,10 @@ def dec_classification_doesnt_exist( "node_label" ] = f"Change {decide_comps} to {new_classification} if {class_comp_exists} doesn't exist" else: - outputs[ - "node_label" - ] = f"Change {decide_comps} to {new_classification} if less than {at_least_num_exist} components with {class_comp_exists} exist" + outputs["node_label"] = ( + f"Change {decide_comps} to {new_classification} if less than " + f"{at_least_num_exist} components with {class_comp_exists} exist" + ) LGR.info(f"{function_name_idx}: {outputs['node_label']}") if log_extra_info: diff --git a/tedana/workflows/tedana.py b/tedana/workflows/tedana.py index b8791abef..6bacbeb30 100644 --- a/tedana/workflows/tedana.py +++ b/tedana/workflows/tedana.py @@ -672,7 +672,7 @@ def tedana_workflow( io_generator.force = True RepLGR.disabled = True # Disable the report to avoid duplicate text RepLGR.disabled = False # Re-enable the report after the while loop is escaped - io_generator.force = force # Re-enable original overwrite behavior + io_generator.force = force # Re-enable original overwrite behavior else: LGR.info("Using supplied mixing matrix from ICA") mixing_file = io_generator.get_name("ICA mixing tsv") diff --git a/tedana/workflows/tedana_reclassify.py b/tedana/workflows/tedana_reclassify.py index a20fba172..364739d6f 100644 --- a/tedana/workflows/tedana_reclassify.py +++ b/tedana/workflows/tedana_reclassify.py @@ -320,7 +320,8 @@ def post_tedana( if selector.n_accepted_comps == 0: LGR.warning( - "No accepted components remaining 
after manual classification! Please check data and results!" + "No accepted components remaining after manual classification! " + "Please check data and results!" ) mmix_orig = mmix.copy() From 05a6b1b5603a4ccf184d650157c1851c9b8a16fb Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Thu, 5 Jan 2023 17:42:19 -0500 Subject: [PATCH 136/177] Update integration test data --- tedana/tests/test_integration.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tedana/tests/test_integration.py b/tedana/tests/test_integration.py index f62e319b2..95f0bc0a8 100644 --- a/tedana/tests/test_integration.py +++ b/tedana/tests/test_integration.py @@ -86,7 +86,7 @@ def reclassify_path() -> str: def reclassify_raw() -> str: - return os.path.join(reclassify_path(), "TED.three-echo-previous") + return os.path.join(reclassify_path(), "TED.three-echo") def reclassify_raw_registry() -> str: @@ -95,7 +95,7 @@ def reclassify_raw_registry() -> str: def reclassify_url() -> str: """Get the URL to reclassify test data.""" - return "https://osf.io/mt59n/download" + return "https://osf.io/f6g45/download" def guarantee_reclassify_data() -> None: From 50ba4a878ae3acaa5f8bcf0d5adc82fec06ffbca Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Thu, 5 Jan 2023 20:21:41 -0500 Subject: [PATCH 137/177] Adds csv and text file reading for manual acc/rej --- tedana/io.py | 85 +++++++++++++++++++++++++++ tedana/tests/test_io.py | 49 +++++++++++++++ tedana/workflows/tedana_reclassify.py | 45 ++++++++++++-- 3 files changed, 173 insertions(+), 6 deletions(-) diff --git a/tedana/io.py b/tedana/io.py index aca75f377..b93fe828c 100644 --- a/tedana/io.py +++ b/tedana/io.py @@ -10,6 +10,7 @@ import os.path as op from copy import deepcopy from string import Formatter +from typing import List import nibabel as nib import numpy as np @@ -23,6 +24,13 @@ LGR = logging.getLogger("GENERAL") RepLGR = logging.getLogger("REPORT") +ALLOWED_COMPONENT_DELIMITERS = ( + "\t", + "\n", + " ", + ",", +) + class CustomEncoder(json.JSONEncoder): """Class for converting some types because of JSON serialization and numpy @@ -830,3 +838,80 @@ def prep_data_for_json(d) -> dict: # comment line as an elif block d[k] = v return d + + +def str_to_component_list(s: str) -> List[int]: + """Convert a string to a list of component indices. + + Parameters + ---------- + s: str + The string to convert into a list of component indices. + + Returns + ------- + List[int] of component indices. + + Raises + ------ + ValueError, if the string cannot be split by an allowed delimeter + """ + # Strip off newline at end in case we've been given a one-line file + if s[-1] == "\n": + s = s[:-1] + + # Search across all allowed delimiters for a match + for d in ALLOWED_COMPONENT_DELIMITERS: + possible_list = s.split(d) + if len(possible_list) > 1: + # We have a likely hit + # Check to see if extra delimeter at end and get rid of it + if possible_list[-1] == "": + possible_list = possible_list[:-1] + break + elif len(possible_list) == 1 and possible_list[0].isnumeric(): + # We have a likely hit and there is just one component + break + # Make sure we can actually convert this split list into an integer + # Crash with a sensible error if not + for x in possible_list: + try: + int(x) + except ValueError: + raise ValueError( + "While parsing component list, failed to convert to int." + f' Offending element is "{x}", offending string is "{s}".' 
+ ) + + return [int(x) for x in possible_list] + + +def fname_to_component_list(fname: str) -> List[int]: + """Read a file of component indices. + + Parameters + ---------- + fname: str + The name of the file to read the list of component indices from. + + Returns + ------- + List[int] of component indices. + + Raises + ------ + ValueError, if the string cannot be split by an allowed delimiter or the + csv file cannot be interpreted. + """ + if fname[-3:] == "csv": + contents = pd.read_csv(fname) + columns = contents.columns + if len(columns) == 2 and "0" in columns: + return contents["0"].tolist() + elif len(columns) >= 2 and "Components" in columns: + return contents["Components"].tolist() + else: + raise ValueError(f"Cannot determine a components column in file {fname}") + with open(fname, "r") as fp: + contents = fp.read() + return str_to_component_list(contents) diff --git a/tedana/tests/test_io.py b/tedana/tests/test_io.py index 5b59758aa..7af393ac5 100644 --- a/tedana/tests/test_io.py +++ b/tedana/tests/test_io.py @@ -218,3 +218,52 @@ def test_prep_data_for_json(): } new_d = me.prep_data_for_json(d) assert isinstance(new_d["dictionary"]["array"], list) + + +def test_str_to_component_list(): + """ + Tests for converting a string to a component list + """ + int_list_1 = [1] + int_list_2 = [1, 4, 5] + test_list_1 = [str(x) for x in int_list_1] + test_list_2 = [str(x) for x in int_list_2] + delims_to_test = ( + "\t", + "\n", + " ", + ",", + ) + for d in delims_to_test: + test_data = d.join(test_list_1) + assert me.str_to_component_list(test_data) == int_list_1 + test_data = d.join(test_list_2) + assert me.str_to_component_list(test_data) == int_list_2 + + # Test that one-line, one-element works + assert me.str_to_component_list("1\n") == [1] + # Test that one-line, multi-element works + assert me.str_to_component_list("1,1\n") == [1, 1] + # Test that extra delimiter is ignored + assert me.str_to_component_list("1,1,") == [1, 1] + + with pytest.raises(ValueError, match=r"While parsing component"): + me.str_to_component_list("1,2\t") + + +def test_fname_to_component_list(): + test_data = [1, 2, 3] + temp_csv_fname = os.path.join(data_dir, "test.csv") + df = pd.DataFrame(data=test_data) + df.to_csv(path_or_buf=temp_csv_fname) + result = me.fname_to_component_list(temp_csv_fname) + os.remove(temp_csv_fname) + assert result == test_data + + temp_txt_fname = os.path.join(data_dir, "test.txt") + with open(temp_txt_fname, "w") as fp: + fp.write("1,1,") + + result = me.fname_to_component_list(temp_txt_fname) + os.remove(temp_txt_fname) + assert result == [1, 1] diff --git a/tedana/workflows/tedana_reclassify.py b/tedana/workflows/tedana_reclassify.py index 364739d6f..5dc46b047 100644 --- a/tedana/workflows/tedana_reclassify.py +++ b/tedana/workflows/tedana_reclassify.py @@ -14,6 +14,11 @@ import tedana.gscontrol as gsc from tedana import __version__, io, reporting, selection, utils from tedana.bibtex import get_description_references +from tedana.io import ( + ALLOWED_COMPONENT_DELIMITERS, + fname_to_component_list, + str_to_component_list, +) LGR = logging.getLogger("GENERAL") RepLGR = logging.getLogger("REPORT") @@ -32,15 +37,23 @@ def _main(): "--manacc", dest="manual_accept", nargs="+", - type=int, - help="Component indices to accept (zero-indexed).", + help=( "Component indices to accept (zero-indexed)." "Supply as a comma-delimited list with no spaces, " "as a csv file, or as a text file with an allowed " f"delimiter {repr(ALLOWED_COMPONENT_DELIMITERS)}." 
+ ), ) parser.add_argument( "--manrej", dest="manual_reject", nargs="+", - type=int, - help="Component indices to reject (zero-indexed).", + help=( "Component indices to reject (zero-indexed)." "Supply as a comma-delimited list with no spaces, " "as a csv file, or as a text file with an allowed " f"delimiter {repr(ALLOWED_COMPONENT_DELIMITERS)}." + ), ) parser.add_argument( "--config", @@ -120,11 +133,31 @@ def _main(): args = parser.parse_args() + if not args.manual_accept: + manual_accept = [] + elif len(args.manual_accept) > 1: + # We should assume that this is a list of integers + manual_accept = [int(x) for x in args.manual_accept] + elif op.exists(args.manual_accept): + manual_accept = fname_to_component_list(args.manual_accept) + else: + manual_accept = str_to_component_list(args.manual_accept) + + if not args.manual_reject: + manual_reject = [] + elif len(args.manual_reject) > 1: + # We should assume that this is a list of integers + manual_reject = [int(x) for x in args.manual_reject] + elif op.exists(args.manual_reject): + manual_reject = fname_to_component_list(args.manual_reject) + else: + manual_reject = str_to_component_list(args.manual_reject) + # Run post-tedana post_tedana( args.registry, - accept=args.manual_accept, - reject=args.manual_reject, + accept=manual_accept, + reject=manual_reject, out_dir=args.out_dir, config=args.config, prefix=args.prefix, From a2ec444646473f126969252a37dd32f6456eb73c Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Fri, 6 Jan 2023 13:59:44 -0500 Subject: [PATCH 138/177] Add tests for CustomEncoder --- tedana/tests/test_io.py | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/tedana/tests/test_io.py b/tedana/tests/test_io.py index 7af393ac5..19062babd 100644 --- a/tedana/tests/test_io.py +++ b/tedana/tests/test_io.py @@ -2,6 +2,7 @@ Tests for tedana.io """ +import json import os import nibabel as nib @@ -267,3 +268,32 @@ def test_fname_to_component_list(): result = me.fname_to_component_list(temp_txt_fname) os.remove(temp_txt_fname) assert result == [1, 1] + + +def test_CustomEncoder(): + """ + Test the encoder we use for JSON incompatibilities + """ + # np int64 + test_data = {"data": np.int64(4)} + encoded = json.dumps(test_data, cls=me.CustomEncoder) + decoded = json.loads(encoded) + assert test_data == decoded + + # np array + test_data = {"data": np.asarray([1, 2, 3])} + encoded = json.dumps(test_data, cls=me.CustomEncoder) + decoded = json.loads(encoded) + assert np.array_equal(test_data["data"], decoded["data"]) + + # set should become list + test_data = {"data": set(["cat", "dog", "fish"])} + encoded = json.dumps(test_data, cls=me.CustomEncoder) + decoded = json.loads(encoded) + assert list(test_data["data"]) == decoded["data"] + + # no special cases should use standard encoder + test_data = {"pet": "dog"} + encoded = json.dumps(test_data, cls=me.CustomEncoder) + decoded = json.loads(encoded) + assert test_data == decoded From 24e3f73773b9b0719a20fcd97b4ddffffb040b66 Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Fri, 6 Jan 2023 14:17:01 -0500 Subject: [PATCH 139/177] Adds bibtex warning check test --- tedana/tests/test_bibtex.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) create mode 100644 tedana/tests/test_bibtex.py diff --git a/tedana/tests/test_bibtex.py b/tedana/tests/test_bibtex.py new file mode 100644 index 000000000..dfb2712fd --- /dev/null +++ b/tedana/tests/test_bibtex.py @@ -0,0 +1,13 @@ +"""Tests for bibtex""" + +import logging + +import pytest + +from tedana import 
bibtex + +def test_warn_no_citation_found(caplog): + citations = ["Nonexistent et al, 0 AD"] + ref_list = [] + bibtex.reduce_references(citations, ref_list) + assert f"Citation {citations[0]} not found." in caplog.text From 922f33699f15ff1f9fb0b8aabea8f5895a7c98cf Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Fri, 6 Jan 2023 14:17:57 -0500 Subject: [PATCH 140/177] Appease linter --- tedana/tests/test_bibtex.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/tedana/tests/test_bibtex.py b/tedana/tests/test_bibtex.py index dfb2712fd..885593ba7 100644 --- a/tedana/tests/test_bibtex.py +++ b/tedana/tests/test_bibtex.py @@ -1,11 +1,8 @@ """Tests for bibtex""" -import logging - -import pytest - from tedana import bibtex + def test_warn_no_citation_found(caplog): citations = ["Nonexistent et al, 0 AD"] ref_list = [] From a2524bc7afb1b143a9b1313d604007eaf2bc439b Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Fri, 6 Jan 2023 14:36:10 -0500 Subject: [PATCH 141/177] Fix unused metrics warning --- tedana/selection/component_selector.py | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/tedana/selection/component_selector.py b/tedana/selection/component_selector.py index 6805d3d33..2cf493244 100644 --- a/tedana/selection/component_selector.py +++ b/tedana/selection/component_selector.py @@ -119,10 +119,14 @@ def validate_tree(tree): # Warn if unused fields exist unused_keys = set(tree.keys()) - set(tree_expected_keys) - set(["used_metrics"]) - # Make sure reconstruct_from doesn't trigger a warning; hacky, sorry - if "reconstruct_from" in unused_keys: - unused_keys.remove("reconstruct_from") - + # Make sure some fields don't trigger a warning; hacky, sorry + ok_to_not_use = ( + "reconstruct_from", + "generated_metrics", + ) + for k in ok_to_not_use: + if k in unused_keys: + unused_keys.remove(k) if unused_keys: LGR.warning(f"Decision tree includes fields that are not used or logged {unused_keys}") @@ -472,10 +476,7 @@ def are_only_necessary_metrics_used(self): used and if any used_metrics weren't explicitly declared necessary. 
If either of these happen, a warning is added to the logger """ - if "generated_metrics" in self.tree.keys(): - necessary_metrics = set(self.tree["generated_metrics"]) | self.necessary_metrics - else: - necessary_metrics = self.necessary_metrics + necessary_metrics = self.necessary_metrics not_declared = self.tree["used_metrics"] - necessary_metrics not_used = necessary_metrics - self.tree["used_metrics"] if len(not_declared) > 0: From 563060b86da81963ae5f6f444c4f7c06205230d2 Mon Sep 17 00:00:00 2001 From: Joshua Teves Date: Fri, 6 Jan 2023 15:01:30 -0500 Subject: [PATCH 142/177] Add reclassify tests and patches to test failures --- tedana/tests/test_integration.py | 91 ++++++++++++++++++++++++++- tedana/workflows/tedana_reclassify.py | 16 +++-- 2 files changed, 99 insertions(+), 8 deletions(-) diff --git a/tedana/tests/test_integration.py b/tedana/tests/test_integration.py index 95f0bc0a8..5cb6c738b 100644 --- a/tedana/tests/test_integration.py +++ b/tedana/tests/test_integration.py @@ -269,9 +269,45 @@ def test_integration_reclassify_insufficient_args(skip_integration): assert result.returncode != 0 -def test_integration_reclassify_quiet(skip_integration): +def test_integration_reclassify_quiet_csv(skip_integration): if skip_integration: - pytest.skip("Skip reclassify quiet") + pytest.skip("Skip reclassify quiet csv") + + guarantee_reclassify_data() + out_dir = os.path.join(reclassify_path(), "quiet") + if os.path.exists(out_dir): + shutil.rmtree(out_dir) + + # Make some files that have components to manually accept and reject + to_accept = [i for i in range(3)] + to_reject = [i for i in range(7, 4)] + acc_df = pd.DataFrame(data=to_accept, columns=["Components"]) + rej_df = pd.DataFrame(data=to_reject, columns=["Components"]) + acc_csv_fname = os.path.join(reclassify_raw(), "accept.csv") + rej_csv_fname = os.path.join(reclassify_raw(), "reject.csv") + acc_df.to_csv(acc_csv_fname) + rej_df.to_csv(rej_csv_fname) + + args = [ + "tedana_reclassify", + "--manacc", + acc_csv_fname, + "--manrej", + rej_csv_fname, + "--out-dir", + out_dir, + os.path.join(reclassify_raw(), "desc-tedana_registry.json"), + ] + + results = subprocess.run(args, capture_output=True) + assert results.returncode == 0 + fn = resource_filename("tedana", "tests/data/reclassify_quiet_out.txt") + check_integration_outputs(fn, out_dir) + + +def test_integration_reclassify_quiet_spaces(skip_integration): + if skip_integration: + pytest.skip("Skip reclassify quiet space-delimited integers") guarantee_reclassify_data() out_dir = os.path.join(reclassify_path(), "quiet") @@ -299,6 +335,32 @@ def test_integration_reclassify_quiet(skip_integration): check_integration_outputs(fn, out_dir) +def test_integration_reclassify_quiet_string(skip_integration): + if skip_integration: + pytest.skip("Skip reclassify quiet string of integers") + + guarantee_reclassify_data() + out_dir = os.path.join(reclassify_path(), "quiet") + if os.path.exists(out_dir): + shutil.rmtree(out_dir) + + args = [ + "tedana_reclassify", + "--manacc", + "1,2,3", + "--manrej", + "4,5,6,", + "--out-dir", + out_dir, + os.path.join(reclassify_raw(), "desc-tedana_registry.json"), + ] + + results = subprocess.run(args, capture_output=True) + assert results.returncode == 0 + fn = resource_filename("tedana", "tests/data/reclassify_quiet_out.txt") + check_integration_outputs(fn, out_dir) + + def test_integration_reclassify_debug(skip_integration): if skip_integration: pytest.skip("Skip reclassify debug") @@ -405,6 +467,31 @@ def 
     check_integration_outputs(fn, out_dir)


+def test_integration_reclassify_accrej_files(skip_integration, caplog):
+    if skip_integration:
+        pytest.skip("Skip reclassify both rejected and accepted")
+
+    guarantee_reclassify_data()
+    out_dir = os.path.join(reclassify_path(), "no_bold")
+    if os.path.exists(out_dir):
+        shutil.rmtree(out_dir)
+
+    ioh = InputHarvester(reclassify_raw_registry())
+    comptable = ioh.get_file_contents("ICA metrics tsv")
+    to_reject = [i for i in range(len(comptable))]
+
+    post_tedana(
+        reclassify_raw_registry(),
+        reject=to_reject,
+        out_dir=out_dir,
+        no_reports=True,
+    )
+    assert "No accepted components remaining after manual classification!" in caplog.text
+
+    fn = resource_filename("tedana", "tests/data/reclassify_no_bold.txt")
+    check_integration_outputs(fn, out_dir)
+
+
 def test_integration_t2smap(skip_integration):
     """Integration test of the full t2smap workflow using five-echo test data"""
     if skip_integration:
diff --git a/tedana/workflows/tedana_reclassify.py b/tedana/workflows/tedana_reclassify.py
index 5dc46b047..6cfaaef76 100644
--- a/tedana/workflows/tedana_reclassify.py
+++ b/tedana/workflows/tedana_reclassify.py
@@ -138,20 +138,24 @@ def _main():
     elif len(args.manual_accept) > 1:
         # We should assume that this is a list of integers
         manual_accept = [int(x) for x in args.manual_accept]
-    elif op.exists(args.manual_accept):
-        manual_accept = fname_to_component_list(args.manual_accept)
+    elif op.exists(args.manual_accept[0]):
+        # filename was given
+        manual_accept = fname_to_component_list(args.manual_accept[0])
     else:
-        manual_accept = str_to_component_list(args.manual_accept)
+        # arbitrary string was given, length of list is 1
+        manual_accept = str_to_component_list(args.manual_accept[0])

     if not args.manual_reject:
         manual_reject = []
     elif len(args.manual_reject) > 1:
         # We should assume that this is a list of integers
         manual_reject = [int(x) for x in args.manual_reject]
-    elif op.exists(args.manual_reject):
-        manual_reject = fname_to_component_list(args.manual_reject)
+    elif op.exists(args.manual_reject[0]):
+        # filename was given
+        manual_reject = fname_to_component_list(args.manual_reject[0])
     else:
-        manual_reject = str_to_component_list(args.manual_reject)
+        # arbitrary string
+        manual_reject = str_to_component_list(args.manual_reject[0])

     # Run post-tedana
     post_tedana(

From 6d6eda485132dca57d37b114eee8ad90d22a0c59 Mon Sep 17 00:00:00 2001
From: Taylor Salo
Date: Tue, 10 Jan 2023 17:08:45 -0500
Subject: [PATCH 143/177] Make stylistic changes.

---
 docs/building decision trees.rst        |  16 ++--
 tedana/selection/component_selector.py  |  75 ++++++++--------
 tedana/selection/selection_nodes.py     |  85 +++++++++----------
 tedana/selection/selection_utils.py     | 108 +++++++++++-------------
 tedana/selection/tedica.py              |  18 ++--
 tedana/selection/tedpca.py              |   3 +-
 tedana/tests/test_component_selector.py |   4 +-
 tedana/workflows/tedana.py              |   8 +-
 8 files changed, 147 insertions(+), 170 deletions(-)

diff --git a/docs/building decision trees.rst b/docs/building decision trees.rst
index b81a57fbc..a4137430c 100644
--- a/docs/building decision trees.rst
+++ b/docs/building decision trees.rst
@@ -45,13 +45,13 @@ The file key names are used below the full file names in the
 .. _ComponentSelector object: generated/tedana.selection.component_selector.ComponentSelector.html
 ..
_output file descriptions: output_file_descriptions.html -**General outputs from component selection** +**General outputs from component selection** New columns in ``selector.component_table`` and the "ICA metrics tsv" file: classification: While the decision table is running, there may also be intermediate - classification labels, but the final labels are expected to be + classification labels, but the final labels are expected to be "accepted" or "rejected". There will be a warning if other labels remain. classification_tags: @@ -61,7 +61,7 @@ New columns in ``selector.component_table`` and the "ICA metrics tsv" file: for visualizing and reviewing results ``selector.cross_component_metrics`` and "ICA cross component metrics json": - A dictionary of metrics that are each a single value calculated across components. + A dictionary of metrics that are each a single value calculated across components. For example, kappa and rho elbows. User or pre-defined scaling factors are also be stored here. Any constant that is used in the component classification processes that isn't pre-defined in the decision tree file should be saved here. @@ -109,7 +109,7 @@ decison_node_idx: The decision tree functions are run as part of an ordered list. This is the positional index the location of the function in the list, starting with index 0. - + used_metrics: A list of the metrics used in a node of the decision tree @@ -179,14 +179,14 @@ in `selection_nodes.py`_ There are several fields with general information. Some of these store general information that's useful for reporting results and others store information that Are used to checks whether results are plausible & can help avoid mistakes - + tree_id: A descriptive name for the tree that will be logged. info: A brief description of the tree for info logging - report: + report: A narrative description of the tree that could be used in report logging refs: @@ -290,7 +290,7 @@ There are several expectations for selection functions that are necessary for th properly execute. In `selection_nodes.py`_, ``manual_classify``, ``dec_left_op_right``, and ``calc_kappa_rho_elbows_kundu`` are good examples for how to meet these expectations. -Create a dictionary called "outputs" that includes key fields that should be recorded. +Create a dictionary called "outputs" that includes key fields that should be recorded. The following line should be at the end of each function to retain the output info: ``selector.nodes[selector.current_node_idx]["outputs"] = outputs`` @@ -371,7 +371,7 @@ Every function should end with: selector.nodes[selector.current_node_idx]["outputs"] = outputs return selector - functionname.__doc__ = (functionname.__doc__.format(**decision_docs)) + functionname.__doc__ = (functionname.__doc__.format(**DECISION_DOCS)) This makes sure the outputs from the function are saved in the class structure and the class structure is returned. The following line should include the function's name and diff --git a/tedana/selection/component_selector.py b/tedana/selection/component_selector.py index 2cf493244..8453e9af1 100644 --- a/tedana/selection/component_selector.py +++ b/tedana/selection/component_selector.py @@ -38,9 +38,7 @@ class TreeError(Exception): def load_config(tree): - """ - Loads the json file with the decision tree and validates that the - fields in the decision tree are appropriate. + """Load the json file with the decision tree and validate the fields in the decision tree. 
Parameters ---------- @@ -75,8 +73,7 @@ def load_config(tree): def validate_tree(tree): - """ - Confirms that provided `tree` is a valid decision tree + """Confirm that provided `tree` is a valid decision tree. Parameters ---------- @@ -228,44 +225,39 @@ def validate_tree(tree): class ComponentSelector: - """ - Contains information and methods to load and classify components based on - a specified `tree` - """ + """Load and classify components based on a specified ``tree``.""" def __init__(self, tree, component_table, cross_component_metrics={}, status_table=None): - """ - Initialize the class using the info specified in the json file `tree` + """Initialize the class using the info specified in the json file ``tree``. Parameters ---------- tree : :obj:`str` - The named tree or path to a JSON file that defines one + The named tree or path to a JSON file that defines one. component_table : (C x M) :obj:`pandas.DataFrame` Component metric table. One row for each component, with a column for - each metric; the index should be the component number + each metric; the index should be the component number. cross_component_metrics : :obj:`dict` Metrics that are each a single value calculated across components. - Default is empty + Default is empty dictionary. status_table : :obj:`pandas.DataFrame` A table tracking the status of each component at each step. Pass a status table if running additional steps on a decision tree that was already executed. Default=None. - Notes ----- - Initializing the `ComponentSelector` confirms tree is valid and - loads all information in the tree json file into `ComponentSelector` + Initializing the ``ComponentSelector`` confirms tree is valid and + loads all information in the tree json file into ``ComponentSelector``. - Adds to the `ComponentSelector`: + Adds to the ``ComponentSelector``: - component_status_table: empty dataframe or contents of inputted status_table - cross_component_metrics: empty dict or contents of inputed values - used_metrics: empty set Any parameter that is used by a decision tree node function can be passed - as a parameter in the `ComponentSelector` initialization or can be + as a parameter in the ``ComponentSelector`` initialization or can be included in the json file that defines the decision tree. If a parameter is set in the json file, that will take precedence. As a style rule, a parameter that is the same regardless of the inputted data should be @@ -281,7 +273,7 @@ def __init__(self, tree, component_table, cross_component_metrics={}, status_tab Required for kundu tree An example initialization with these options would look like - `selector = ComponentSelector(tree, comptable, n_echos=n_echos, n_vols=n_vols)` + ``selector = ComponentSelector(tree, comptable, n_echos=n_echos, n_vols=n_vols)`` """ self.tree_name = tree @@ -331,8 +323,9 @@ def __init__(self, tree, component_table, cross_component_metrics={}, status_tab self.component_status_table = status_table def select(self): - """ - Using the validated tree in `ComponentSelector` to run the decision + """Apply the decision tree to data. + + Using the validated tree in ``ComponentSelector`` to run the decision tree functions to calculate cross_component metrics and classify each component as accepted or rejected. 
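For orientation, the initialization-plus-selection pattern described in the docstring above can be sketched in a few lines. This is an illustrative sketch rather than tedana's own workflow code: the metrics file path and the echo/volume counts are hypothetical, and the dataset-specific values are passed through the ``cross_component_metrics`` dictionary that appears in the signature above.

    import pandas as pd

    from tedana.selection.component_selector import ComponentSelector

    # Hypothetical path to a metrics table written by an earlier tedana run
    comptable = pd.read_csv("desc-tedana_metrics.tsv", sep="\t")

    # Dataset-specific values travel with the selector; constants that do not
    # depend on the data belong in the decision tree json itself
    selector = ComponentSelector(
        "kundu",
        comptable,
        cross_component_metrics={"n_echos": 4, "n_vols": 200},
    )
    selector.select()

    # After select(), every component carries a final classification
    n_accepted = (selector.component_table["classification"] == "accepted").sum()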
@@ -410,6 +403,7 @@ def select(self): f"Step {self.current_node_idx} Full outputs: " f"{self.tree['nodes'][self.current_node_idx]['outputs']}" ) + # move decision columns to end self.component_table = clean_dataframe(self.component_table) # warning anything called a necessary metric wasn't used and if @@ -419,14 +413,13 @@ def select(self): self.are_all_components_accepted_or_rejected() def add_manual(self, indices, classification): - """ - Add nodes that will manually classify components + """Add nodes that will manually classify components. Parameters ---------- - indices: :obj:`list[int]` + indices : :obj:`list[int]` The indices to manually classify - classification: :obj:`str` + classification : :obj:`str` The classification to set the nodes to (i.e. accepted or rejected) """ self.tree["nodes"].append( @@ -487,7 +480,7 @@ def are_only_necessary_metrics_used(self): if len(not_used) > 0: LGR.warning( f"Decision tree {self.tree_name} did not use the following metrics " - "that were declared as necessary: {not_used}" + f"that were declared as necessary: {not_used}" ) def are_all_components_accepted_or_rejected(self): @@ -513,24 +506,24 @@ def n_comps(self): return len(self.component_table) @property - def LikelyBOLD_comps(self): - """A boolean pd.DataSeries of components that are tagged "Likely BOLD".""" - LikelyBOLD_comps = self.component_table["classification_tags"].copy() - for idx in range(len(LikelyBOLD_comps)): - if "Likely BOLD" in LikelyBOLD_comps.loc[idx]: - LikelyBOLD_comps.loc[idx] = True + def likely_bold_comps(self): + """A boolean :obj:`pandas.Series` of components that are tagged "Likely BOLD".""" + likely_bold_comps = self.component_table["classification_tags"].copy() + for idx in range(len(likely_bold_comps)): + if "Likely BOLD" in likely_bold_comps.loc[idx]: + likely_bold_comps.loc[idx] = True else: - LikelyBOLD_comps.loc[idx] = False - return LikelyBOLD_comps + likely_bold_comps.loc[idx] = False + return likely_bold_comps @property - def n_LikelyBOLD_comps(self): + def n_likely_bold_comps(self): """The number of components that are tagged "Likely BOLD".""" - return self.LikelyBOLD_comps.sum() + return self.likely_bold_comps.sum() @property def accepted_comps(self): - """A boolean pd.DataSeries of components that are accepted.""" + """A boolean :obj:`pandas.Series` of components that are accepted.""" return self.component_table["classification"] == "accepted" @property @@ -540,15 +533,15 @@ def n_accepted_comps(self): @property def rejected_comps(self): - """A boolean pd.DataSeries of components that are rejected.""" + """A boolean :obj:`pandas.Series` of components that are rejected.""" return self.component_table["classification"] == "rejected" def to_files(self, io_generator): - """Convert this selector into component files + """Convert this selector into component files. Parameters ---------- - io_generator: :obj:`tedana.io.OutputGenerator` + io_generator : :obj:`tedana.io.OutputGenerator` The output generator to use for filename generation and saving. 
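        Examples
        --------
        A minimal sketch, assuming ``selector.select()`` has already been run
        and reusing the ``OutputGenerator`` construction pattern from the
        reclassification workflow (all argument values are illustrative)::

            io_generator = io.OutputGenerator(
                data_oc,
                convention=convention,
                out_dir=out_dir,
                prefix=prefix,
            )
            selector.to_files(io_generator)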
""" io_generator.save_file(self.component_table, "ICA metrics tsv") diff --git a/tedana/selection/selection_nodes.py b/tedana/selection/selection_nodes.py index bf4fa85c6..57b60a4d5 100644 --- a/tedana/selection/selection_nodes.py +++ b/tedana/selection/selection_nodes.py @@ -18,53 +18,51 @@ selectcomps2use, ) -# from scipy import stats - LGR = logging.getLogger("GENERAL") RepLGR = logging.getLogger("REPORT") RefLGR = logging.getLogger("REFERENCES") -decision_docs = { +DECISION_DOCS = { "selector": """\ -selector: :obj:`tedana.selection.component_selector.ComponentSelector` +selector : :obj:`tedana.selection.component_selector.ComponentSelector` The selector to perform decision tree-based component selection with.""", "ifTrueFalse": """\ -ifTrue: :obj:`str` +ifTrue : :obj:`str` If the condition in this step is True, give the component classification this label. Use 'nochange' if no label changes are desired. - ifFalse: :obj:`str` + ifFalse : :obj:`str` If the condition in this step is False, give the component classification this label. Use 'nochange' to indicate if no label changes are desired. """, "decide_comps": """\ -decide_comps: :obj:`str` or :obj:`list[str]` +decide_comps : :obj:`str` or :obj:`list[str]` What classification(s) to operate on. using default or intermediate_classification labels. For example: decide_comps='unclassified' means to operate only on unclassified components. Use 'all' to include all components.""", "log_extra_report": """\ -log_extra_report: :obj:`str` +log_extra_report : :obj:`str` Additional text to the report log. Default="".""", "log_extra_info": """\ -log_extra_info: :obj:`str` +log_extra_info : :obj:`str` Additional text to the information log. Default="".""", "only_used_metrics": """\ -only_used_metrics: :obj:`bool` +only_used_metrics : :obj:`bool` If True, only return the component_table metrics that would be used. Default=False.""", "custom_node_label": """\ -custom_node_label: :obj:`str` +custom_node_label : :obj:`str` A short label to describe what happens in this step. If "" then a label is automatically generated. Default="".""", "tag_ifTrueFalse": """\ -tag_ifTrue: :obj:`str` +tag_ifTrue : :obj:`str` The classification tag to apply if a component is classified True. Default="". - tag_ifFalse: :obj:`str` + tag_ifFalse : :obj:`str` The classification tag to apply if a component is classified False. Default="".""", "basicreturns": """\ -selector: :obj:`tedana.selection.component_selector.ComponentSelector` +selector : :obj:`tedana.selection.component_selector.ComponentSelector` If only_used_metrics is False, the updated selector is returned - used_metrics: :obj:`set(str)` + used_metrics : :obj:`set(str)` If only_used_metrics is True, the names of the metrics used in the function are returned""", } @@ -82,28 +80,27 @@ def manual_classify( tag=None, dont_warn_reclassify=False, ): - """ - Explicitly assign a classification, defined in new_classification, - to all the components in decide_comps. + """Assign a classification, defined in ``new_classification``, to the components in + ``decide_comps``. Parameters ---------- {selector} {decide_comps} - new_classification: :obj:`str` + new_classification : :obj:`str` Assign all components identified in decide_comps the classification in new_classification. 
Options are 'unclassified', 'accepted', 'rejected', or intermediate_classification labels predefined in the decision tree - clear_classification_tags: :obj:`bool` + clear_classification_tags : :obj:`bool` If True, reset all values in the 'classification_tags' column to empty strings. This also can create the classification_tags column if it does not already exist. If False, do nothing. - tag: :obj:`str` + tag : :obj:`str` A classification tag to assign to all components being reclassified. This should be one of the tags defined by classification_tags in the decision tree specification - dont_warn_reclassify: :obj:`bool` + dont_warn_reclassify : :obj:`bool` By default, if this function changes a component classification from accepted or rejected to something else, it gives a warning, since those should be terminal classifications. If this is True, that warning is suppressed. @@ -114,23 +111,22 @@ def manual_classify( {custom_node_label} {only_used_metrics} - Returns ------- {basicreturns} Note ---- - This was designed with three use - cases in mind: (1) Set the classifications of all components to unclassified + This was designed with three use cases in mind: + (1) Set the classifications of all components to unclassified for the first node of a decision tree. clear_classification_tags=True is - recommended for this use case. (2) Shift all components between classifications, - such as provisionalaccept to accepted for the penultimate node in the decision tree. + recommended for this use case. + (2) Shift all components between classifications, such as provisionalaccept to accepted for the + penultimate node in the decision tree. (3) Manually re-classify components by number based on user observations. - Unlike other decision node functions, ifTrue and ifFalse are not inputs - since the same classification is assigned to all components listed in - decide_comps + Unlike other decision node functions, ``ifTrue`` and ``ifFalse`` are not inputs + since the same classification is assigned to all components listed in ``decide_comps``. """ # predefine all outputs that should be logged @@ -176,8 +172,6 @@ def manual_classify( tag_ifTrue=tag, dont_warn_reclassify=dont_warn_reclassify, ) - # outputs["numTrue"] = decision_boolean.sum() - # outputs["numFalse"] = np.logical_not(decision_boolean).sum() log_decision_tree_step( function_name_idx, @@ -197,7 +191,7 @@ def manual_classify( return selector -manual_classify.__doc__ = manual_classify.__doc__.format(**decision_docs) +manual_classify.__doc__ = manual_classify.__doc__.format(**DECISION_DOCS) def dec_left_op_right( @@ -227,8 +221,7 @@ def dec_left_op_right( tag_ifTrue=None, tag_ifFalse=None, ): - """ - Performs a relational comparison. + """Perform a relational comparison. 
Parameters ---------- @@ -517,7 +510,7 @@ def parse_vals(val): return selector -dec_left_op_right.__doc__ = dec_left_op_right.__doc__.format(**decision_docs) +dec_left_op_right.__doc__ = dec_left_op_right.__doc__.format(**DECISION_DOCS) def dec_variance_lessthan_thresholds( @@ -642,7 +635,7 @@ def dec_variance_lessthan_thresholds( dec_variance_lessthan_thresholds.__doc__ = dec_variance_lessthan_thresholds.__doc__.format( - **decision_docs + **DECISION_DOCS ) @@ -743,7 +736,7 @@ def calc_median( return selector -calc_median.__doc__ = calc_median.__doc__.format(**decision_docs) +calc_median.__doc__ = calc_median.__doc__.format(**DECISION_DOCS) def calc_kappa_elbow( @@ -846,7 +839,7 @@ def calc_kappa_elbow( return selector -calc_kappa_elbow.__doc__ = calc_kappa_elbow.__doc__.format(**decision_docs) +calc_kappa_elbow.__doc__ = calc_kappa_elbow.__doc__.format(**DECISION_DOCS) def calc_rho_elbow( @@ -994,7 +987,7 @@ def calc_rho_elbow( return selector -calc_rho_elbow.__doc__ = calc_rho_elbow.__doc__.format(**decision_docs) +calc_rho_elbow.__doc__ = calc_rho_elbow.__doc__.format(**DECISION_DOCS) def dec_classification_doesnt_exist( @@ -1129,7 +1122,7 @@ def dec_classification_doesnt_exist( dec_classification_doesnt_exist.__doc__ = dec_classification_doesnt_exist.__doc__.format( - **decision_docs + **DECISION_DOCS ) @@ -1295,7 +1288,7 @@ def calc_varex_thresh( return selector -calc_varex_thresh.__doc__ = calc_varex_thresh.__doc__.format(**decision_docs) +calc_varex_thresh.__doc__ = calc_varex_thresh.__doc__.format(**DECISION_DOCS) def calc_extend_factor( @@ -1370,7 +1363,7 @@ def calc_extend_factor( return selector -calc_extend_factor.__doc__ = calc_extend_factor.__doc__.format(**decision_docs) +calc_extend_factor.__doc__ = calc_extend_factor.__doc__.format(**DECISION_DOCS) def calc_max_good_meanmetricrank( @@ -1479,7 +1472,7 @@ def calc_max_good_meanmetricrank( return selector -calc_max_good_meanmetricrank.__doc__ = calc_max_good_meanmetricrank.__doc__.format(**decision_docs) +calc_max_good_meanmetricrank.__doc__ = calc_max_good_meanmetricrank.__doc__.format(**DECISION_DOCS) def calc_varex_kappa_ratio( @@ -1600,7 +1593,7 @@ def calc_varex_kappa_ratio( return selector -calc_varex_kappa_ratio.__doc__ = calc_varex_kappa_ratio.__doc__.format(**decision_docs) +calc_varex_kappa_ratio.__doc__ = calc_varex_kappa_ratio.__doc__.format(**DECISION_DOCS) def calc_revised_meanmetricrank_guesses( @@ -1798,7 +1791,7 @@ def calc_revised_meanmetricrank_guesses( calc_revised_meanmetricrank_guesses.__doc__ = calc_revised_meanmetricrank_guesses.__doc__.format( - **decision_docs + **DECISION_DOCS ) # NOTE: to debug any documentation rendering, I recommend the following hack: diff --git a/tedana/selection/selection_utils.py b/tedana/selection/selection_utils.py index c1ad69642..864fce092 100644 --- a/tedana/selection/selection_utils.py +++ b/tedana/selection/selection_utils.py @@ -18,15 +18,13 @@ def selectcomps2use(selector, decide_comps): - """ - Get a list of component numbers that fit the classification types in - decide_comps. + """Get a list of component numbers that fit the classification types in ``decide_comps``. 
Parameters ---------- - selector: :obj:`tedana.selection.component_selector.ComponentSelector` + selector : :obj:`~tedana.selection.component_selector.ComponentSelector` Only uses the component_table in this object - decide_comps: :obj:`str` or :obj:`list[str]` or :obj:`list[int]` + decide_comps : :obj:`str` or :obj:`list[str]` or :obj:`list[int]` This is string or a list of strings describing what classifications of components to operate on, using default or intermediate_classification labels. For example: decide_comps='unclassified' means to operate only on @@ -36,7 +34,7 @@ def selectcomps2use(selector, decide_comps): Returns ------- - comps2use: :obj:`list[int]` + comps2use : :obj:`list[int]` A list of component indices with classifications included in decide_comps """ @@ -104,30 +102,30 @@ def change_comptable_classifications( Parameters ---------- - selector: :obj:`tedana.selection.component_selector.ComponentSelector` + selector : :obj:`tedana.selection.component_selector.ComponentSelector` The attributes used are component_table, component_status_table, and current_node_idx - ifTrue, ifFalse: :obj:`str` + ifTrue, ifFalse : :obj:`str` If the condition in this step is true or false, give the component the label in this string. Options are 'accepted', 'rejected', 'nochange', or intermediate_classification labels predefined in the decision tree. If 'nochange' then don't change the current component classification - decision_boolean: :obj:`pd.Series(bool)` + decision_boolean : :obj:`pd.Series(bool)` A dataframe column of equal length to component_table where each value is True or False. - tag_ifTrue, tag_ifFalse: :obj:`str` + tag_ifTrue, tag_ifFalse : :obj:`str` A string containing a label in classification_tags that will be added to the classification_tags column in component_table if a component is classified as true or false. default=None - dont_warn_reclassify: :obj:`bool` + dont_warn_reclassify : :obj:`bool` If this function changes a component classification from accepted or rejected to something else, it gives a warning. If this is True, that warning is suppressed. default=False Returns ------- - selector: :obj:`tedana.selection.component_selector.ComponentSelector` + selector : :obj:`tedana.selection.component_selector.ComponentSelector` component_table["classifications"] will reflect any new classifications. component_status_table will have a new column titled @@ -136,7 +134,7 @@ def change_comptable_classifications( component_table["classification_tags"] will be updated to include any new tags. Each tag should appear only once in the string and tags will be separated by commas. - numTrue, numFalse: :obj:`int` + numTrue, numFalse : :obj:`int` The number of True and False components in decision_boolean Note @@ -178,40 +176,38 @@ def comptable_classification_changer( tag_if=None, dont_warn_reclassify=False, ): - """ - Implement the component classification changes specified in - change_comptable_classifications. + """Implement the component classification changes from ``change_comptable_classifications``. 
Parameters ---------- - selector: :obj:`tedana.selection.component_selector.ComponentSelector` + selector : :obj:`tedana.selection.component_selector.ComponentSelector` The attributes used are component_table, component_status_table, and current_node_idx boolstate : :obj:`bool` Change classifications only for True or False components in decision_boolean based on this variable - classify_if: :obj:`str` + classify_if : :obj:`str` This should be if_True or if_False to match boolstate. If the condition in this step is true or false, give the component the label in this string. Options are 'accepted', 'rejected', 'nochange', or intermediate_classification labels predefined in the decision tree. If 'nochange' then don't change the current component classification - decision_boolean: :obj:`pd.Series(bool)` + decision_boolean : :obj:`pd.Series(bool)` A dataframe column of equal length to component_table where each value is True or False. - tag_if: :obj:`str` + tag_if : :obj:`str` This should be tag_ifTrue or tag_ifFalse to match boolstate A string containing a label in classification_tags that will be added to the classification_tags column in component_table if a component is classified as true or false. default=None - dont_warn_reclassify: :obj:`bool` + dont_warn_reclassify : :obj:`bool` If this function changes a component classification from accepted or rejected to something else, it gives a warning. If this is True, that warning is suppressed. default=False Returns ------- - selector: :obj:`tedana.selection.component_selector.ComponentSelector` + selector : :obj:`tedana.selection.component_selector.ComponentSelector` Operates on the True OR False components depending on boolstate component_table["classifications"] will reflect any new classifications. @@ -221,13 +217,17 @@ def comptable_classification_changer( component_table["classification_tags"] will be updated to include any new tags. Each tag should appear only once in the string and tags will be separated by commas. - If a classification is changed away from accepted or rejected and - dont_warn_reclassify is False, then a warning is logged + + Warns + ----- + UserWarning + If a classification is changed away from accepted or rejected and + dont_warn_reclassify is False, then a warning is logged Note ---- This is designed to be run by - `tedana.selection.selection_utils.change_comptable_classifications`. + :func:`~tedana.selection.selection_utils.change_comptable_classifications`. This function is run twice, ones for changes to make of a component is True and again for components that are False. @@ -380,11 +380,10 @@ def log_decision_tree_step( Parameters ---------- - function_name_idx: :obj:`str` + function_name_idx : :obj:`str` The name of the function that should be logged. By convention, this be "Step current_node_idx: function_name" - - comps2use: :obj:`list[int]` or -1 + comps2use : :obj:`list[int]` or -1 A list of component indices that should be used by a function. Only used to report no components found if empty and report the number of components found if not empty. @@ -393,18 +392,17 @@ def log_decision_tree_step( components. For those functions, set comps2use==-1 to avoid logging a warning that no components were found. Currently, this is only used by `calc_extend_factor` - - decide_comps: :obj:`str` or :obj:`list[str]` or :obj:`list[int]` + decide_comps : :obj:`str` or :obj:`list[str]` or :obj:`list[int]` This is string or a list of strings describing what classifications of components to operate on. 
Only used in this function to report its contents if no components with these classifications were found - numTrue, numFalse: :obj:`int` + numTrue, numFalse : :obj:`int` The number of components classified as True or False - ifTrue, ifFalse: :obj:`str` + ifTrue, ifFalse : :obj:`str` If a component is true or false, the classification to assign that component - calc_outputs: :obj:`dict` + calc_outputs : :obj:`dict` A dictionary with output information from the function. If it contains a key "calc_cross_comp_metrics" then the value for that key is a list of cross component metrics (i.e. kappa or rho elbows) that were calculated @@ -445,8 +443,7 @@ def log_decision_tree_step( def log_classification_counts(decision_node_idx, component_table): - """ - Log the total counts for each component classification in component_table + """Log the total counts for each component classification in component_table. Parameters ---------- @@ -478,8 +475,7 @@ def log_classification_counts(decision_node_idx, component_table): # Calculations that are used in decision tree functions ####################################################### def getelbow_cons(arr, return_val=False): - """ - Elbow using mean/variance method - conservative + """Elbow using mean/variance method - conservative Parameters ---------- @@ -585,21 +581,21 @@ def kappa_elbow_kundu(component_table, n_echos, comps2use=None): Component metric table. One row for each component, with a column for each metric. The index should be the component number. Only the 'kappa' column is used in this function - n_echos: :obj:`int` + n_echos : :obj:`int` The number of echos in the multi-echo data - comps2use: :obj:`list[int]` + comps2use : :obj:`list[int]` A list of component indices used to calculate the elbow default=None which means use all components Returns ------- - kappa_elbow: :obj:`float` + kappa_elbow : :obj:`float` The 'elbow' value for kappa values, above which components are considered more likely to contain T2* weighted signals. minimum of kappa_allcomps_elbow and kappa_nonsig_elbow - kappa_allcomps_elbow: :obj:`float` + kappa_allcomps_elbow : :obj:`float` The elbow for kappa values using all components in comps2use - kappa_nonsig_elbow: :obj:`float` + kappa_nonsig_elbow : :obj:`float` The elbow for kappa values excluding kappa values above a threshold None if there are fewer than 6 values remaining after thresholding @@ -658,37 +654,34 @@ def rho_elbow_kundu_liberal( Component metric table. One row for each component, with a column for each metric. The index should be the component number. Only the 'kappa' column is used in this function - - n_echos: :obj:`int` + n_echos : :obj:`int` The number of echos in the multi-echo data - rho_elbow_type: :obj:`str` + rho_elbow_type : :obj:`str` The algorithm used to calculate the rho elbow. Current options are 'kundu' and 'liberal'. 
- - comps2use: :obj:`list[int]` + comps2use : :obj:`list[int]` A list of component indices used to calculate the elbow default=None which means use all components - - subset_comps2use: :obj:`list[int]` + subset_comps2use : :obj:`list[int]` A list of component indices used to calculate the elbow If None then only calculate a threshold using all components default=-1 which means use only 'unclassified' components Returns ------- - rho_elbow: :obj:`float` + rho_elbow : :obj:`float` The 'elbow' value for rho values, above which components are considered more likely to contain S0 weighted signals - varex_upper_p: :obj:`float` + varex_upper_p : :obj:`float` This is the median "variance explained" across components with kappa values greater than the kappa_elbow calculated using all components None if subset_comps2use is None - rho_allcomps_elbow: :obj:`float` + rho_allcomps_elbow : :obj:`float` rho elbow calculated using all components in comps2use - rho_unclassified_elbow: :obj:`float` + rho_unclassified_elbow : :obj:`float` rho elbow clculated using all components in subset_comps2use None if subset_comps2use is None - elbow_f05: :obj:`float` + elbow_f05 : :obj:`float` A significant threshold based on the number of echoes. Used as part of the mean for rho_elbow_type=='kundu' @@ -797,18 +790,17 @@ def get_extend_factor(n_vols=None, extend_factor=None): Parameters ---------- - n_vols: :obj:`int` + n_vols : :obj:`int` The number of volumes in an fMRI time series. default=None In the MEICA code, extend_factor was hard-coded to 2 for data with more than 100 volumes and 3 for data with less than 100 volumes. Now is linearly ramped from 2-3 for vols between 90 & 110 - - extend_factor: :obj:`float` + extend_factor : :obj:`float` The scaler used to set a threshold for d_table_score. default=None Returns ------- - extend_factor: :obj:`float` + extend_factor : :obj:`float` Note ---- diff --git a/tedana/selection/tedica.py b/tedana/selection/tedica.py index fa7420b04..682bb5c61 100644 --- a/tedana/selection/tedica.py +++ b/tedana/selection/tedica.py @@ -15,19 +15,18 @@ def automatic_selection(component_table, n_echos, n_vols, tree="kundu", verbose= Parameters ---------- - component_table: :obj:`pd.DataFrame` + component_table : :obj:`pd.DataFrame` The component table to classify - n_echos: :obj:`int` + n_echos : :obj:`int` The number of echoes in this dataset - tree: :obj:`str` + tree : :obj:`str` The type of tree to use for the ComponentSelector object. Default="kundu" - verbose: :obj:`bool` + verbose : :obj:`bool` More verbose logging output if True. Default=False Returns ------- - - selector: :obj:`tedana.selection.component_selector.ComponentSelector` + selector : :obj:`tedana.selection.component_selector.ComponentSelector` Contains component classifications in a component_table and provenance and metadata from the component selection process @@ -35,7 +34,8 @@ def automatic_selection(component_table, n_echos, n_vols, tree="kundu", verbose= ----- If tree=kundu, the selection algorithm used in this function was originated in ME-ICA by Prantik Kundu, and his original implementation is available at: - https://github.com/ME-ICA/me-ica/blob/b2781dd087ab9de99a2ec3925f04f02ce84f0adc/meica.libs/select_model.py + https://github.com/ME-ICA/me-ica/blob/\ + b2781dd087ab9de99a2ec3925f04f02ce84f0adc/meica.libs/select_model.py The appropriate citation is :footcite:t:`kundu2013integrated`. 
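To make the calling pattern concrete, here is a short sketch; the echo and volume counts are illustrative, and the component table is assumed to come from tedana's metrics-collection step:

    from tedana import selection

    # tree may name a packaged tree ("kundu", "minimal") or be a path to a
    # custom decision tree json file
    selector = selection.automatic_selection(
        comptable, n_echos=4, n_vols=200, tree="minimal"
    )

    # The returned ComponentSelector carries classifications and provenance
    print(selector.n_likely_bold_comps, "components tagged Likely BOLD")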
@@ -47,8 +47,8 @@ def automatic_selection(component_table, n_echos, n_vols, tree="kundu", verbose= components, a hypercommented version of this attempt is available at: https://gist.github.com/emdupre/ca92d52d345d08ee85e104093b81482e - If tree==minimal, the selection algorithm based on the kundu tree with differences - described in the `FAQ`_ + If tree=="minimal", a selection algorithm based on the "kundu" tree will be used. + The differences between the "minimal" and "kundu" trees are described in the `FAQ`_. References ---------- diff --git a/tedana/selection/tedpca.py b/tedana/selection/tedpca.py index 7f2f5253c..5a99fea71 100644 --- a/tedana/selection/tedpca.py +++ b/tedana/selection/tedpca.py @@ -17,8 +17,7 @@ def kundu_tedpca(comptable, n_echos, kdaw=10.0, rdaw=1.0, stabilize=False): - """ - Select PCA components using Kundu's decision tree approach. + """Select PCA components using Kundu's decision tree approach. Parameters ---------- diff --git a/tedana/tests/test_component_selector.py b/tedana/tests/test_component_selector.py index 371b4848f..487119c74 100644 --- a/tedana/tests/test_component_selector.py +++ b/tedana/tests/test_component_selector.py @@ -321,8 +321,8 @@ def test_selector_properties_smoke(): assert selector.n_comps == 21 - # Also runs selector.LikelyBOLD_comps and should need to deal with sets in each field - assert selector.n_LikelyBOLD_comps == 17 + # Also runs selector.likely_bold_comps and should need to deal with sets in each field + assert selector.n_likely_bold_comps == 17 assert selector.n_accepted_comps == 17 diff --git a/tedana/workflows/tedana.py b/tedana/workflows/tedana.py index 6bacbeb30..6031fb07d 100644 --- a/tedana/workflows/tedana.py +++ b/tedana/workflows/tedana.py @@ -658,10 +658,10 @@ def tedana_workflow( metrics=required_metrics, ) ica_selection = selection.automatic_selection(comptable, n_echos, n_vols, tree=tree) - n_LikelyBOLD_comps = ica_selection.n_LikelyBOLD_comps - if (n_restarts < maxrestart) and (n_LikelyBOLD_comps == 0): + n_likely_bold_comps = ica_selection.n_likely_bold_comps + if (n_restarts < maxrestart) and (n_likely_bold_comps == 0): LGR.warning("No BOLD components found. Re-attempting ICA.") - elif n_LikelyBOLD_comps == 0: + elif n_likely_bold_comps == 0: LGR.warning("No BOLD components found, but maximum number of restarts reached.") keep_restarting = False else: @@ -736,7 +736,7 @@ def tedana_workflow( } io_generator.save_file(decomp_metadata, "ICA decomposition json") - if ica_selection.n_LikelyBOLD_comps == 0: + if ica_selection.n_likely_bold_comps == 0: LGR.warning("No BOLD components detected! Please check data and results!") # TODO: un-hack separate comptable From f0caf5b32a776e75875fb41f2e8341d8d9ee6052 Mon Sep 17 00:00:00 2001 From: Taylor Salo Date: Tue, 10 Jan 2023 17:09:24 -0500 Subject: [PATCH 144/177] Remove trailing whitespace. --- docs/api.rst | 2 +- docs/classification_output_descriptions.rst | 8 ++++---- docs/faq.rst | 6 +++--- docs/output_file_descriptions.rst | 4 ++-- 4 files changed, 10 insertions(+), 10 deletions(-) diff --git a/docs/api.rst b/docs/api.rst index dfe62e68c..fce7d27bc 100644 --- a/docs/api.rst +++ b/docs/api.rst @@ -133,7 +133,7 @@ API .. 
autosummary:: :toctree: generated/ :template: module.rst - + tedana.selection.selection_nodes tedana.selection.selection_utils tedana.selection.tedica diff --git a/docs/classification_output_descriptions.rst b/docs/classification_output_descriptions.rst index c1eb241c2..d5e57dcb2 100644 --- a/docs/classification_output_descriptions.rst +++ b/docs/classification_output_descriptions.rst @@ -21,7 +21,7 @@ Gaussian noise and ICA would not reliably converge. Standard methods for data reduction use cost functions, like MDL, KIC, and AIC to estimate the variance that is just noise and remove the lowest variance components under that threshold. By default, ``tedana`` uses AIC. Of those three, AIC is the least agressive and -will retain the most components. +will retain the most components. ``Tedana`` includes additional `kundu` and `kundu-stabilize` approaches that identify and remove components that don't contain T2* or S0 signal and are more @@ -49,7 +49,7 @@ P007 rejected Rho below fmin (only in stabilized PCA decision tree) ICA Classification Outputs ========================== -The component table is stored in ``desc-tedana_metrics.tsv`` or +The component table is stored in ``desc-tedana_metrics.tsv`` or ``tedana_metrics.tsv``. Each row is a component number. Each column is a metric that is calculated for each component. Short descriptions of each column metric are in the output log, ``tedana_[date_time].tsv``, and the actual metric @@ -93,7 +93,7 @@ in several places: - The information in the output log includes the name of each node and the count of components that changed classification during execution. -- The same information is stored in the `ICA decision tree` json file (see +- The same information is stored in the `ICA decision tree` json file (see `descriptions of file names`_) in the "output" field for each node. That information is organized so that it can be used to generate a visual or text-based summary of what happened when the decision tree was run on a dataset. @@ -101,5 +101,5 @@ in several places: each node was run. This is particularly useful to trying to understand how a specific component ended receiving its classification. -.. _collect.py: https://github.com/ME-ICA/tedana/blob/main/tedana/metrics/collect.py +.. _collect.py: https://github.com/ME-ICA/tedana/blob/main/tedana/metrics/collect.py .. _descriptions of file names: output_file_descriptions.html \ No newline at end of file diff --git a/docs/faq.rst b/docs/faq.rst index 7dd013f9b..cd19533d1 100644 --- a/docs/faq.rst +++ b/docs/faq.rst @@ -119,7 +119,7 @@ is also accepted under the assumption that, even if a component contains noise, is sufficient T2*-weighted signal to retain. Similarly to the kundu tree, components with very low variance are retained so that degrees of freedom aren't wasted by removing them, but `minimal` makes sure that no more than 1% of total variance is -removed this way. +removed this way. ``tedana`` developers still want to examine how the minimal tree performs on a wide range of datasets, but primary benefit is that it is possible to describe what it does @@ -146,10 +146,10 @@ Dr. Prantik Kundu developed a multi-echo ICA (ME-ICA) denoising method and `shared code on bitbucket`_ to allow others to use the method. A nearly identical version of this code is `distributed with AFNI as MEICA v2.5 beta 11`_. Most early publications that validated the MEICA method used variants of this code. That code -runs only on the now defunct python 2.7 and is not under active development. 
+runs only on the now defunct python 2.7 and is not under active development. ``tedana`` when run with `--tree kundu --tedpca kundu` (or `--tedpca kundu-stabilize`), uses the same core algorithm as in MEICA v2.5. Since ICA is a nondeterministic -algorithm and ``tedana`` and MEICA use different PCA and ICA code, the algorithm will +algorithm and ``tedana`` and MEICA use different PCA and ICA code, the algorithm will mostly be the same, but the results will not be identical. Prantik Kundu also worked on `MEICA v3.2`_ (also for python v2.7). The underlying ICA diff --git a/docs/output_file_descriptions.rst b/docs/output_file_descriptions.rst index 5ddf7d1e7..c703da8fc 100644 --- a/docs/output_file_descriptions.rst +++ b/docs/output_file_descriptions.rst @@ -4,7 +4,7 @@ Output file name descriptions tedana allows for multiple file naming conventions. The key labels and naming options for each convention that can be set using the `--convention` option are in `outputs.json`_. -The output of `tedana` also includes a file called `registry.json` or +The output of `tedana` also includes a file called `registry.json` or `desc-tedana_registry.json` that includes the keys and the matching file names for the output. The table below lists both these keys and the default "BIDS Derivatives" file names. @@ -76,7 +76,7 @@ tedana_report.html The ``desc-tedana_metrics.tsv``. "ICA cross component metrics json": desc-ICACrossComponent_metrics.json Metric names and values that are each a single number calculated across components. For example, kappa and - rho elbows. + rho elbows. "ICA decision tree json": desc-ICA_decision_tree A copy of the inputted decision tree specification with an added "output" field for each node. The output field contains information about what happened during From f0f934f60177a06ae282dbab3c9a935841529dda Mon Sep 17 00:00:00 2001 From: Taylor Salo Date: Wed, 11 Jan 2023 10:36:12 -0500 Subject: [PATCH 145/177] Spacing in io. --- tedana/io.py | 52 +++++++++++++++++++++++++++++----------------------- 1 file changed, 29 insertions(+), 23 deletions(-) diff --git a/tedana/io.py b/tedana/io.py index b93fe828c..64e37cb7f 100644 --- a/tedana/io.py +++ b/tedana/io.py @@ -33,8 +33,7 @@ class CustomEncoder(json.JSONEncoder): - """Class for converting some types because of JSON serialization and numpy - incompatibilities + """Class for converting some types because of JSON serialization and numpy incompatibilities. See here: https://stackoverflow.com/q/50916422/2589328 """ @@ -89,11 +88,11 @@ class OutputGenerator: This will correspond to a "figures" subfolder of ``out_dir``. prefix : str Prefix to prepend to output filenames. - force: bool + force : bool Whether to force file overwrites. verbose : bool Whether or not to generate verbose output. - registry: dict + registry : dict A registry of all files saved """ @@ -127,6 +126,7 @@ def __init__( f"({', '.join(v.keys())})" ) cfg[k] = v[convention] + self.config = cfg self.reference_img = check_niimg(reference_img) self.convention = convention @@ -259,6 +259,7 @@ def save_file(self, data, description, **kwargs): "please use the --force option in the command line or the " "force parameter in the Python API." 
) + if description.endswith("img"): self.save_img(data, name) elif description.endswith("json"): @@ -266,6 +267,8 @@ def save_file(self, data, description, **kwargs): self.save_json(prepped, name) elif description.endswith("tsv"): self.save_tsv(data, name) + else: + raise ValueError(f"Unsupported file {description}") self.registry[description] = op.basename(name) @@ -291,6 +294,7 @@ def save_img(self, data, name): return elif not isinstance(data, np.ndarray): raise TypeError(f"Data supplied must of type np.ndarray, not {data_type}.") + if data.ndim not in (1, 2): raise TypeError(f"Data must have number of dimensions in (1, 2), not {data.ndim}") @@ -319,6 +323,7 @@ def save_json(self, data, name): data_type = type(data) if not isinstance(data, dict): raise TypeError(f"data must be a dict, not type {data_type}.") + with open(name, "w") as fo: json.dump(data, fo, indent=4, sort_keys=True, cls=CustomEncoder) @@ -335,6 +340,7 @@ def save_tsv(self, data, name): data_type = type(data) if not isinstance(data, pd.DataFrame): raise TypeError(f"data must be pd.Data, not type {data_type}.") + # Replace blanks with numpy NaN deblanked = data.replace("", np.nan) deblanked.to_csv(name, sep="\t", line_terminator="\n", na_rep="n/a", index=False) @@ -365,12 +371,17 @@ def get_file_path(self, description): return None def get_file_contents(self, description): + """Get file contents. + + Notes + ----- + Since we restrict to just these three types, this function should always return. + If more types are added, the loaders dict will need to be updated with an appropriate + loader. + """ for ftype, loader in InputHarvester.loaders.items(): if ftype in description: return loader(self.get_file_path(description)) - # Since we restrict to just these three types, this function should - # always return. If more types are added, the loaders dict will - # need to be updated with an appopriate loader @property def registry(self): @@ -422,8 +433,7 @@ def load_json(path: str) -> dict: def add_decomp_prefix(comp_num, prefix, max_value): - """ - Create component name with leading zeros matching number of components + """Create component name with leading zeros matching number of components. Parameters ---------- @@ -494,8 +504,7 @@ def denoise_ts(data, mmix, mask, comptable): # File Writing Functions def write_split_ts(data, mmix, mask, comptable, io_generator, echo=0): - """ - Splits `data` into denoised / noise / ignored time series and saves to disk + """Split `data` into denoised / noise / ignored time series and save to disk. Parameters ---------- @@ -559,8 +568,7 @@ def write_split_ts(data, mmix, mask, comptable, io_generator, echo=0): def writeresults(ts, mask, comptable, mmix, n_vols, io_generator): - """ - Denoises `ts` and saves all resulting files to disk + """Denoise `ts` and save all resulting files to disk. Parameters ---------- @@ -621,8 +629,7 @@ def writeresults(ts, mask, comptable, mmix, n_vols, io_generator): def writeresults_echoes(catd, mmix, mask, comptable, io_generator): - """ - Saves individually denoised echos to disk + """Save individually denoised echos to disk. Parameters ---------- @@ -658,7 +665,6 @@ def writeresults_echoes(catd, mmix, mask, comptable, io_generator): -------- tedana.io.write_split_ts: Writes out the files. 
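    Examples
    --------
    A sketch of the expected call, using the array orientations assumed by
    this module (all variable names are illustrative)::

        # catd is (samples x echos x time); mmix is (time x components)
        writeresults_echoes(catd, mmix, mask, comptable, io_generator)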
""" - for i_echo in range(catd.shape[1]): LGR.info("Writing Kappa-filtered echo #{:01d} timeseries".format(i_echo + 1)) write_split_ts(catd[:, i_echo, :], mmix, mask, comptable, io_generator, echo=(i_echo + 1)) @@ -722,8 +728,7 @@ def load_data(data, n_echos=None): # Helper Functions def new_nii_like(ref_img, data, affine=None, copy_header=True): - """ - Coerces `data` into NiftiImage format like `ref_img` + """Coerce `data` into NiftiImage format like `ref_img`. Parameters ---------- @@ -741,7 +746,6 @@ def new_nii_like(ref_img, data, affine=None, copy_header=True): nii : :obj:`nibabel.nifti1.Nifti1Image` NiftiImage """ - ref_img = check_niimg(ref_img) newdata = data.reshape(ref_img.shape[:3] + data.shape[1:]) if ".nii" not in ref_img.valid_exts: @@ -756,8 +760,7 @@ def new_nii_like(ref_img, data, affine=None, copy_header=True): def split_ts(data, mmix, mask, comptable): - """ - Splits `data` time series into accepted component time series and remainder + """Split `data` time series into accepted component time series and remainder. Parameters ---------- @@ -795,11 +798,11 @@ def split_ts(data, mmix, mask, comptable): def prep_data_for_json(d) -> dict: - """Attempts to create a JSON serializable dictionary from a data dictionary + """Attempt to create a JSON serializable dictionary from a data dictionary. Parameters ---------- - d: dict + d : dict A dictionary that will be converted into something JSON serializable Raises @@ -834,6 +837,7 @@ def prep_data_for_json(d) -> dict: v = v.tolist() elif isinstance(v, np.int64) or isinstance(v, np.uint64): v = int(v) + # NOTE: add more special cases for type conversions above this # comment line as an elif block d[k] = v @@ -872,6 +876,7 @@ def str_to_component_list(s: str) -> List[int]: elif len(possible_list) == 1 and possible_list[0].isnumeric(): # We have a likely hit and there is just one component break + # Make sure we can actually convert this split list into an integer # Crash with a sensible error if not for x in possible_list: @@ -912,6 +917,7 @@ def fname_to_component_list(fname: str) -> List[int]: return contents["Components"].tolist() else: raise ValueError(f"Cannot determine a components column in file {fname}") + with open(fname, "r") as fp: contents = fp.read() return str_to_component_list(contents) From 7ec4d81383d4a7c8c353de3e4624943a46279f00 Mon Sep 17 00:00:00 2001 From: Taylor Salo Date: Wed, 11 Jan 2023 12:01:13 -0500 Subject: [PATCH 146/177] More minor changes. 
--- tedana/resources/decision_trees/minimal.json | 2 +- tedana/selection/component_selector.py | 23 ++++++------- tedana/selection/selection_utils.py | 34 ++++++++++---------- tedana/workflows/tedana.py | 18 +++++------ tedana/workflows/tedana_reclassify.py | 4 +++ 5 files changed, 43 insertions(+), 38 deletions(-) diff --git a/tedana/resources/decision_trees/minimal.json b/tedana/resources/decision_trees/minimal.json index 97ee0b217..52b9177c5 100644 --- a/tedana/resources/decision_trees/minimal.json +++ b/tedana/resources/decision_trees/minimal.json @@ -1,7 +1,7 @@ { "tree_id": "minimal_decision_tree_test1", "info": "Proposed minimal decision tree", - "report": "This is based on the minimal criteria of the original MEICA decision tree without the more agressive noise removal steps", + "report": "This is based on the minimal criteria of the original MEICA decision tree without the more aggressive noise removal steps", "refs": "Kundu 2013; DuPre, Salo, 2021", "necessary_metrics": [ "kappa", diff --git a/tedana/selection/component_selector.py b/tedana/selection/component_selector.py index 8453e9af1..cb083174e 100644 --- a/tedana/selection/component_selector.py +++ b/tedana/selection/component_selector.py @@ -258,17 +258,17 @@ def __init__(self, tree, component_table, cross_component_metrics={}, status_tab Any parameter that is used by a decision tree node function can be passed as a parameter in the ``ComponentSelector`` initialization or can be - included in the json file that defines the decision tree. If a parameter - is set in the json file, that will take precedence. As a style rule, a - parameter that is the same regardless of the inputted data should be - defined in the decision tree json file. A parameter that is dataset specific - should be passed through the initialization function. Dataset specific - parameters that may need to be passed during initialization include: + included in the json file that defines the decision tree. + If a parameter is set in the json file, that will take precedence. + As a style rule, a parameter that is the same regardless of the inputted data should be + defined in the decision tree json file. + A parameter that is dataset-specific should be passed through the initialization function. + Dataset-specific parameters that may need to be passed during initialization include: n_echos : :obj:`int` Number of echos in multi-echo fMRI data. 
Required for kundu and minimal trees - n_vols: :obj:`int` + n_vols : :obj:`int` Number of volumes (time points) in the fMRI data Required for kundu tree @@ -343,17 +343,17 @@ def select(self): When this is run, multiple elements in `ComponentSelector` will change including: - - component_table: `classification` column with `accepted` or `rejected labels` - and `classification_tags` column with can hold multiple comma-separated labels + - component_table: ``classification`` column with ``accepted`` or ``rejected`` labels + and ``classification_tags`` column with can hold multiple comma-separated labels explaining why a classification happened - cross_component_metrics: Any values that were calculated based on the metric values across components or by direct user input - component_status_table: Contains the classification statuses at each node in the decision tree - used_metrics: A list of metrics used in the selection process - - nodes: The original tree definition with an added `outputs` key listing + - nodes: The original tree definition with an added ``outputs`` key listing everything that changed in each node - - current_node_idx: The total number of nodes run in `ComponentSelector` + - current_node_idx: The total number of nodes run in ``ComponentSelector`` """ if "classification_tags" not in self.component_table.columns: @@ -393,6 +393,7 @@ def select(self): self = fcn(self, **params, **kwargs) else: self = fcn(self, **params) + self.tree["used_metrics"].update( self.tree["nodes"][self.current_node_idx]["outputs"]["used_metrics"] ) diff --git a/tedana/selection/selection_utils.py b/tedana/selection/selection_utils.py index 864fce092..d8825c25a 100644 --- a/tedana/selection/selection_utils.py +++ b/tedana/selection/selection_utils.py @@ -205,6 +205,7 @@ def comptable_classification_changer( If this function changes a component classification from accepted or rejected to something else, it gives a warning. If this is True, that warning is suppressed. default=False + Returns ------- selector : :obj:`tedana.selection.component_selector.ComponentSelector` @@ -230,7 +231,6 @@ def comptable_classification_changer( :func:`~tedana.selection.selection_utils.change_comptable_classifications`. This function is run twice, ones for changes to make of a component is True and again for components that are False. - """ if classify_if != "nochange": changeidx = decision_boolean.index[np.asarray(decision_boolean) == boolstate] @@ -337,9 +337,12 @@ def confirm_metrics_exist(component_table, necessary_metrics, function_name=None Returns ------- metrics_exist : :obj:`bool` - True if all metrics in necessary_metrics are in component_table + True if all metrics in necessary_metrics are in component_table - If metrics_exist is False then raise an error and end the program + Raises + ------ + ValueError + If metrics_exist is False then raise an error and end the program Note ----- @@ -347,7 +350,6 @@ def confirm_metrics_exist(component_table, necessary_metrics, function_name=None the columns exist. Also, the string in `necessary_metrics` and the column labels in component_table will only be matched if they're identical. 
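    Examples
    --------
    A minimal sketch (the metric names are illustrative)::

        import pandas as pd

        component_table = pd.DataFrame(columns=["kappa", "rho"])

        # All necessary metrics are present, so this returns without error
        confirm_metrics_exist(component_table, necessary_metrics={"kappa", "rho"})

        # A metric missing from the table raises a ValueError
        confirm_metrics_exist(component_table, necessary_metrics={"kappa", "varex"})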
""" - missing_metrics = necessary_metrics - set(component_table.columns) metrics_exist = len(missing_metrics) > 0 if metrics_exist is True: @@ -375,8 +377,7 @@ def log_decision_tree_step( ifFalse=None, calc_outputs=None, ): - """ - Logging text to add after every decision tree calculation + """Logging text to add after every decision tree calculation Parameters ---------- @@ -401,7 +402,6 @@ def log_decision_tree_step( ifTrue, ifFalse : :obj:`str` If a component is true or false, the classification to assign that component - calc_outputs : :obj:`dict` A dictionary with output information from the function. If it contains a key "calc_cross_comp_metrics" then the value for that key is a list of @@ -411,23 +411,24 @@ def log_decision_tree_step( Returns ------- - Information is added to the LGR.info logger. This either logs that \ - nothing was changed, the number of components classified as true or \ - false and what they changed to, or the cross component metrics that were \ + Information is added to the LGR.info logger. This either logs that + nothing was changed, the number of components classified as true or + false and what they changed to, or the cross component metrics that were calculated """ - if not (comps2use == -1) and not comps2use: LGR.info( f"{function_name_idx} not applied because no remaining components were " f"classified as {decide_comps}" ) + if ifTrue or ifFalse: LGR.info( f"{function_name_idx} applied to {len(comps2use)} components. " f"{numTrue} True -> {ifTrue}. " f"{numFalse} False -> {ifFalse}." ) + if calc_outputs: if "calc_cross_comp_metrics" in calc_outputs: calc_summaries = [ @@ -459,8 +460,7 @@ def log_classification_counts(decision_node_idx, component_table): The LGR.info logger will add a line like: \ 'Step 4: Total component classifications: 10 accepted, 5 provisionalreject, 8 rejected' """ - - (classification_labels, label_counts) = np.unique( + classification_labels, label_counts = np.unique( component_table["classification"].values, return_counts=True ) label_summaries = [ @@ -515,6 +515,7 @@ def getelbow_cons(arr, return_val=False): for d_ in ds: c_ = (c_ + d_) * d_ dsum.append(c_) + e2 = np.argmax(np.array(dsum)) elind = np.max([getelbow(arr), e2]) @@ -525,8 +526,7 @@ def getelbow_cons(arr, return_val=False): def getelbow(arr, return_val=False): - """ - Elbow using linear projection method - moderate + """Get elbow using linear projection method - moderate. Parameters ---------- @@ -702,7 +702,6 @@ def rho_elbow_kundu_liberal( rho elbows are now logged so that it will be possible to confirm this with data & make additional adjustments to this threshold """ - if rho_elbow_type not in ["kundu", "liberal"]: raise ValueError( f"rho_elbow_kundu_liberal: rho_elbow_type must be 'kundu' or 'liberal'" @@ -784,6 +783,7 @@ def get_extend_factor(n_vols=None, extend_factor=None): """ extend_factor is a scaler used to set a threshold for the d_table_score in the kundu decision tree. + It is either defined by the number of volumes in the time series or directly defined by the user. 
    If it is defined by the user, that takes precedence over using the number
    of volumes in a calculation
@@ -806,7 +806,6 @@ def get_extend_factor(n_vols=None, extend_factor=None):
     ----
     Either n_vols OR extend_factor is a required input
     """
-
     if extend_factor:
         if isinstance(extend_factor, int):
             extend_factor = float(extend_factor)
@@ -823,4 +822,5 @@ def get_extend_factor(n_vols=None, extend_factor=None):
         error_msg = "get_extend_factor need n_vols or extend_factor as an input"
         LGR.error(error_msg)
         raise ValueError(error_msg)
+
     return extend_factor
diff --git a/tedana/workflows/tedana.py b/tedana/workflows/tedana.py
index 6031fb07d..fb9683eb7 100644
--- a/tedana/workflows/tedana.py
+++ b/tedana/workflows/tedana.py
@@ -657,8 +657,8 @@ def tedana_workflow(
         "ICA",
         metrics=required_metrics,
     )
-    ica_selection = selection.automatic_selection(comptable, n_echos, n_vols, tree=tree)
-    n_likely_bold_comps = ica_selection.n_likely_bold_comps
+    ica_selector = selection.automatic_selection(comptable, n_echos, n_vols, tree=tree)
+    n_likely_bold_comps = ica_selector.n_likely_bold_comps
     if (n_restarts < maxrestart) and (n_likely_bold_comps == 0):
         LGR.warning("No BOLD components found. Re-attempting ICA.")
     elif n_likely_bold_comps == 0:
@@ -701,7 +701,7 @@ def tedana_workflow(
             "ICA",
             metrics=required_metrics,
         )
-        ica_selection = selection.automatic_selection(
+        ica_selector = selection.automatic_selection(
             comptable,
             n_echos,
             n_vols,
@@ -719,7 +719,7 @@ def tedana_workflow(
         io_generator.save_file(betas_oc, "z-scored ICA components img")
 
         # Save component selector and tree
-        ica_selection.to_files(io_generator)
+        ica_selector.to_files(io_generator)
 
         # Save metrics and metadata
         metric_metadata = metrics.collect.get_metadata(comptable)
         io_generator.save_file(metric_metadata, "ICA metrics json")
@@ -736,16 +736,16 @@ def tedana_workflow(
         }
         io_generator.save_file(decomp_metadata, "ICA decomposition json")
 
-    if ica_selection.n_likely_bold_comps == 0:
+    if ica_selector.n_likely_bold_comps == 0:
         LGR.warning("No BOLD components detected! Please check data and results!")
 
     # TODO: un-hack separate comptable
-    comptable = ica_selection.component_table
+    comptable = ica_selector.component_table
 
     mmix_orig = mmix.copy()
 
     if tedort:
-        comps_accepted = ica_selection.accepted_comps
-        comps_rejected = ica_selection.rejected_comps
+        comps_accepted = ica_selector.accepted_comps
+        comps_rejected = ica_selector.rejected_comps
         acc_ts = mmix[:, comps_accepted]
         rej_ts = mmix[:, comps_rejected]
         betas = np.linalg.lstsq(acc_ts, rej_ts, rcond=None)[0]
@@ -754,7 +754,7 @@ def tedana_workflow(
         mmix[:, comps_rejected] = resid
         comp_names = [
             io.add_decomp_prefix(comp, prefix="ICA", max_value=comptable.index.max())
-            for comp in range(ica_selection.n_comps)
+            for comp in range(ica_selector.n_comps)
         ]
         mixing_df = pd.DataFrame(data=mmix, columns=comp_names)
diff --git a/tedana/workflows/tedana_reclassify.py b/tedana/workflows/tedana_reclassify.py
index 6cfaaef76..37c97951d 100644
--- a/tedana/workflows/tedana_reclassify.py
+++ b/tedana/workflows/tedana_reclassify.py
@@ -265,6 +265,7 @@ def post_tedana(
     for a in acc:
         if a in rej:
             in_both.append(a)
+
     if len(in_both) != 0:
         raise ValueError("The following components were both accepted and rejected: "
                          f"{in_both}")
@@ -307,6 +308,7 @@ def post_tedana(
     else:
         data_oc = ioh.get_file_contents("combined img")
         used_gs = False
+
     io_generator = io.OutputGenerator(
         data_oc,
         convention=convention,
@@ -328,8 +330,10 @@ def post_tedana(
 
     if accept:
         selector.add_manual(accept, "accepted")
+
     if reject:
         selector.add_manual(reject, "rejected")
+
     selector.select()
     comptable = selector.component_table

From 8a6e294643c0dc82e0b2dc76b22a1b3c24338506 Mon Sep 17 00:00:00 2001
From: Joshua Teves
Date: Thu, 19 Jan 2023 10:28:26 -0500
Subject: [PATCH 147/177] Add custom napoleon section "Generated Files"

---
 docs/conf.py                |  1 +
 tedana/decomposition/pca.py |  5 ++---
 tedana/io.py                | 15 ++++++---------
 3 files changed, 9 insertions(+), 12 deletions(-)

diff --git a/docs/conf.py b/docs/conf.py
index fd67829b9..0abb14feb 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -124,6 +124,7 @@
 napoleon_use_param = False
 napoleon_use_keyword = True
 napoleon_use_rtype = False
+napoleon_custom_sections = ["Generated Files"]
 
 # -- Options for HTML output ----------------------------------------------
 
diff --git a/tedana/decomposition/pca.py b/tedana/decomposition/pca.py
index ce18fe840..dfee16772 100644
--- a/tedana/decomposition/pca.py
+++ b/tedana/decomposition/pca.py
@@ -159,9 +159,8 @@ def tedpca(
           - Nonsignificant :math:`{\\kappa}` and :math:`{\\rho}`.
          - Nonsignificant variance explained.
-    Outputs:
-
-    This function writes out several files:
+    Generated Files
+    ---------------
 
     ===========================    =============================================
     Default Filename               Content
diff --git a/tedana/io.py b/tedana/io.py
index b93fe828c..7d1d32e42 100644
--- a/tedana/io.py
+++ b/tedana/io.py
@@ -519,9 +519,8 @@ def write_split_ts(data, mmix, mask, comptable, io_generator, echo=0):
     varexpl : :obj:`float`
         Percent variance of data explained by extracted + retained components
 
-    Notes
-    -----
-    This function writes out several files:
+    Generated Files
+    ---------------
 
     ============================    ============================================
     Filename                        Content
@@ -580,9 +579,8 @@ def writeresults(ts, mask, comptable, mmix, n_vols, io_generator):
     ref_img : :obj:`str` or img_like
         Reference image to dictate how outputs are saved to disk
 
-    Notes
-    -----
-    This function writes out several files:
+    Generated Files
+    ---------------
 
     =========================================    =====================================
     Filename                                     Content
@@ -639,9 +637,8 @@ def writeresults_echoes(catd, mmix, mask, comptable, io_generator):
     ref_img : :obj:`str` or img_like
         Reference image to dictate how outputs are saved to disk
 
-    Notes
-    -----
-    This function writes out several files:
+    Generated Files
+    ---------------
 
     =====================================    ===================================
     Filename                                 Content

From 5e80e9c2a52fb0f9118b1a454345bd5df3a2676d Mon Sep 17 00:00:00 2001
From: Taylor Salo
Date: Sat, 21 Jan 2023 09:30:20 -0500
Subject: [PATCH 148/177] Replace numTrue/numFalse with n_true/n_false.

---
 docs/building_decision_trees.rst     | 12 ++---
 tedana/selection/selection_nodes.py  | 74 ++++++++++++++--------------
 tedana/selection/selection_utils.py  | 18 +++----
 tedana/tests/test_selection_nodes.py | 68 ++++++++++++------------
 tedana/tests/test_selection_utils.py | 14 +++---
 5 files changed, 93 insertions(+), 93 deletions(-)

diff --git a/docs/building_decision_trees.rst b/docs/building_decision_trees.rst
index a4137430c..0747741ba 100644
--- a/docs/building_decision_trees.rst
+++ b/docs/building_decision_trees.rst
@@ -79,7 +79,7 @@ New columns in ``selector.component_table`` and the "ICA metrics tsv" file:
     what happened during execution. Of particular note, each output includes a list
     of the metrics used within the node, "node_label", which is a (hopefully) human
     readable brief description of the node's function and, for nodes where component
-    classifications can change, "numFalse" & "numTrue" list who many components
+    classifications can change, "n_false" & "n_true" list how many components
     changed classifications. The inputted parameters include "ifTrue" and "ifFalse"
     which specify what changes for each component. These fields can be used
     to construct a visual flow chart or text-based summary of how classifications
     changed for each run.
@@ -120,7 +120,7 @@ node_label:
     A brief label for what happens in this node that can be used in a decision
     tree summary table or flow chart.
 
-numTrue, numFalse:
+n_true, n_false:
     For decision tree (dec) functions, the number of components that were
     classified as true or false respectively in this decision tree step.
 
@@ -300,7 +300,7 @@ fields are common and may be used by other parts of the code:
 
 - "decision_node_idx" (required): the ordered index for the current function in
   the decision tree.
 - "node_label" (required): A descriptive label for what happens in the node.
-- "numTrue" & "numFalse" (required for decision functions): For decision functions, +- "n_true" & "n_false" (required for decision functions): For decision functions, the number of components labeled true or false within the function call. - "used_metrics" (required if a function uses metrics): The list of metrics used in the function. This can be hard coded, defined by input parameters, or empty. @@ -342,8 +342,8 @@ Nearly every function has a clause like: if comps2use is None: log_decision_tree_step(function_name_idx, comps2use, decide_comps=decide_comps) - outputs["numTrue"] = 0 - outputs["numFalse"] = 0 + outputs["n_true"] = 0 + outputs["n_false"] = 0 else: If there are no components with the classifications in ``decide_comps`` this logs that @@ -356,7 +356,7 @@ which will update the component_table classifications, update the classification in component_status_table, and update the component classification_tags. Components not in ``decide_comps`` retain their existing classifications and tags. ``change_comptable_classifications`` also returns and should assign values to -``outputs["numTrue"]`` and ``outputs["numFalse"]``. These log how many components were +``outputs["n_true"]`` and ``outputs["n_false"]``. These log how many components were identified as true or false within each function. For calculation functions, the calculated values should be added as a value/key pair to diff --git a/tedana/selection/selection_nodes.py b/tedana/selection/selection_nodes.py index 57b60a4d5..d620eaa30 100644 --- a/tedana/selection/selection_nodes.py +++ b/tedana/selection/selection_nodes.py @@ -134,8 +134,8 @@ def manual_classify( "decision_node_idx": selector.current_node_idx, "used_metrics": set(), "node_label": None, - "numTrue": None, - "numFalse": None, + "n_true": None, + "n_false": None, } if only_used_metrics: @@ -160,11 +160,11 @@ def manual_classify( if not comps2use: log_decision_tree_step(function_name_idx, comps2use, decide_comps=decide_comps) - outputs["numTrue"] = 0 - outputs["numFalse"] = 0 + outputs["n_true"] = 0 + outputs["n_false"] = 0 else: decision_boolean = pd.Series(True, index=comps2use) - selector, outputs["numTrue"], outputs["numFalse"] = change_comptable_classifications( + selector, outputs["n_true"], outputs["n_false"] = change_comptable_classifications( selector, ifTrue, ifFalse, @@ -176,8 +176,8 @@ def manual_classify( log_decision_tree_step( function_name_idx, comps2use, - numTrue=outputs["numTrue"], - numFalse=outputs["numFalse"], + n_true=outputs["n_true"], + n_false=outputs["n_false"], ifTrue=ifTrue, ifFalse=ifFalse, ) @@ -285,8 +285,8 @@ def dec_left_op_right( "used_metrics": set(), "used_cross_component_metrics": set(), "node_label": None, - "numTrue": None, - "numFalse": None, + "n_true": None, + "n_false": None, } function_name_idx = f"Step {selector.current_node_idx}: left_op_right" @@ -454,14 +454,14 @@ def parse_vals(val): return val # should be a fixed number if not comps2use: - outputs["numTrue"] = 0 - outputs["numFalse"] = 0 + outputs["n_true"] = 0 + outputs["n_false"] = 0 log_decision_tree_step( function_name_idx, comps2use, decide_comps=decide_comps, - ifTrue=outputs["numTrue"], - ifFalse=outputs["numFalse"], + ifTrue=outputs["n_true"], + ifFalse=outputs["n_false"], ) else: @@ -485,7 +485,7 @@ def parse_vals(val): # logical dot product for compound statement decision_boolean = statement1 * statement2 - (selector, outputs["numTrue"], outputs["numFalse"],) = change_comptable_classifications( + (selector, outputs["n_true"], outputs["n_false"],) = 
change_comptable_classifications( selector, ifTrue, ifFalse, @@ -493,14 +493,14 @@ def parse_vals(val): tag_ifTrue=tag_ifTrue, tag_ifFalse=tag_ifFalse, ) - # outputs["numTrue"] = np.asarray(decision_boolean).sum() - # outputs["numFalse"] = np.logical_not(decision_boolean).sum() + # outputs["n_true"] = np.asarray(decision_boolean).sum() + # outputs["n_false"] = np.logical_not(decision_boolean).sum() log_decision_tree_step( function_name_idx, comps2use, - numTrue=outputs["numTrue"], - numFalse=outputs["numFalse"], + n_true=outputs["n_true"], + n_false=outputs["n_false"], ifTrue=ifTrue, ifFalse=ifFalse, ) @@ -565,8 +565,8 @@ def dec_variance_lessthan_thresholds( "decision_node_idx": selector.current_node_idx, "used_metrics": set([var_metric]), "node_label": None, - "numTrue": None, - "numFalse": None, + "n_true": None, + "n_false": None, } if only_used_metrics: @@ -592,14 +592,14 @@ def dec_variance_lessthan_thresholds( ) if not comps2use: - outputs["numTrue"] = 0 - outputs["numFalse"] = 0 + outputs["n_true"] = 0 + outputs["n_false"] = 0 log_decision_tree_step( function_name_idx, comps2use, decide_comps=decide_comps, - ifTrue=outputs["numTrue"], - ifFalse=outputs["numFalse"], + ifTrue=outputs["n_true"], + ifFalse=outputs["n_false"], ) else: variance = selector.component_table.loc[comps2use, var_metric] @@ -612,7 +612,7 @@ def dec_variance_lessthan_thresholds( while variance[decision_boolean].sum() > all_comp_threshold: tmpmax = variance == variance[decision_boolean].max() decision_boolean[tmpmax] = False - (selector, outputs["numTrue"], outputs["numFalse"],) = change_comptable_classifications( + (selector, outputs["n_true"], outputs["n_false"],) = change_comptable_classifications( selector, ifTrue, ifFalse, @@ -624,8 +624,8 @@ def dec_variance_lessthan_thresholds( log_decision_tree_step( function_name_idx, comps2use, - numTrue=outputs["numTrue"], - numFalse=outputs["numFalse"], + n_true=outputs["n_true"], + n_false=outputs["n_false"], ifTrue=ifTrue, ifFalse=ifFalse, ) @@ -1052,8 +1052,8 @@ def dec_classification_doesnt_exist( "used_metrics": set(), "used_cross_comp_metrics": set(), "node_label": None, - "numTrue": None, - "numFalse": None, + "n_true": None, + "n_false": None, } if only_used_metrics: @@ -1086,20 +1086,20 @@ def dec_classification_doesnt_exist( do_comps_exist = selectcomps2use(selector, class_comp_exists) if (not comps2use) or (len(do_comps_exist) >= at_least_num_exist): - outputs["numTrue"] = 0 - # If nothing chanages, then assign the number of components in comps2use to numFalse - outputs["numFalse"] = len(comps2use) + outputs["n_true"] = 0 + # If nothing chanages, then assign the number of components in comps2use to n_false + outputs["n_false"] = len(comps2use) log_decision_tree_step( function_name_idx, comps2use, decide_comps=decide_comps, - ifTrue=outputs["numTrue"], - ifFalse=outputs["numFalse"], + ifTrue=outputs["n_true"], + ifFalse=outputs["n_false"], ) else: # do_comps_exist is None: decision_boolean = pd.Series(True, index=comps2use) - selector, outputs["numTrue"], outputs["numFalse"] = change_comptable_classifications( + selector, outputs["n_true"], outputs["n_false"] = change_comptable_classifications( selector, ifTrue, ifFalse, @@ -1110,8 +1110,8 @@ def dec_classification_doesnt_exist( log_decision_tree_step( function_name_idx, comps2use, - numTrue=outputs["numTrue"], - numFalse=outputs["numFalse"], + n_true=outputs["n_true"], + n_false=outputs["n_false"], ifTrue=ifTrue, ifFalse=ifFalse, ) diff --git a/tedana/selection/selection_utils.py 
b/tedana/selection/selection_utils.py index d8825c25a..bd5f7fc5d 100644 --- a/tedana/selection/selection_utils.py +++ b/tedana/selection/selection_utils.py @@ -134,7 +134,7 @@ def change_comptable_classifications( component_table["classification_tags"] will be updated to include any new tags. Each tag should appear only once in the string and tags will be separated by commas. - numTrue, numFalse : :obj:`int` + n_true, n_false : :obj:`int` The number of True and False components in decision_boolean Note @@ -163,9 +163,9 @@ def change_comptable_classifications( f"Node {selector.current_node_idx}" ] = selector.component_table["classification"] - numTrue = decision_boolean.sum() - numFalse = np.logical_not(decision_boolean).sum() - return selector, numTrue, numFalse + n_true = decision_boolean.sum() + n_false = np.logical_not(decision_boolean).sum() + return selector, n_true, n_false def comptable_classification_changer( @@ -371,8 +371,8 @@ def log_decision_tree_step( function_name_idx, comps2use, decide_comps=None, - numTrue=None, - numFalse=None, + n_true=None, + n_false=None, ifTrue=None, ifFalse=None, calc_outputs=None, @@ -397,7 +397,7 @@ def log_decision_tree_step( This is string or a list of strings describing what classifications of components to operate on. Only used in this function to report its contents if no components with these classifications were found - numTrue, numFalse : :obj:`int` + n_true, n_false : :obj:`int` The number of components classified as True or False ifTrue, ifFalse : :obj:`str` If a component is true or false, the classification to assign that @@ -425,8 +425,8 @@ def log_decision_tree_step( if ifTrue or ifFalse: LGR.info( f"{function_name_idx} applied to {len(comps2use)} components. " - f"{numTrue} True -> {ifTrue}. " - f"{numFalse} False -> {ifFalse}." + f"{n_true} True -> {ifTrue}. " + f"{n_false} False -> {ifFalse}." 
) if calc_outputs: diff --git a/tedana/tests/test_selection_nodes.py b/tedana/tests/test_selection_nodes.py index 7182f04ea..52cbdde8a 100644 --- a/tedana/tests/test_selection_nodes.py +++ b/tedana/tests/test_selection_nodes.py @@ -36,15 +36,15 @@ def test_manual_classify_smoke(): ) # There should be 4 selected components and component_status_table should # have a new column "Node 0" - assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numTrue"] == 4 - assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numFalse"] == 0 + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["n_true"] == 4 + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["n_false"] == 0 assert f"Node {selector.current_node_idx}" in selector.component_status_table # No components with "NotALabel" classification so nothing selected and no # Node 1 column not created in component_status_table selector.current_node_idx = 1 selector = selection_nodes.manual_classify(selector, "NotAClassification", new_classification) - assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numTrue"] == 0 + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["n_true"] == 0 assert f"Node {selector.current_node_idx}" not in selector.component_status_table # Changing components from "rejected" to "accepted" and suppressing warning @@ -59,7 +59,7 @@ def test_manual_classify_smoke(): tag="test tag", dont_warn_reclassify=True, ) - assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numTrue"] == 4 + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["n_true"] == 4 assert f"Node {selector.current_node_idx}" in selector.component_status_table @@ -98,8 +98,8 @@ def test_dec_left_op_right_succeeds(): ) # scales are set to make sure 3 components are true and 1 is false using # the sample component table - assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numTrue"] == 3 - assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numFalse"] == 1 + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["n_true"] == 3 + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["n_false"] == 1 assert f"Node {selector.current_node_idx}" in selector.component_status_table # No components with "NotALabel" classification so nothing selected and no @@ -114,7 +114,7 @@ def test_dec_left_op_right_succeeds(): "kappa", "rho", ) - assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numTrue"] == 0 + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["n_true"] == 0 assert f"Node {selector.current_node_idx}" not in selector.component_status_table # Re-initializing selector so that it has components classificated as @@ -130,8 +130,8 @@ def test_dec_left_op_right_succeeds(): "kappa", "test_elbow", ) - assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numTrue"] == 3 - assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numFalse"] == 1 + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["n_true"] == 3 + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["n_false"] == 1 assert f"Node {selector.current_node_idx}" in selector.component_status_table # right is a component_table_metric, left is a cross_component_metric @@ -148,8 +148,8 @@ def test_dec_left_op_right_succeeds(): "kappa", left_scale="new_cc_metric", ) - assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numTrue"] == 
1 - assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numFalse"] == 3 + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["n_true"] == 1 + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["n_false"] == 3 assert f"Node {selector.current_node_idx}" in selector.component_status_table # left component_table_metric, right is a constant integer value @@ -163,8 +163,8 @@ def test_dec_left_op_right_succeeds(): "kappa", 21, ) - assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numTrue"] == 3 - assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numFalse"] == 1 + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["n_true"] == 3 + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["n_false"] == 1 assert f"Node {selector.current_node_idx}" in selector.component_status_table # right component_table_metric, left is a constant float value @@ -178,8 +178,8 @@ def test_dec_left_op_right_succeeds(): 21.0, "kappa", ) - assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numTrue"] == 1 - assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numFalse"] == 3 + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["n_true"] == 1 + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["n_false"] == 3 assert f"Node {selector.current_node_idx}" in selector.component_status_table # Testing combination of two statements. kappa>21 AND rho<14 @@ -196,8 +196,8 @@ def test_dec_left_op_right_succeeds(): op2="<", right2=14, ) - assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numTrue"] == 2 - assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numFalse"] == 2 + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["n_true"] == 2 + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["n_false"] == 2 assert f"Node {selector.current_node_idx}" in selector.component_status_table # Testing combination of three statements. 
kappa>21 AND rho<14 AND 'variance explained'<5 @@ -217,8 +217,8 @@ def test_dec_left_op_right_succeeds(): op3="<", right3=5, ) - assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numTrue"] == 1 - assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numFalse"] == 3 + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["n_true"] == 1 + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["n_false"] == 3 assert f"Node {selector.current_node_idx}" in selector.component_status_table @@ -392,8 +392,8 @@ def test_dec_variance_lessthan_thresholds_smoke(): tag_ifTrue="test true tag", tag_ifFalse="test false tag", ) - assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numTrue"] == 1 - assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numFalse"] == 3 + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["n_true"] == 1 + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["n_false"] == 3 assert f"Node {selector.current_node_idx}" in selector.component_status_table # No components with "NotALabel" classification so nothing selected and no @@ -402,7 +402,7 @@ def test_dec_variance_lessthan_thresholds_smoke(): selector = selection_nodes.dec_variance_lessthan_thresholds( selector, "accepted", "rejected", "NotAClassification" ) - assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numTrue"] == 0 + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["n_true"] == 0 assert f"Node {selector.current_node_idx}" not in selector.component_status_table # Running without specifying logging text generates internal text @@ -410,8 +410,8 @@ def test_dec_variance_lessthan_thresholds_smoke(): selector = selection_nodes.dec_variance_lessthan_thresholds( selector, "accepted", "rejected", decide_comps ) - assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numTrue"] == 0 - assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numFalse"] == 4 + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["n_true"] == 0 + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["n_false"] == 4 assert f"Node {selector.current_node_idx}" in selector.component_status_table @@ -708,9 +708,9 @@ def test_dec_classification_doesnt_exist_smoke(): custom_node_label="custom label", tag="test true tag", ) - assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numTrue"] == 0 - # Lists the number of components in decide_comps in numFalse - assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numFalse"] == 17 + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["n_true"] == 0 + # Lists the number of components in decide_comps in n_false + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["n_false"] == 17 # During normal execution, it will find provionally accepted components # and do nothing so another node isn't created assert f"Node {selector.current_node_idx}" not in selector.component_status_table @@ -725,8 +725,8 @@ def test_dec_classification_doesnt_exist_smoke(): "NotAClassification", class_comp_exists="provisional accept", ) - assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numTrue"] == 0 - assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numTrue"] == 0 + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["n_true"] == 0 + assert 
selector.tree["nodes"][selector.current_node_idx]["outputs"]["n_true"] == 0 assert f"Node {selector.current_node_idx}" not in selector.component_status_table # Other normal state is to change classifications when there are @@ -742,8 +742,8 @@ def test_dec_classification_doesnt_exist_smoke(): class_comp_exists="provisional reject", tag="test true tag", ) - assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numTrue"] == 17 - assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numFalse"] == 0 + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["n_true"] == 17 + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["n_false"] == 0 assert f"Node {selector.current_node_idx}" in selector.component_status_table # Standard execution with at_least_num_exist=5 which should trigger the @@ -760,9 +760,9 @@ def test_dec_classification_doesnt_exist_smoke(): custom_node_label="custom label", tag="test true tag", ) - assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numTrue"] == 17 - # Lists the number of components in decide_comps in numFalse - assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["numFalse"] == 0 + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["n_true"] == 17 + # Lists the number of components in decide_comps in n_false + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["n_false"] == 0 assert f"Node {selector.current_node_idx}" in selector.component_status_table diff --git a/tedana/tests/test_selection_utils.py b/tedana/tests/test_selection_utils.py index a8675595a..717322144 100644 --- a/tedana/tests/test_selection_utils.py +++ b/tedana/tests/test_selection_utils.py @@ -193,7 +193,7 @@ def test_change_comptable_classifications_succeeds(): rho = selector.component_table.loc[comps2use, "rho"] decision_boolean = rho < 13.5 - selector, numTrue, numFalse = selection_utils.change_comptable_classifications( + selector, n_true, n_false = selection_utils.change_comptable_classifications( selector, "accepted", "nochange", @@ -202,8 +202,8 @@ def test_change_comptable_classifications_succeeds(): tag_ifFalse="testing_tag2", ) - assert numTrue == 2 - assert numFalse == 2 + assert n_true == 2 + assert n_false == 2 # check every element that was supposed to change, did change changeidx = decision_boolean.index[np.asarray(decision_boolean) == True] # noqa: E712 new_vals = selector.component_table.loc[changeidx, "classification"] @@ -257,8 +257,8 @@ def test_log_decision_tree_step_smoke(): "Step 0: test_function_name", comps2use, decide_comps="reject", - numTrue=5, - numFalse=2, + n_true=5, + n_false=2, ifTrue="accept", ifFalse="reject", ) @@ -291,8 +291,8 @@ def test_log_decision_tree_step_smoke(): "Step 0: test_function_name", comps2use, decide_comps="NotALabel", - numTrue=5, - numFalse=2, + n_true=5, + n_false=2, ifTrue="accept", ifFalse="reject", ) From 09a5f11edcf71a645a0d0231cb660b4386aae4b0 Mon Sep 17 00:00:00 2001 From: Taylor Salo Date: Sat, 21 Jan 2023 09:32:05 -0500 Subject: [PATCH 149/177] Replace ifTrue/ifFalse with if_true/if_false. 
---
 docs/building_decision_trees.rst               |  4 +-
 .../decision_trees/invalid_kundu_bkup.json     | 52 +++++++-------
 tedana/resources/decision_trees/kundu.json     | 48 ++++++------
 tedana/resources/decision_trees/minimal.json   | 32 ++++-----
 tedana/selection/component_selector.py         |  8 +--
 tedana/selection/selection_nodes.py            | 70 +++++++----------
 tedana/selection/selection_utils.py            | 22 +++---
 tedana/tests/test_component_selector.py        |  8 +--
 tedana/tests/test_selection_utils.py           |  8 +--
 9 files changed, 126 insertions(+), 126 deletions(-)

diff --git a/docs/building_decision_trees.rst b/docs/building_decision_trees.rst
index 0747741ba..7c5f6f65b 100644
--- a/docs/building_decision_trees.rst
+++ b/docs/building_decision_trees.rst
@@ -80,7 +80,7 @@ New columns in ``selector.component_table`` and the "ICA metrics tsv" file:
     of the metrics used within the node, "node_label", which is a (hopefully) human
     readable brief description of the node's function and, for nodes where component
     classifications can change, "n_false" & "n_true" list how many components
-    changed classifications. The inputted parameters include "ifTrue" and "ifFalse"
+    changed classifications. The inputted parameters include "if_true" and "if_false"
     which specify what changes for each component. These fields can be used
     to construct a visual flow chart or text-based summary of how classifications
     changed for each run.
@@ -253,7 +253,7 @@ In addition to the intermediate and default ("accepted" "rejected" "unclassified")
 component classifications, this can be "all" for functions that should be applied
 to all components regardless of their classifications.
 
-Most decision functions also include "ifTrue" and "ifFalse" which specify how to change
+Most decision functions also include "if_true" and "if_false" which specify how to change
 the classification of each component based on whether the decision criterion is true or
 false. In addition to the default and intermediate classification options, this can
 also be "nochange" (i.e. For components where a>b is true, "reject". For components
diff --git a/tedana/resources/decision_trees/invalid_kundu_bkup.json b/tedana/resources/decision_trees/invalid_kundu_bkup.json
index 19d6c650b..e2373b4be 100644
--- a/tedana/resources/decision_trees/invalid_kundu_bkup.json
+++ b/tedana/resources/decision_trees/invalid_kundu_bkup.json
@@ -30,8 +30,8 @@
     {
         "functionname": "metric1_greaterthan_metric2",
         "parameters": {
-            "ifTrue": "rejected",
-            "ifFalse": "nochange",
+            "if_true": "rejected",
+            "if_false": "nochange",
             "decide_comps": "all",
             "metric1": "rho",
             "metric2": "kappa"
@@ -45,8 +45,8 @@
     {
         "functionname": "metric1_greaterthan_metric2",
         "parameters": {
-            "ifTrue": "rejected",
-            "ifFalse": "nochange",
+            "if_true": "rejected",
+            "if_false": "nochange",
             "decide_comps": "all",
             "metric1": "countsigFS0",
             "metric2": "countsigFT2"
@@ -60,8 +60,8 @@
     {
         "functionname": "metric1_greaterthan_metric2",
         "parameters": {
-            "ifTrue": "rejected",
-            "ifFalse": "nochange",
+            "if_true": "rejected",
+            "if_false": "nochange",
             "decide_comps": "all",
             "metric1": "dice_FS0",
             "metric2": "dice_FT2"
@@ -75,8 +75,8 @@
     {
         "functionname": "metric1_greaterthan_metric2",
         "parameters": {
-            "ifTrue": "rejected",
-            "ifFalse": "nochange",
+            "if_true": "rejected",
+            "if_false": "nochange",
             "decide_comps": "all",
             "metric1": 0,
             "metric2": "signal-noise_t"
@@ -90,8 +90,8 @@
     {
         "functionname": "kappa_rho_elbow_cutoffs_kundu",
         "parameters": {
-            "ifTrue": "provisionalaccept",
-            "ifFalse": "provisionalreject",
+            "if_true": "provisionalaccept",
+            "if_false": "provisionalreject",
             "decide_comps": "unclassified",
             "n_echos": null
         },
@@ -103,8 +103,8 @@
     {
         "functionname": "classification_exists",
         "parameters": {
-            "ifTrue": "nochange",
-            "ifFalse": "ignored",
+            "if_true": "nochange",
+            "if_false": "ignored",
             "decide_comps": [
                 "provisionalaccept",
                 "provisionalreject"
@@ -119,8 +119,8 @@
     {
         "functionname": "meanmetricrank_and_variance_greaterthan_thresh",
         "parameters": {
-            "ifTrue": "rejected",
-            "ifFalse": "nochange",
+            "if_true": "rejected",
+            "if_false": "nochange",
             "decide_comps": [
                 "provisionalaccept",
                 "provisionalreject"
@@ -136,8 +136,8 @@
     {
         "functionname": "lowvariance_highmeanmetricrank_lowkappa",
         "parameters": {
-            "ifTrue": "ignored",
-            "ifFalse": "nochange",
+            "if_true": "ignored",
+            "if_false": "nochange",
             "decide_comps": [
                 "provisionalaccept",
                 "provisionalreject"
@@ -154,8 +154,8 @@
     {
         "functionname": "classification_exists",
         "parameters": {
-            "ifTrue": "nochange",
-            "ifFalse": "accepted",
+            "if_true": "nochange",
+            "if_false": "accepted",
             "decide_comps": [
                 "provisionalaccept",
                 "provisionalreject"
@@ -170,8 +170,8 @@
     {
         "functionname": "highvariance_highmeanmetricrank_highkapparatio",
         "parameters": {
-            "ifTrue": "rejected",
-            "ifFalse": "nochange",
+            "if_true": "rejected",
+            "if_false": "nochange",
             "decide_comps": [
                 "provisionalaccept",
                 "provisionalreject"
@@ -188,8 +188,8 @@
     {
         "functionname": "highvariance_highmeanmetricrank",
         "parameters": {
-            "ifTrue": "rejected",
-            "ifFalse": "nochange",
+            "if_true": "rejected",
+            "if_false": "nochange",
             "decide_comps": [
                 "provisionalaccept",
                 "provisionalreject"
@@ -206,8 +206,8 @@
     {
         "functionname": "highvariance_highmeanmetricrank",
         "parameters": {
-            "ifTrue": "ignored",
-            "ifFalse": "nochange",
+            "if_true": "ignored",
+            "if_false": "nochange",
             "decide_comps": [
                 "provisionalaccept",
                 "provisionalreject"
@@ -226,8 +226,8 @@
     {
         "functionname": "highvariance_lowkappa",
         "parameters": {
-            "ifTrue": "ignored",
-            "ifFalse": "nochange",
+            "if_true": "ignored",
+            "if_false": "nochange",
             "decide_comps": [
                 "provisionalaccept",
                 "provisionalreject"
diff --git a/tedana/resources/decision_trees/kundu.json b/tedana/resources/decision_trees/kundu.json
index 7d39eeb84..a4ee05158 100644
--- a/tedana/resources/decision_trees/kundu.json
+++ b/tedana/resources/decision_trees/kundu.json
@@ -46,8 +46,8 @@
     {
         "functionname": "dec_left_op_right",
         "parameters": {
-            "ifTrue": "rejected",
-            "ifFalse": "nochange",
+            "if_true": "rejected",
+            "if_false": "nochange",
             "decide_comps": "all",
             "op": ">",
             "left": "rho",
@@ -61,8 +61,8 @@
     {
         "functionname": "dec_left_op_right",
         "parameters": {
-            "ifTrue": "rejected",
-            "ifFalse": "nochange",
+            "if_true": "rejected",
+            "if_false": "nochange",
             "decide_comps": "all",
             "op": ">",
             "left": "countsigFS0",
@@ -87,8 +87,8 @@
     {
         "functionname": "dec_left_op_right",
         "parameters": {
-            "ifTrue": "rejected",
-            "ifFalse": "nochange",
+            "if_true": "rejected",
+            "if_false": "nochange",
             "decide_comps": "all",
             "op": ">",
             "left": "dice_FS0",
@@ -105,8 +105,8 @@
     {
         "functionname": "dec_left_op_right",
         "parameters": {
-            "ifTrue": "rejected",
-            "ifFalse": "nochange",
+            "if_true": "rejected",
+            "if_false": "nochange",
             "decide_comps": "all",
             "op": ">",
             "left": 0,
@@ -147,8 +147,8 @@
     {
         "functionname": "dec_left_op_right",
         "parameters": {
-            "ifTrue": "provisionalaccept",
-            "ifFalse": "nochange",
+            "if_true": "provisionalaccept",
+            "if_false": "nochange",
             "decide_comps": "unclassified",
             "op": ">=",
             "left": "kappa",
@@ -161,8 +161,8 @@
     {
         "functionname": "dec_left_op_right",
         "parameters": {
-            "ifTrue": "unclassified",
-            "ifFalse": "nochange",
+            "if_true": "unclassified",
+            "if_false": "nochange",
             "decide_comps": [
                 "provisionalaccept"
             ],
@@ -235,8 +235,8 @@
     {
         "functionname": "dec_left_op_right",
         "parameters": {
-            "ifTrue": "rejected",
-            "ifFalse": "nochange",
+            "if_true": "rejected",
+            "if_false": "nochange",
             "decide_comps": [
                 "provisionalaccept",
                 "unclassified"
@@ -258,8 +258,8 @@
     {
         "functionname": "dec_left_op_right",
         "parameters": {
-            "ifTrue": "accepted",
-            "ifFalse": "nochange",
+            "if_true": "accepted",
+            "if_false": "nochange",
             "decide_comps": [
                 "provisionalaccept",
                 "unclassified"
@@ -311,8 +311,8 @@
     {
         "functionname": "dec_left_op_right",
         "parameters": {
-            "ifTrue": "rejected",
-            "ifFalse": "nochange",
+            "if_true": "rejected",
+            "if_false": "nochange",
             "decide_comps": [
                 "provisionalaccept",
                 "unclassified"
@@ -338,8 +338,8 @@
     {
         "functionname": "dec_left_op_right",
         "parameters": {
-            "ifTrue": "rejected",
-            "ifFalse": "nochange",
+            "if_true": "rejected",
+            "if_false": "nochange",
             "decide_comps": [
                 "provisionalaccept",
                 "unclassified"
@@ -376,8 +376,8 @@
     {
         "functionname": "dec_left_op_right",
         "parameters": {
-            "ifTrue": "accepted",
-            "ifFalse": "nochange",
+            "if_true": "accepted",
+            "if_false": "nochange",
             "decide_comps": [
                 "provisionalaccept",
                 "unclassified"
@@ -398,8 +398,8 @@
     {
         "functionname": "dec_left_op_right",
         "parameters": {
-            "ifTrue": "accepted",
-            "ifFalse": "nochange",
+            "if_true": "accepted",
+            "if_false": "nochange",
             "decide_comps": [
                 "provisionalaccept",
                 "unclassified"
diff --git a/tedana/resources/decision_trees/minimal.json b/tedana/resources/decision_trees/minimal.json
index 52b9177c5..eb247305e 100644
--- a/tedana/resources/decision_trees/minimal.json
+++ b/tedana/resources/decision_trees/minimal.json
@@ -38,8 +38,8 @@
     {
         "functionname": "dec_left_op_right",
         "parameters": {
-            "ifTrue": "rejected",
-            "ifFalse": "nochange",
+            "if_true": "rejected",
+            "if_false": "nochange",
             "decide_comps": "all",
             "op": ">",
             "left": "rho",
@@ -53,8 +53,8 @@
     {
         "functionname": "dec_left_op_right",
         "parameters": {
-            "ifTrue": "rejected",
-            "ifFalse": "nochange",
+            "if_true": "rejected",
+            "if_false": "nochange",
             "decide_comps": "all",
             "op": ">",
             "left": "countsigFS0",
@@ -79,8 +79,8 @@
     {
         "functionname": "dec_left_op_right",
         "parameters": {
-            "ifTrue": "rejected",
-            "ifFalse": "nochange",
+            "if_true": "rejected",
+            "if_false": "nochange",
             "decide_comps": "all",
             "op": ">",
             "left": "dice_FS0",
@@ -97,8 +97,8 @@
     {
         "functionname": "dec_left_op_right",
         "parameters": {
-            "ifTrue": "rejected",
-            "ifFalse": "nochange",
+            "if_true": "rejected",
+            "if_false": "nochange",
             "decide_comps": "all",
             "op": ">",
             "left": 0,
@@ -139,8 +139,8 @@
     {
         "functionname": "dec_left_op_right",
         "parameters": {
-            "ifTrue": "provisionalaccept",
-            "ifFalse": "provisionalreject",
+            "if_true": "provisionalaccept",
+            "if_false": "provisionalreject",
             "decide_comps": "unclassified",
             "op": ">=",
             "left": "kappa",
@@ -153,8 +153,8 @@
     {
         "functionname": "dec_left_op_right",
         "parameters": {
-            "ifTrue": "accepted",
-            "ifFalse": "nochange",
+            "if_true": "accepted",
+            "if_false": "nochange",
             "decide_comps": "provisionalaccept",
             "op": ">",
             "left": "kappa",
@@ -170,8 +170,8 @@
     {
         "functionname": "dec_left_op_right",
         "parameters": {
-            "ifTrue": "provisionalreject",
-            "ifFalse": "nochange",
+            "if_true": "provisionalreject",
+            "if_false": "nochange",
             "decide_comps": [
                 "provisionalreject",
                 "provisionalaccept"
@@ -187,8 +187,8 @@
     {
         "functionname": "dec_variance_lessthan_thresholds",
         "parameters": {
-            "ifTrue": "accepted",
-            "ifFalse": "nochange",
+            "if_true": "accepted",
+            "if_false": "nochange",
             "decide_comps": "provisionalreject"
         },
         "kwargs": {
diff --git a/tedana/selection/component_selector.py b/tedana/selection/component_selector.py
index cb083174e..ffa9eb210 100644
--- a/tedana/selection/component_selector.py
+++ b/tedana/selection/component_selector.py
@@ -178,13 +178,13 @@ def validate_tree(tree):
         # "provisionalaccepted" they won't be included and there might not
         # be any other warnings
         compclass = set()
-        if "ifTrue" in node.get("parameters").keys():
-            tmp_comp = node["parameters"]["ifTrue"]
+        if "if_true" in node.get("parameters").keys():
+            tmp_comp = node["parameters"]["if_true"]
             if isinstance(tmp_comp, str):
                 tmp_comp = [tmp_comp]
             compclass = compclass | set(tmp_comp)
-        if "ifFalse" in node.get("parameters").keys():
-            tmp_comp = node["parameters"]["ifFalse"]
+        if "if_false" in node.get("parameters").keys():
+            tmp_comp = node["parameters"]["if_false"]
             if isinstance(tmp_comp, str):
                 tmp_comp = [tmp_comp]
             compclass = compclass | set(tmp_comp)
diff --git a/tedana/selection/selection_nodes.py b/tedana/selection/selection_nodes.py
index d620eaa30..4b96713ff 100644
--- a/tedana/selection/selection_nodes.py
+++ b/tedana/selection/selection_nodes.py
@@ -28,10 +28,10 @@
 selector : :obj:`tedana.selection.component_selector.ComponentSelector`
     The selector to perform decision tree-based component selection with.""",
     "ifTrueFalse": """\
-ifTrue : :obj:`str`
+if_true : :obj:`str`
     If the condition in this step is True, give the component classification this
     label. Use 'nochange' if no label changes are desired.
-    ifFalse : :obj:`str`
+    if_false : :obj:`str`
     If the condition in this step is False, give the component classification this
     label. Use 'nochange' to indicate if no label changes are desired.
 """,
@@ -125,7 +125,7 @@ def manual_classify(
     penultimate node in the decision tree. (3) Manually re-classify components
    by number based on user observations.
-    Unlike other decision node functions, ``ifTrue`` and ``ifFalse`` are not inputs
+    Unlike other decision node functions, ``if_true`` and ``if_false`` are not inputs
     since the same classification is assigned to all components listed in
     ``decide_comps``.
     """
@@ -141,8 +141,8 @@ def manual_classify(
     if only_used_metrics:
         return outputs["used_metrics"]
 
-    ifTrue = new_classification
-    ifFalse = "nochange"
+    if_true = new_classification
+    if_false = "nochange"
 
     function_name_idx = "Step {}: manual_classify".format((selector.current_node_idx))
     if custom_node_label:
@@ -166,8 +166,8 @@ def manual_classify(
         decision_boolean = pd.Series(True, index=comps2use)
         selector, outputs["n_true"], outputs["n_false"] = change_comptable_classifications(
             selector,
-            ifTrue,
-            ifFalse,
+            if_true,
+            if_false,
             decision_boolean,
             tag_ifTrue=tag,
             dont_warn_reclassify=dont_warn_reclassify,
@@ -178,8 +178,8 @@ def manual_classify(
         comps2use,
         n_true=outputs["n_true"],
         n_false=outputs["n_false"],
-        ifTrue=ifTrue,
-        ifFalse=ifFalse,
+        if_true=if_true,
+        if_false=if_false,
     )
 
     if clear_classification_tags:
@@ -196,8 +196,8 @@ def manual_classify(
 
 def dec_left_op_right(
     selector,
-    ifTrue,
-    ifFalse,
+    if_true,
+    if_false,
     decide_comps,
     op,
     left,
@@ -436,7 +436,7 @@ def operator_scale_descript(val_scale, val):
     # Might want to add additional default logging to functions here
     # The function input will be logged before the function call
-    LGR.info(f"{function_name_idx}: {ifTrue} if {outputs['node_label']}, else {ifFalse}")
+    LGR.info(f"{function_name_idx}: {if_true} if {outputs['node_label']}, else {if_false}")
     if log_extra_info:
         LGR.info(f"{function_name_idx} {log_extra_info}")
     if log_extra_report:
@@ -460,8 +460,8 @@ def parse_vals(val):
             function_name_idx,
             comps2use,
             decide_comps=decide_comps,
-            ifTrue=outputs["n_true"],
-            ifFalse=outputs["n_false"],
+            if_true=outputs["n_true"],
+            if_false=outputs["n_false"],
         )
 
     else:
@@ -487,8 +487,8 @@ def parse_vals(val):
         (selector, outputs["n_true"], outputs["n_false"],) = change_comptable_classifications(
             selector,
-            ifTrue,
-            ifFalse,
+            if_true,
+            if_false,
             decision_boolean,
             tag_ifTrue=tag_ifTrue,
             tag_ifFalse=tag_ifFalse,
@@ -501,8 +501,8 @@ def parse_vals(val):
         comps2use,
         n_true=outputs["n_true"],
         n_false=outputs["n_false"],
-        ifTrue=ifTrue,
-        ifFalse=ifFalse,
+        if_true=if_true,
+        if_false=if_false,
     )
 
     selector.tree["nodes"][selector.current_node_idx]["outputs"] = outputs
@@ -515,8 +515,8 @@ def parse_vals(val):
 
 def dec_variance_lessthan_thresholds(
     selector,
-    ifTrue,
-    ifFalse,
+    if_true,
+    if_false,
     decide_comps,
     var_metric="variance explained",
     single_comp_threshold=0.1,
@@ -580,7 +580,7 @@ def dec_variance_lessthan_thresholds(
         "node_label"
     ] = f"{var_metric}<{single_comp_threshold}. All variance<{all_comp_threshold}"
 
-    LGR.info(f"{function_name_idx}: {ifTrue} if {outputs['node_label']}, else {ifFalse}")
+    LGR.info(f"{function_name_idx}: {if_true} if {outputs['node_label']}, else {if_false}")
     if log_extra_info:
         LGR.info(f"{function_name_idx} {log_extra_info}")
     if log_extra_report:
@@ -598,8 +598,8 @@ def dec_variance_lessthan_thresholds(
             function_name_idx,
             comps2use,
             decide_comps=decide_comps,
-            ifTrue=outputs["n_true"],
-            ifFalse=outputs["n_false"],
+            if_true=outputs["n_true"],
+            if_false=outputs["n_false"],
         )
     else:
         variance = selector.component_table.loc[comps2use, var_metric]
@@ -614,8 +614,8 @@ def dec_variance_lessthan_thresholds(
             decision_boolean[tmpmax] = False
         (selector, outputs["n_true"], outputs["n_false"],) = change_comptable_classifications(
             selector,
-            ifTrue,
-            ifFalse,
+            if_true,
+            if_false,
             decision_boolean,
             tag_ifTrue=tag_ifTrue,
             tag_ifFalse=tag_ifFalse,
@@ -626,8 +626,8 @@ def dec_variance_lessthan_thresholds(
         comps2use,
         n_true=outputs["n_true"],
         n_false=outputs["n_false"],
-        ifTrue=ifTrue,
-        ifFalse=ifFalse,
+        if_true=if_true,
+        if_false=if_false,
     )
 
     selector.tree["nodes"][selector.current_node_idx]["outputs"] = outputs
@@ -1078,8 +1078,8 @@ def dec_classification_doesnt_exist(
     if log_extra_report:
         RepLGR.info(log_extra_report)
 
-    ifTrue = new_classification
-    ifFalse = "nochange"
+    if_true = new_classification
+    if_false = "nochange"
 
     comps2use = selectcomps2use(selector, decide_comps)
 
@@ -1093,16 +1093,16 @@ def dec_classification_doesnt_exist(
             function_name_idx,
             comps2use,
             decide_comps=decide_comps,
-            ifTrue=outputs["n_true"],
-            ifFalse=outputs["n_false"],
+            if_true=outputs["n_true"],
+            if_false=outputs["n_false"],
         )
     else:  # do_comps_exist is None:
         decision_boolean = pd.Series(True, index=comps2use)
         selector, outputs["n_true"], outputs["n_false"] = change_comptable_classifications(
             selector,
-            ifTrue,
-            ifFalse,
+            if_true,
+            if_false,
             decision_boolean,
             tag_ifTrue=tag,
         )
@@ -1112,8 +1112,8 @@ def dec_classification_doesnt_exist(
         comps2use,
         n_true=outputs["n_true"],
         n_false=outputs["n_false"],
-        ifTrue=ifTrue,
-        ifFalse=ifFalse,
+        if_true=if_true,
+        if_false=if_false,
     )
 
     selector.tree["nodes"][selector.current_node_idx]["outputs"] = outputs
diff --git a/tedana/selection/selection_utils.py b/tedana/selection/selection_utils.py
index bd5f7fc5d..bf919c22a 100644
--- a/tedana/selection/selection_utils.py
+++ b/tedana/selection/selection_utils.py
@@ -89,8 +89,8 @@ def selectcomps2use(selector, decide_comps):
 
 def change_comptable_classifications(
     selector,
-    ifTrue,
-    ifFalse,
+    if_true,
+    if_false,
     decision_boolean,
     tag_ifTrue=None,
     tag_ifFalse=None,
@@ -105,7 +105,7 @@ def change_comptable_classifications(
     selector : :obj:`tedana.selection.component_selector.ComponentSelector`
         The attributes used are component_table, component_status_table, and
         current_node_idx
-    ifTrue, ifFalse : :obj:`str`
+    if_true, if_false : :obj:`str`
         If the condition in this step is true or false, give the component
        the label in this string.
        Options are 'accepted', 'rejected',
        'nochange', or intermediate_classification labels predefined in the
@@ -145,7 +145,7 @@ def change_comptable_classifications(
     selector = comptable_classification_changer(
         selector,
         True,
-        ifTrue,
+        if_true,
         decision_boolean,
         tag_if=tag_ifTrue,
         dont_warn_reclassify=dont_warn_reclassify,
@@ -153,7 +153,7 @@ def change_comptable_classifications(
     selector = comptable_classification_changer(
         selector,
         False,
-        ifFalse,
+        if_false,
         decision_boolean,
         tag_if=tag_ifFalse,
         dont_warn_reclassify=dont_warn_reclassify,
@@ -373,8 +373,8 @@ def log_decision_tree_step(
     decide_comps=None,
     n_true=None,
     n_false=None,
-    ifTrue=None,
-    ifFalse=None,
+    if_true=None,
+    if_false=None,
     calc_outputs=None,
 ):
     """Logging text to add after every decision tree calculation
 
     Parameters
     ----------
@@ -399,7 +399,7 @@ def log_decision_tree_step(
         its contents if no components with these classifications were found
     n_true, n_false : :obj:`int`
         The number of components classified as True or False
-    ifTrue, ifFalse : :obj:`str`
+    if_true, if_false : :obj:`str`
         If a component is true or false, the classification to assign that
         component
     calc_outputs : :obj:`dict`
@@ -422,11 +422,11 @@ def log_decision_tree_step(
             f"classified as {decide_comps}"
         )
 
-    if ifTrue or ifFalse:
+    if if_true or if_false:
         LGR.info(
             f"{function_name_idx} applied to {len(comps2use)} components. "
-            f"{n_true} True -> {ifTrue}. "
-            f"{n_false} False -> {ifFalse}."
+            f"{n_true} True -> {if_true}. "
+            f"{n_false} False -> {if_false}."
         )
 
     if calc_outputs:
diff --git a/tedana/tests/test_component_selector.py b/tedana/tests/test_component_selector.py
index 487119c74..fe5c622a3 100644
--- a/tedana/tests/test_component_selector.py
+++ b/tedana/tests/test_component_selector.py
@@ -59,8 +59,8 @@ def dicts_to_test(treechoice):
         {
             "functionname": "dec_left_op_right",
             "parameters": {
-                "ifTrue": "rejected",
-                "ifFalse": "nochange",
+                "if_true": "rejected",
+                "if_false": "nochange",
                 "decide_comps": "all",
                 "op": ">",
                 "left": "rho",
@@ -74,8 +74,8 @@ def dicts_to_test(treechoice):
         {
             "functionname": "dec_left_op_right",
             "parameters": {
-                "ifTrue": "random2",
-                "ifFalse": "nochange",
+                "if_true": "random2",
+                "if_false": "nochange",
                 "decide_comps": "all",
                 "op": ">",
                 "left": "kappa",
diff --git a/tedana/tests/test_selection_utils.py b/tedana/tests/test_selection_utils.py
index 717322144..fa099c580 100644
--- a/tedana/tests/test_selection_utils.py
+++ b/tedana/tests/test_selection_utils.py
@@ -259,8 +259,8 @@ def test_log_decision_tree_step_smoke():
         decide_comps="reject",
         n_true=5,
         n_false=2,
-        ifTrue="accept",
-        ifFalse="reject",
+        if_true="accept",
+        if_false="reject",
     )
 
@@ -293,8 +293,8 @@ def test_log_decision_tree_step_smoke():
         decide_comps="NotALabel",
         n_true=5,
         n_false=2,
-        ifTrue="accept",
-        ifFalse="reject",
+        if_true="accept",
+        if_false="reject",
     )

From d0b66391e60484a8611ad24f6ea833e318b5706d Mon Sep 17 00:00:00 2001
From: Taylor Salo
Date: Sat, 21 Jan 2023 10:10:18 -0500
Subject: [PATCH 150/177] Use fill_doc.
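
fill_doc() substitutes %(name)s placeholders in a function's docstring with
the matching docdict entries at import time (via ``docstring % indented``).
A minimal sketch of the pattern; ``example_node`` is a hypothetical function,
not part of this diff:

    from tedana.docs import fill_doc

    @fill_doc
    def example_node(selector, decide_comps):
        """Illustrate docstring injection only.

        Parameters
        ----------
        %(selector)s
        %(decide_comps)s
        """
        return selector

Note that docdict["selector"] is assigned twice in tedana/docs.py, so the
second assignment (the Returns wording) is the one that gets injected.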
---
 tedana/docs.py                      | 148 ++++++++++++
 tedana/metrics/collect.py           |   4 +-
 tedana/selection/selection_nodes.py | 338 +++++++++++-----------------
 3 files changed, 284 insertions(+), 206 deletions(-)
 create mode 100644 tedana/docs.py

diff --git a/tedana/docs.py b/tedana/docs.py
new file mode 100644
index 000000000..4058c9407
--- /dev/null
+++ b/tedana/docs.py
@@ -0,0 +1,148 @@
+"""Documentation to be injected into docstrings."""
+import sys
+
+###################################
+# Standard documentation entries
+docdict = dict()
+
+docdict["selector"] = """
+selector : :obj:`tedana.selection.component_selector.ComponentSelector`
+    The selector to perform decision tree-based component selection with.
+"""
+
+docdict["if_true"] = """
+if_true : :obj:`str`
+    If the condition in this step is True, give the component classification this
+    label. Use 'nochange' if no label changes are desired.
+"""
+
+docdict["if_false"] = """
+if_false : :obj:`str`
+    If the condition in this step is False, give the component classification this
+    label. Use 'nochange' to indicate if no label changes are desired.
+"""
+
+docdict["decide_comps"] = """
+decide_comps : :obj:`str` or :obj:`list[str]`
+    What classification(s) to operate on, using default or
+    intermediate_classification labels. For example: decide_comps='unclassified'
+    means to operate only on unclassified components. Use 'all' to include all
+    components.
+"""
+
+docdict["log_extra_report"] = """
+log_extra_report : :obj:`str`
+    Additional text to the report log. Default="".
+"""
+
+docdict["log_extra_info"] = """
+log_extra_info : :obj:`str`
+    Additional text to the information log. Default="".
+"""
+
+docdict["only_used_metrics"] = """
+only_used_metrics : :obj:`bool`
+    If True, only return the component_table metrics that would be used. Default=False.
+"""
+
+docdict["custom_node_label"] = """
+custom_node_label : :obj:`str`
+    A short label to describe what happens in this step. If "" then a label is
+    automatically generated. Default="".
+"""
+
+docdict["tag_ifTrue"] = """
+tag_ifTrue : :obj:`str`
+    The classification tag to apply if a component is classified True. Default="".
+"""
+
+docdict["tag_ifFalse"] = """
+tag_ifFalse : :obj:`str`
+    The classification tag to apply if a component is classified False. Default="".
+"""
+
+docdict["selector"] = """
+selector : :obj:`~tedana.selection.component_selector.ComponentSelector`
+    If only_used_metrics is False, the updated selector is returned.
+"""
+
+docdict["used_metrics"] = """
+used_metrics : :obj:`set(str)`
+    If only_used_metrics is True, the names of the metrics used in the
+    function are returned.
+"""
+
+docdict_indented = {}
+
+
+def _indentcount_lines(lines):
+    """Minimum indent for all lines in line list.
+
+    >>> lines = [' one', ' two', ' three']
+    >>> _indentcount_lines(lines)
+    1
+    >>> lines = []
+    >>> _indentcount_lines(lines)
+    0
+    >>> lines = [' one']
+    >>> _indentcount_lines(lines)
+    1
+    >>> _indentcount_lines(['    '])
+    0
+
+    """
+    indentno = sys.maxsize
+    for line in lines:
+        stripped = line.lstrip()
+        if stripped:
+            indentno = min(indentno, len(line) - len(stripped))
+    if indentno == sys.maxsize:
+        return 0
+    return indentno
+
+
+def fill_doc(f):
+    """Fill a docstring with docdict entries.
+
+    Parameters
+    ----------
+    f : callable
+        The function to fill the docstring of. Will be modified in place.
+
+    Returns
+    -------
+    f : callable
+        The function, potentially with an updated ``__doc__``.
+ + """ + docstring = f.__doc__ + if not docstring: + return f + lines = docstring.splitlines() + # Find the minimum indent of the main docstring, after first line + if len(lines) < 2: + icount = 0 + else: + icount = _indentcount_lines(lines[1:]) + # Insert this indent to dictionary docstrings + try: + indented = docdict_indented[icount] + except KeyError: + indent = " " * icount + docdict_indented[icount] = indented = {} + for name, dstr in docdict.items(): + lines = dstr.splitlines() + try: + newlines = [lines[0]] + for line in lines[1:]: + newlines.append(indent + line) + indented[name] = "\n".join(newlines) + except IndexError: + indented[name] = dstr + try: + f.__doc__ = docstring % indented + except (TypeError, ValueError, KeyError) as exp: + funcname = f.__name__ + funcname = docstring.split("\n")[0] if funcname is None else funcname + raise RuntimeError(f"Error documenting {funcname}:\n{str(exp)}") + return f diff --git a/tedana/metrics/collect.py b/tedana/metrics/collect.py index c8cd8b076..ce831be64 100644 --- a/tedana/metrics/collect.py +++ b/tedana/metrics/collect.py @@ -8,8 +8,8 @@ from tedana import io, utils from tedana.stats import getfbounds -from . import dependence -from ._utils import dependency_resolver, determine_signs, flip_components +from tedana.metrics import dependence +from tedana.metrics._utils import dependency_resolver, determine_signs, flip_components LGR = logging.getLogger("GENERAL") RepLGR = logging.getLogger("REPORT") diff --git a/tedana/selection/selection_nodes.py b/tedana/selection/selection_nodes.py index 4b96713ff..ceee39020 100644 --- a/tedana/selection/selection_nodes.py +++ b/tedana/selection/selection_nodes.py @@ -1,6 +1,4 @@ -""" -Functions that will be used as steps in a decision tree -""" +"""Functions that will be used as steps in a decision tree.""" import logging import numpy as np @@ -17,57 +15,15 @@ rho_elbow_kundu_liberal, selectcomps2use, ) +from tedana.docs import fill_doc LGR = logging.getLogger("GENERAL") RepLGR = logging.getLogger("REPORT") RefLGR = logging.getLogger("REFERENCES") -DECISION_DOCS = { - "selector": """\ -selector : :obj:`tedana.selection.component_selector.ComponentSelector` - The selector to perform decision tree-based component selection with.""", - "ifTrueFalse": """\ -if_true : :obj:`str` - If the condition in this step is True, give the component classification this - label. Use 'nochange' if no label changes are desired. - if_false : :obj:`str` - If the condition in this step is False, give the component classification this - label. Use 'nochange' to indicate if no label changes are desired. -""", - "decide_comps": """\ -decide_comps : :obj:`str` or :obj:`list[str]` - What classification(s) to operate on. using default or - intermediate_classification labels. For example: decide_comps='unclassified' - means to operate only on unclassified components. Use 'all' to include all - components.""", - "log_extra_report": """\ -log_extra_report : :obj:`str` - Additional text to the report log. Default="".""", - "log_extra_info": """\ -log_extra_info : :obj:`str` - Additional text to the information log. Default="".""", - "only_used_metrics": """\ -only_used_metrics : :obj:`bool` - If True, only return the component_table metrics that would be used. Default=False.""", - "custom_node_label": """\ -custom_node_label : :obj:`str` - A short label to describe what happens in this step. If "" then a label is - automatically generated. 
Default="".""", - "tag_ifTrueFalse": """\ -tag_ifTrue : :obj:`str` - The classification tag to apply if a component is classified True. Default="". - tag_ifFalse : :obj:`str` - The classification tag to apply if a component is classified False. Default="".""", - "basicreturns": """\ -selector : :obj:`tedana.selection.component_selector.ComponentSelector` - If only_used_metrics is False, the updated selector is returned - used_metrics : :obj:`set(str)` - If only_used_metrics is True, the names of the metrics used in the - function are returned""", -} - +@fill_doc def manual_classify( selector, decide_comps, @@ -80,13 +36,12 @@ def manual_classify( tag=None, dont_warn_reclassify=False, ): - """Assign a classification, defined in ``new_classification``, to the components in - ``decide_comps``. + """Assign a classification defined in new_classification to the components in decide_comps. Parameters ---------- - {selector} - {decide_comps} + %(selector)s + %(decide_comps)s new_classification : :obj:`str` Assign all components identified in decide_comps the classification in new_classification. Options are 'unclassified', 'accepted', @@ -106,14 +61,15 @@ def manual_classify( classifications. If this is True, that warning is suppressed. (Useful if manual_classify is used to reset all labels to unclassified). Default=False - {log_extra_info} - {log_extra_report} - {custom_node_label} - {only_used_metrics} + %(log_extra_info)s + %(log_extra_report)s + %(custom_node_label)s + %(only_used_metrics)s Returns ------- - {basicreturns} + %(selector)s + %(used_metrics)s Note ---- @@ -191,9 +147,7 @@ def manual_classify( return selector -manual_classify.__doc__ = manual_classify.__doc__.format(**DECISION_DOCS) - - +@fill_doc def dec_left_op_right( selector, if_true, @@ -225,9 +179,10 @@ def dec_left_op_right( Parameters ---------- - {selector} - {ifTrueFalse} - {decide_comps} + %(selector)s + %(tag_ifTrue)s + %(tag_ifFalse)s + %(decide_comps)s op: :obj:`str` Must be one of: ">", ">=", "==", "<=", "<" Applied the user defined operator to left op right @@ -255,15 +210,17 @@ def dec_left_op_right( (left_scale*)left op (right_scale*right) AND (left2_scale*)left2 op2 (right2_scale*right2) if the "3" parameters are also defined then it's the intersection of all 3 statements - {log_extra_info} - {log_extra_report} - {custom_node_label} - {only_used_metrics} - {tag_ifTrueFalse} + %(log_extra_info)s + %(log_extra_report)s + %(custom_node_label)s + %(only_used_metrics)s + %(tag_ifTrue)s + %(tag_ifFalse)s Returns ------- - {basicreturns} + %(selector)s + %(used_metrics)s Note ---- @@ -510,9 +467,7 @@ def parse_vals(val): return selector -dec_left_op_right.__doc__ = dec_left_op_right.__doc__.format(**DECISION_DOCS) - - +@fill_doc def dec_variance_lessthan_thresholds( selector, if_true, @@ -528,8 +483,8 @@ def dec_variance_lessthan_thresholds( tag_ifTrue=None, tag_ifFalse=None, ): - """ - Change classifications for components with variance Date: Sat, 21 Jan 2023 10:17:21 -0500 Subject: [PATCH 151/177] Style fixes. 
--- docs/building decision trees.rst | 2 +- tedana/docs.py | 8 +-- tedana/resources/decision_trees/kundu.json | 20 +++---- tedana/resources/decision_trees/minimal.json | 12 ++-- tedana/selection/component_selector.py | 29 ++++----- tedana/selection/selection_nodes.py | 62 +++++++------------- tedana/selection/selection_utils.py | 20 +++---- tedana/tests/test_component_selector.py | 8 +-- tedana/tests/test_selection_nodes.py | 8 +-- tedana/tests/test_selection_utils.py | 4 +- 10 files changed, 76 insertions(+), 97 deletions(-) diff --git a/docs/building decision trees.rst b/docs/building decision trees.rst index 7c5f6f65b..bf7701ab1 100644 --- a/docs/building decision trees.rst +++ b/docs/building decision trees.rst @@ -257,7 +257,7 @@ Most decision functions also include "if_true" and "if_false" which specify how the classification of each component based on whether a the decision criterion is true or false. In addition to the default and intermediate classification options, this can also be "nochange" (i.e. For components where a>b is true, "reject". For components -where a>b is false, "nochange"). The optional parameters "tag_ifTrue" and "tag_ifFalse" +where a>b is false, "nochange"). The optional parameters "tag_if_true" and "tag_if_false" define the classification tags to be assigned to components. Currently, the only exceptions are "manual_classify" and "dec_classification_doesnt_exist" which use "new_classification" to designate the new component classification and "tag" (optional) diff --git a/tedana/docs.py b/tedana/docs.py index 4058c9407..86abec705 100644 --- a/tedana/docs.py +++ b/tedana/docs.py @@ -51,13 +51,13 @@ automatically generated. Default="". """ -docdict["tag_ifTrue"] = """ -tag_ifTrue : :obj:`str` +docdict["tag_if_true"] = """ +tag_if_true : :obj:`str` The classification tag to apply if a component is classified True. Default="". """ -docdict["tag_ifFalse"] = """ -tag_ifFalse : :obj:`str` +docdict["tag_if_false"] = """ +tag_if_false : :obj:`str` The classification tag to apply if a component is classified False. Default="". """ diff --git a/tedana/resources/decision_trees/kundu.json b/tedana/resources/decision_trees/kundu.json index a4ee05158..9ed9c57ec 100644 --- a/tedana/resources/decision_trees/kundu.json +++ b/tedana/resources/decision_trees/kundu.json @@ -54,7 +54,7 @@ "right": "kappa" }, "kwargs": { - "tag_ifTrue": "Unlikely BOLD" + "tag_if_true": "Unlikely BOLD" }, "_comment": "Code I002 in premodularized tedana" }, @@ -72,7 +72,7 @@ "left2": "countsigFT2", "op2": ">", "right2": 0, - "tag_ifTrue": "Unlikely BOLD" + "tag_if_true": "Unlikely BOLD" }, "_comment": "Code I003 in premodularized tedana" }, @@ -98,7 +98,7 @@ "left2": "variance explained", "op2": ">", "right2": "median_varex", - "tag_ifTrue": "Unlikely BOLD" + "tag_if_true": "Unlikely BOLD" }, "_comment": "Code I004 in premodularized tedana" }, @@ -116,7 +116,7 @@ "left2": "variance explained", "op2": ">", "right2": "median_varex", - "tag_ifTrue": "Unlikely BOLD" + "tag_if_true": "Unlikely BOLD" }, "_comment": "Code I005 in premodularized tedana" }, @@ -251,7 +251,7 @@ "right2": "varex_upper_thresh", "right2_scale": "extend_factor", "log_extra_info": "If variance and d_table_scores are high, then reject", - "tag_ifTrue": "Less likely BOLD" + "tag_if_true": "Less likely BOLD" }, "_comment": "Code I007 in premodularized tedana. 
One of several steps that makes it more likely to reject high variance components" }, @@ -269,7 +269,7 @@ "right": "max_good_meanmetricrank" }, "kwargs": { - "tag_ifTrue": "Low variance", + "tag_if_true": "Low variance", "op2": "<=", "left2": "variance explained", "right2": "varex_lower_thresh", @@ -322,7 +322,7 @@ "right": "conservative_guess" }, "kwargs": { - "tag_ifTrue": "Less likely BOLD", + "tag_if_true": "Less likely BOLD", "op2": ">", "left2": "varex kappa ratio", "right2": "extend_factor", @@ -349,7 +349,7 @@ "right": "num_acc_guess" }, "kwargs": { - "tag_ifTrue": "Less likely BOLD", + "tag_if_true": "Less likely BOLD", "right_scale": 0.9, "op2": ">", "left2": "variance explained", @@ -387,7 +387,7 @@ "right": "num_acc_guess" }, "kwargs": { - "tag_ifTrue": "Accept borderline", + "tag_if_true": "Accept borderline", "op2": ">", "left2": "variance explained", "right2": "varex_new_lower_thresh", @@ -409,7 +409,7 @@ "right": "kappa_elbow_kundu" }, "kwargs": { - "tag_ifTrue": "Accept borderline", + "tag_if_true": "Accept borderline", "op2": ">", "left2": "variance explained", "right2": "varex_new_lower_thresh", diff --git a/tedana/resources/decision_trees/minimal.json b/tedana/resources/decision_trees/minimal.json index eb247305e..8ce4ff68a 100644 --- a/tedana/resources/decision_trees/minimal.json +++ b/tedana/resources/decision_trees/minimal.json @@ -47,7 +47,7 @@ }, "kwargs": { "log_extra_report": "", - "tag_ifTrue": "Unlikely BOLD" + "tag_if_true": "Unlikely BOLD" } }, { @@ -65,7 +65,7 @@ "op2": ">", "right2": 0, "log_extra_report": "", - "tag_ifTrue": "Unlikely BOLD" + "tag_if_true": "Unlikely BOLD" } }, { @@ -91,7 +91,7 @@ "op2": ">", "right2": "median_varex", "log_extra_report": "", - "tag_ifTrue": "Unlikely BOLD" + "tag_if_true": "Unlikely BOLD" } }, { @@ -109,7 +109,7 @@ "op2": ">", "right2": "median_varex", "log_extra_report": "", - "tag_ifTrue": "Unlikely BOLD" + "tag_if_true": "Unlikely BOLD" } }, { @@ -164,7 +164,7 @@ "log_extra_info": "If kappa>elbow and kappa>2*rho accept even if rho>elbow", "log_extra_report": "", "right_scale": 2, - "tag_ifTrue": "Likely BOLD" + "tag_if_true": "Likely BOLD" } }, { @@ -197,7 +197,7 @@ "log_extra_report": "", "single_comp_threshold": 0.1, "all_comp_threshold": 1.0, - "tag_ifTrue": "Low variance" + "tag_if_true": "Low variance" } }, { diff --git a/tedana/selection/component_selector.py b/tedana/selection/component_selector.py index ffa9eb210..0e82db931 100644 --- a/tedana/selection/component_selector.py +++ b/tedana/selection/component_selector.py @@ -138,9 +138,7 @@ def validate_tree(tree): fcn = getattr(selection_nodes, node.get("functionname")) sig = inspect.signature(fcn) except (AttributeError, TypeError): - err_msg += "Node {} has invalid functionname parameter: {}\n".format( - i, node.get("functionname") - ) + err_msg += f"Node {i} has invalid functionname parameter: {node.get('functionname')}\n" continue # Get a functions parameters and compare to parameters defined in the tree @@ -149,12 +147,12 @@ def validate_tree(tree): missing_pos = pos - set(node.get("parameters").keys()) - defaults if len(missing_pos) > 0: - err_msg += "Node {} is missing required parameter(s): {}\n".format(i, missing_pos) + err_msg += f"Node {i} is missing required parameter(s): {missing_pos}\n" invalid_params = set(node.get("parameters").keys()) - pos if len(invalid_params) > 0: - err_msg += "Node {} has additional, undefined required parameters: {}\n".format( - i, invalid_params + err_msg += ( + f"Node {i} has additional, undefined required parameters: 
{invalid_params}\n" ) # Only if kwargs are inputted, make sure they are all valid @@ -162,9 +160,8 @@ def validate_tree(tree): invalid_kwargs = set(node.get("kwargs").keys()) - kwargs if len(invalid_kwargs) > 0: err_msg += ( - "Node {} has additional, undefined optional parameters (kwargs): {}\n".format( - i, invalid_kwargs - ) + f"Node {i} has additional, undefined optional parameters (kwargs): " + f"{invalid_kwargs}\n" ) # Gather all the classification labels used in each tree both for @@ -205,10 +202,10 @@ def validate_tree(tree): if node.get("kwargs") is not None: tagset = set() - if "tag_ifTrue" in node.get("kwargs").keys(): - tagset.update(set([node["kwargs"]["tag_ifTrue"]])) - if "tag_ifFalse" in node.get("kwargs").keys(): - tagset.update(set([node["kwargs"]["tag_ifFalse"]])) + if "tag_if_true" in node.get("kwargs").keys(): + tagset.update(set([node["kwargs"]["tag_if_true"]])) + if "tag_if_false" in node.get("kwargs").keys(): + tagset.update(set([node["kwargs"]["tag_if_false"]])) if "tag" in node.get("kwargs").keys(): tagset.update(set([node["kwargs"]["tag"]])) undefined_classification_tags = tagset.difference(set(tree.get("classification_tags"))) @@ -454,9 +451,9 @@ def check_null(self, params, fcn): params[key] = getattr(self, key) except AttributeError: raise ValueError( - "Parameter {} is required in node {}, but not defined. ".format(key, fcn) - + "If {} is dataset specific, it should be " - "defined in the ".format(key) + " initialization of " + f"Parameter {key} is required in node {fcn}, but not defined. " + f"If {key} is dataset specific, it should be " + "defined in the initialization of " "ComponentSelector. If it is fixed regardless of dataset, it " "should be defined in the json file that defines the " "decision tree." diff --git a/tedana/selection/selection_nodes.py b/tedana/selection/selection_nodes.py index ceee39020..0bd2b13f2 100644 --- a/tedana/selection/selection_nodes.py +++ b/tedana/selection/selection_nodes.py @@ -5,6 +5,7 @@ import pandas as pd from scipy.stats import scoreatpercentile +from tedana.docs import fill_doc from tedana.metrics.dependence import generate_decision_table_score from tedana.selection.selection_utils import ( change_comptable_classifications, @@ -15,8 +16,6 @@ rho_elbow_kundu_liberal, selectcomps2use, ) -from tedana.docs import fill_doc - LGR = logging.getLogger("GENERAL") RepLGR = logging.getLogger("REPORT") @@ -84,7 +83,6 @@ def manual_classify( Unlike other decision node functions, ``if_true`` and ``if_false`` are not inputs since the same classification is assigned to all components listed in ``decide_comps``. """ - # predefine all outputs that should be logged outputs = { "decision_node_idx": selector.current_node_idx, @@ -100,7 +98,7 @@ def manual_classify( if_true = new_classification if_false = "nochange" - function_name_idx = "Step {}: manual_classify".format((selector.current_node_idx)) + function_name_idx = f"Step {selector.current_node_idx}: manual_classify" if custom_node_label: outputs["node_label"] = custom_node_label else: @@ -125,7 +123,7 @@ def manual_classify( if_true, if_false, decision_boolean, - tag_ifTrue=tag, + tag_if_true=tag, dont_warn_reclassify=dont_warn_reclassify, ) @@ -172,21 +170,20 @@ def dec_left_op_right( log_extra_info="", custom_node_label="", only_used_metrics=False, - tag_ifTrue=None, - tag_ifFalse=None, + tag_if_true=None, + tag_if_false=None, ): """Perform a relational comparison. 
Parameters ---------- %(selector)s - %(tag_ifTrue)s - %(tag_ifFalse)s + %(tag_if_true)s + %(tag_if_false)s %(decide_comps)s op: :obj:`str` Must be one of: ">", ">=", "==", "<=", "<" Applied the user defined operator to left op right - left, right: :obj:`str` or :obj:`float` The labels for the two metrics to be used for comparision. For example: left='kappa', right='rho' and op='>' means this @@ -209,13 +206,12 @@ def dec_left_op_right( this function returns (left_scale*)left op (right_scale*right) AND (left2_scale*)left2 op2 (right2_scale*right2) if the "3" parameters are also defined then it's the intersection of all 3 statements - %(log_extra_info)s %(log_extra_report)s %(custom_node_label)s %(only_used_metrics)s - %(tag_ifTrue)s - %(tag_ifFalse)s + %(tag_if_true)s + %(tag_if_false)s Returns ------- @@ -235,7 +231,6 @@ def dec_left_op_right( an intentional decision because, if a classification changes if A>B or C>D are true then A>B and C>D should be logged separately """ - # predefine all outputs that should be logged outputs = { "decision_node_idx": selector.current_node_idx, @@ -447,8 +442,8 @@ def parse_vals(val): if_true, if_false, decision_boolean, - tag_ifTrue=tag_ifTrue, - tag_ifFalse=tag_ifFalse, + tag_if_true=tag_if_true, + tag_if_false=tag_if_false, ) # outputs["n_true"] = np.asarray(decision_boolean).sum() # outputs["n_false"] = np.logical_not(decision_boolean).sum() @@ -480,8 +475,8 @@ def dec_variance_lessthan_thresholds( log_extra_info="", custom_node_label="", only_used_metrics=False, - tag_ifTrue=None, - tag_ifFalse=None, + tag_if_true=None, + tag_if_false=None, ): """Change classifications for components with varianceRho", "log_extra_report": "", # Warning for an non-predefined classification assigned to a component - "tag_ifTrue": "random2notpredefined", + "tag_if_true": "random2notpredefined", }, }, { @@ -215,8 +215,8 @@ def test_validate_tree_succeeds(): if "/minimal.json" in tree_name: # Should remove/ignore the "reconstruct_from" key during validation tree["reconstruct_from"] = "testinput" - # Need to test handling of the tag_ifFalse kwarg somewhere - tree["nodes"][1]["kwargs"]["tag_ifFalse"] = "testing tag" + # Need to test handling of the tag_if_false kwarg somewhere + tree["nodes"][1]["kwargs"]["tag_if_false"] = "testing tag" assert component_selector.validate_tree(tree) diff --git a/tedana/tests/test_selection_nodes.py b/tedana/tests/test_selection_nodes.py index 52cbdde8a..26a716b41 100644 --- a/tedana/tests/test_selection_nodes.py +++ b/tedana/tests/test_selection_nodes.py @@ -93,8 +93,8 @@ def test_dec_left_op_right_succeeds(): log_extra_report="report log", log_extra_info="info log", custom_node_label="custom label", - tag_ifTrue="test true tag", - tag_ifFalse="test false tag", + tag_if_true="test true tag", + tag_if_false="test false tag", ) # scales are set to make sure 3 components are true and 1 is false using # the sample component table @@ -389,8 +389,8 @@ def test_dec_variance_lessthan_thresholds_smoke(): log_extra_report="report log", log_extra_info="info log", custom_node_label="custom label", - tag_ifTrue="test true tag", - tag_ifFalse="test false tag", + tag_if_true="test true tag", + tag_if_false="test false tag", ) assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["n_true"] == 1 assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["n_false"] == 3 diff --git a/tedana/tests/test_selection_utils.py b/tedana/tests/test_selection_utils.py index fa099c580..c356e95ec 100644 --- a/tedana/tests/test_selection_utils.py 
+++ b/tedana/tests/test_selection_utils.py @@ -198,8 +198,8 @@ def test_change_comptable_classifications_succeeds(): "accepted", "nochange", decision_boolean, - tag_ifTrue="testing_tag1", - tag_ifFalse="testing_tag2", + tag_if_true="testing_tag1", + tag_if_false="testing_tag2", ) assert n_true == 2 From 1f7c07a44de52201178b216f7c8e0e9baa9adcee Mon Sep 17 00:00:00 2001 From: handwerkerd Date: Thu, 2 Feb 2023 15:50:35 -0500 Subject: [PATCH 152/177] more int32 --- tedana/io.py | 2 +- tedana/selection/selection_utils.py | 2 +- tedana/tests/test_selection_utils.py | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tedana/io.py b/tedana/io.py index 7d1d32e42..8c2f10763 100644 --- a/tedana/io.py +++ b/tedana/io.py @@ -41,7 +41,7 @@ class CustomEncoder(json.JSONEncoder): def default(self, obj): # int64 non-serializable but is a numpy output - if isinstance(obj, np.integer): + if isinstance(obj, np.int32): return int(obj) # containers that are not serializable diff --git a/tedana/selection/selection_utils.py b/tedana/selection/selection_utils.py index c1ad69642..2455cb6a8 100644 --- a/tedana/selection/selection_utils.py +++ b/tedana/selection/selection_utils.py @@ -513,7 +513,7 @@ def getelbow_cons(arr, return_val=False): (arr[nk - 5 - ii - 1] > arr[nk - 5 - ii : nk].mean() + 2 * arr[nk - 5 - ii : nk].std()) for ii in range(nk - 5) ] - ds = np.array(temp1[::-1], dtype=np.integer) + ds = np.array(temp1[::-1], dtype=np.int32) dsum = [] c_ = 0 for d_ in ds: diff --git a/tedana/tests/test_selection_utils.py b/tedana/tests/test_selection_utils.py index a8675595a..9d78c6a2a 100644 --- a/tedana/tests/test_selection_utils.py +++ b/tedana/tests/test_selection_utils.py @@ -315,7 +315,7 @@ def test_getelbow_smoke(): """A smoke test for the getelbow function.""" arr = np.random.random(100) idx = selection_utils.getelbow(arr) - assert isinstance(idx, np.integer) + assert isinstance(idx, np.int32) val = selection_utils.getelbow(arr, return_val=True) assert isinstance(val, float) @@ -335,7 +335,7 @@ def test_getelbow_cons_smoke(): """A smoke test for the getelbow_cons function.""" arr = np.random.random(100) idx = selection_utils.getelbow_cons(arr) - assert isinstance(idx, np.integer) + assert isinstance(idx, np.int32) val = selection_utils.getelbow_cons(arr, return_val=True) assert isinstance(val, float) From 149ec28f9219778ce2b439c1ae6ab8c3fea1006c Mon Sep 17 00:00:00 2001 From: handwerkerd Date: Fri, 3 Feb 2023 16:05:37 -0500 Subject: [PATCH 153/177] more int32 fun --- tedana/io.py | 2 +- tedana/tests/test_selection_utils.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tedana/io.py b/tedana/io.py index 603236019..74dfb839d 100644 --- a/tedana/io.py +++ b/tedana/io.py @@ -40,7 +40,7 @@ class CustomEncoder(json.JSONEncoder): def default(self, obj): # int64 non-serializable but is a numpy output - if isinstance(obj, np.int32): + if isinstance(obj, np.int32) or isinstance(obj, np.int64): return int(obj) # containers that are not serializable diff --git a/tedana/tests/test_selection_utils.py b/tedana/tests/test_selection_utils.py index 95f77be79..14fd63399 100644 --- a/tedana/tests/test_selection_utils.py +++ b/tedana/tests/test_selection_utils.py @@ -315,7 +315,7 @@ def test_getelbow_smoke(): """A smoke test for the getelbow function.""" arr = np.random.random(100) idx = selection_utils.getelbow(arr) - assert isinstance(idx, np.int32) + assert isinstance(idx, np.int32) or isinstance(idx, np.int64) val = selection_utils.getelbow(arr, return_val=True) assert 
isinstance(val, float) @@ -335,7 +335,7 @@ def test_getelbow_cons_smoke(): """A smoke test for the getelbow_cons function.""" arr = np.random.random(100) idx = selection_utils.getelbow_cons(arr) - assert isinstance(idx, np.int32) + assert isinstance(idx, np.int32) or isinstance(idx, np.int64) val = selection_utils.getelbow_cons(arr, return_val=True) assert isinstance(val, float) From 322a25f3d822e6c2568e1957025655e7012be17a Mon Sep 17 00:00:00 2001 From: "Joshua B. Teves" Date: Fri, 3 Feb 2023 16:21:02 -0500 Subject: [PATCH 154/177] Appease linter
--- tedana/docs.py | 48 ++++++++++++++++++------- tedana/io.py | 1 - tedana/selection/selection_nodes.py | 14 +++++--- tedana/tests/test_component_selector.py | 1 + tedana/tests/test_utils.py | 2 +- 5 files changed, 48 insertions(+), 18 deletions(-) diff --git a/tedana/docs.py b/tedana/docs.py index 86abec705..8154d716a 100644 --- a/tedana/docs.py +++ b/tedana/docs.py @@ -5,24 +5,32 @@
# Standard documentation entries
docdict = dict()
-docdict["selector"] = """
+docdict[
+ "selector"
+] = """
selector : :obj:`tedana.selection.component_selector.ComponentSelector`
The selector to perform decision tree-based component selection with.
"""
-docdict["if_true"] = """
+docdict[
+ "if_true"
+] = """
if_true : :obj:`str`
If the condition in this step is True, give the component classification this
label. Use 'nochange' if no label changes are desired.
"""
-docdict["if_false"] = """
+docdict[
+ "if_false"
+] = """
if_false : :obj:`str`
If the condition in this step is False, give the component classification this
label. Use 'nochange' to indicate if no label changes are desired.
"""
-docdict["decide_comps"] = """
+docdict[
+ "decide_comps"
+] = """
decide_comps : :obj:`str` or :obj:`list[str]`
What classification(s) to operate on, using default or
intermediate_classification labels. For example: decide_comps='unclassified'
means to operate only on unclassified components. Use 'all' to include all
components.
"""
-docdict["log_extra_report"] = """
+docdict[
+ "log_extra_report"
+] = """
log_extra_report : :obj:`str`
Additional text to the report log. Default="".
"""
-docdict["log_extra_info"] = """
+docdict[
+ "log_extra_info"
+] = """
log_extra_info : :obj:`str`
Additional text to the information log. Default="".
"""
-docdict["only_used_metrics"] = """
+docdict[
+ "only_used_metrics"
+] = """
only_used_metrics : :obj:`bool`
If True, only return the component_table metrics that would be used. Default=False.
"""
-docdict["custom_node_label"] = """
+docdict[
+ "custom_node_label"
+] = """
custom_node_label : :obj:`str`
A short label to describe what happens in this step. If "" then a label is
automatically generated. Default="".
"""
-docdict["tag_if_true"] = """
+docdict[
+ "tag_if_true"
+] = """
tag_if_true : :obj:`str`
The classification tag to apply if a component is classified True. Default="".
"""
-docdict["tag_if_false"] = """
+docdict[
+ "tag_if_false"
+] = """
tag_if_false : :obj:`str`
The classification tag to apply if a component is classified False. Default="".
"""
-docdict["selector"] = """
+docdict[
+ "selector"
+] = """
selector : :obj:`~tedana.selection.component_selector.ComponentSelector`
If only_used_metrics is False, the updated selector is returned.
"""
-docdict["used_metrics"] = """
+docdict[
+ "used_metrics"
+] = """
used_metrics : :obj:`set(str)`
If only_used_metrics is True, the names of the metrics used in the
function are returned.
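
The ``np.integer``/``np.int32``/``np.int64`` churn in patches 152-153 above exists because
the standard-library ``json`` module cannot serialize numpy scalar types, and the concrete
integer type numpy returns is platform dependent (for example, 32-bit on Windows). A short
sketch of the failure mode and the encoder-based fix; this follows the same pattern as, but
is not a copy of, tedana's ``CustomEncoder``::

    import json

    import numpy as np

    class NumpyIntEncoder(json.JSONEncoder):
        def default(self, obj):
            # numpy integer scalars are not JSON serializable; cast to int first.
            if isinstance(obj, (np.int32, np.int64)):
                return int(obj)
            return super().default(obj)

    elbow = np.argmax(np.array([1, 3, 2]))  # a numpy integer, not a Python int
    # json.dumps({"elbow": elbow})          # raises TypeError without the encoder
    print(json.dumps({"elbow": elbow}, cls=NumpyIntEncoder))  # {"elbow": 1}

Patch 152 narrowed the check from the parent ``np.integer`` class to ``np.int32``, and
patch 153 broadened it again to cover both concrete types, which is why the tests above
accept either type.
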
diff --git a/tedana/io.py b/tedana/io.py index 74dfb839d..d3ad9aa12 100644 --- a/tedana/io.py +++ b/tedana/io.py @@ -108,7 +108,6 @@ def __init__( verbose=False, old_registry=None, ): - if config == "auto": config = op.join(utils.get_resource_path(), "config", "outputs.json") diff --git a/tedana/selection/selection_nodes.py b/tedana/selection/selection_nodes.py index 0bd2b13f2..00228e3d9 100644 --- a/tedana/selection/selection_nodes.py +++ b/tedana/selection/selection_nodes.py @@ -437,7 +437,11 @@ def parse_vals(val): # logical dot product for compound statement decision_boolean = statement1 * statement2 - (selector, outputs["n_true"], outputs["n_false"],) = change_comptable_classifications( + ( + selector, + outputs["n_true"], + outputs["n_false"], + ) = change_comptable_classifications( selector, if_true, if_false, @@ -564,7 +568,11 @@ def dec_variance_lessthan_thresholds( while variance[decision_boolean].sum() > all_comp_threshold: tmpmax = variance == variance[decision_boolean].max() decision_boolean[tmpmax] = False - (selector, outputs["n_true"], outputs["n_false"],) = change_comptable_classifications( + ( + selector, + outputs["n_true"], + outputs["n_false"], + ) = change_comptable_classifications( selector, if_true, if_false, @@ -670,7 +678,6 @@ def calc_median( decide_comps=decide_comps, ) else: - outputs[label_name] = np.median(selector.component_table.loc[comps2use, metric_name]) selector.cross_component_metrics[label_name] = outputs[label_name] @@ -1382,7 +1389,6 @@ def calc_max_good_meanmetricrank( decide_comps=decide_comps, ) else: - num_prov_accept = len(comps2use) if "extend_factor" in selector.cross_component_metrics: extend_factor = selector.cross_component_metrics["extend_factor"] diff --git a/tedana/tests/test_component_selector.py b/tedana/tests/test_component_selector.py index facd9b995..50f53960d 100644 --- a/tedana/tests/test_component_selector.py +++ b/tedana/tests/test_component_selector.py @@ -141,6 +141,7 @@ def dicts_to_test(treechoice): # component_selector Tests # ---------------------------------------------------------------------- + # load_config # ----------- def test_load_config_fails(): diff --git a/tedana/tests/test_utils.py b/tedana/tests/test_utils.py index 9085a8d71..9abb221bb 100644 --- a/tedana/tests/test_utils.py +++ b/tedana/tests/test_utils.py @@ -30,7 +30,7 @@ def test_unmask(): (rs.randint(10, size=(n_data, 3, 3)), int), # 3D int ] - for (input, dtype) in inputs: + for input, dtype in inputs: out = utils.unmask(input, mask) assert out.shape == (100,) + input.shape[1:] assert out.dtype == dtype From b130e827ecf2a273077165ec80d009666a1d3bce Mon Sep 17 00:00:00 2001 From: handwerkerd Date: Tue, 14 Feb 2023 10:33:04 -0500 Subject: [PATCH 155/177] Fixed style issues --- tedana/metrics/collect.py | 3 +-- tedana/selection/selection_nodes.py | 12 ++---------- 2 files changed, 3 insertions(+), 12 deletions(-) diff --git a/tedana/metrics/collect.py b/tedana/metrics/collect.py index ce831be64..f4c731876 100644 --- a/tedana/metrics/collect.py +++ b/tedana/metrics/collect.py @@ -6,10 +6,9 @@ import pandas as pd from tedana import io, utils -from tedana.stats import getfbounds - from tedana.metrics import dependence from tedana.metrics._utils import dependency_resolver, determine_signs, flip_components +from tedana.stats import getfbounds LGR = logging.getLogger("GENERAL") RepLGR = logging.getLogger("REPORT") diff --git a/tedana/selection/selection_nodes.py b/tedana/selection/selection_nodes.py index 00228e3d9..9eb5327ce 100644 --- 
a/tedana/selection/selection_nodes.py +++ b/tedana/selection/selection_nodes.py @@ -437,11 +437,7 @@ def parse_vals(val): # logical dot product for compound statement decision_boolean = statement1 * statement2 - ( - selector, - outputs["n_true"], - outputs["n_false"], - ) = change_comptable_classifications( + (selector, outputs["n_true"], outputs["n_false"],) = change_comptable_classifications( selector, if_true, if_false, @@ -568,11 +564,7 @@ def dec_variance_lessthan_thresholds( while variance[decision_boolean].sum() > all_comp_threshold: tmpmax = variance == variance[decision_boolean].max() decision_boolean[tmpmax] = False - ( - selector, - outputs["n_true"], - outputs["n_false"], - ) = change_comptable_classifications( + (selector, outputs["n_true"], outputs["n_false"],) = change_comptable_classifications( selector, if_true, if_false, From a65a4cfe2c3fa60476b37471d20add68049f1561 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eneko=20Uru=C3=B1uela?= Date: Thu, 16 Feb 2023 15:46:39 +0000 Subject: [PATCH 156/177] Add RICA to Approach section of docs --- docs/approach.rst | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/docs/approach.rst b/docs/approach.rst index 97a985f1a..1a4f9e557 100644 --- a/docs/approach.rst +++ b/docs/approach.rst @@ -347,6 +347,20 @@ yielding a denoised timeseries, which is saved as **desc-optcomDenoised_bold.nii .. image:: /_static/a15_denoised_data_timeseries.png +******************************* +Manual classification with RICA +******************************* + +``RICA`` is a tool for manual ICA classification. Once the .tsv file containing the result of +manual component classification is obtained, it is necessary to `re-run the tedana workflow`_ +passing the manual_classification.tsv file with the --ctab option. To save the output correctly, +make sure that the output directory does not coincide with the input directory. See `this example`_ +presented at MRITogether 2022 for a hands-on tutorial. + +.. _re-run the tedana workflow: https://tedana.readthedocs.io/en/stable/usage.html#Arguments%20for%20Rerunning%20the%20Workflow +.. 
_this example: https://www.youtube.com/live/P4cV-sGeltk?feature=share&t=1347 + + ********************************************* Removal of spatially diffuse noise (optional) ********************************************* From 1a9ffe41d6261d2db4588ece59f8fce308d29205 Mon Sep 17 00:00:00 2001 From: handwerkerd Date: Thu, 16 Feb 2023 15:00:53 -0500 Subject: [PATCH 157/177] Fixed CI style check failure --- tedana/selection/selection_nodes.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/tedana/selection/selection_nodes.py b/tedana/selection/selection_nodes.py index 9eb5327ce..00228e3d9 100644 --- a/tedana/selection/selection_nodes.py +++ b/tedana/selection/selection_nodes.py @@ -437,7 +437,11 @@ def parse_vals(val): # logical dot product for compound statement decision_boolean = statement1 * statement2 - (selector, outputs["n_true"], outputs["n_false"],) = change_comptable_classifications( + ( + selector, + outputs["n_true"], + outputs["n_false"], + ) = change_comptable_classifications( selector, if_true, if_false, @@ -564,7 +568,11 @@ def dec_variance_lessthan_thresholds( while variance[decision_boolean].sum() > all_comp_threshold: tmpmax = variance == variance[decision_boolean].max() decision_boolean[tmpmax] = False - (selector, outputs["n_true"], outputs["n_false"],) = change_comptable_classifications( + ( + selector, + outputs["n_true"], + outputs["n_false"], + ) = change_comptable_classifications( selector, if_true, if_false, From f8bb798d1f82101d97a2651d969991f6a74c51f6 Mon Sep 17 00:00:00 2001 From: Neha Reddy <58482773+n-reddy@users.noreply.github.com> Date: Thu, 16 Feb 2023 16:45:03 -0600 Subject: [PATCH 158/177] DTM documentation review (#30) * Standardization of usage descriptions * Minor grammar edits * Minor grammar/spelling edits * Update docs/faq.rst --------- --- docs/building decision trees.rst | 48 ++++++++++++++++---------------- docs/faq.rst | 28 +++++++++---------- tedana/workflows/t2smap.py | 16 ++++++----- tedana/workflows/tedana.py | 15 +++++----- 4 files changed, 54 insertions(+), 53 deletions(-) diff --git a/docs/building decision trees.rst b/docs/building decision trees.rst index bf7701ab1..2bf24e62a 100644 --- a/docs/building decision trees.rst +++ b/docs/building decision trees.rst @@ -61,9 +61,9 @@ New columns in ``selector.component_table`` and the "ICA metrics tsv" file: for visualizing and reviewing results ``selector.cross_component_metrics`` and "ICA cross component metrics json": - A dictionary of metrics that are each a single value calculated across components. - For example, kappa and rho elbows. User or pre-defined scaling factors are - also be stored here. Any constant that is used in the component classification + A dictionary of metrics that are each a single value calculated across components, + for example, kappa and rho elbows. User or pre-defined scaling factors are + also stored here. Any constant that is used in the component classification processes that isn't pre-defined in the decision tree file should be saved here. ``selector.component_status_table`` and "ICA status table tsv": @@ -105,10 +105,10 @@ New columns in ``selector.component_table`` and the "ICA metrics tsv" file: for each "node" or function call. For each node, there is an "outputs" subfield with information from when the tree was executed. Each outputs field includes: -decison_node_idx: +decision_node_idx: The decision tree functions are run as part of an ordered list. 
- This is the positional index the location of the function in - the list, starting with index 0. + This is the positional index (the location of the function in + the list), starting with index 0. used_metrics: A list of the metrics used in a node of the decision tree @@ -122,7 +122,7 @@ node_label: n_true, n_false: For decision tree (dec) functions, the number of components that were classified - as true or false respectively in this decision tree step. + as true or false, respectively, in this decision tree step. calc_cross_comp_metrics: For calculation (calc) functions, cross component metrics that were @@ -144,10 +144,10 @@ Defining a custom decision tree ******************************* Decision trees are stored in json files. The default trees are stored as part of -the tedana code repository in `resources/decision_trees`_ The minimal tree, -minimal.json is a good example highlighting the structure and steps in a tree. It +the tedana code repository in `resources/decision_trees`_. The minimal tree, +minimal.json, is a good example highlighting the structure and steps in a tree. It may be helpful to look at that tree while reading this section. kundu.json replicates -the decision tree used in MEICA version 2.5, the predecessor to tedana. It is a more +the decision tree used in MEICA version 2.5, the predecessor to tedana. It is more complex, but also highlights additional possible functionality in decision trees. A user can specify another decision tree and link to the tree location when tedana is @@ -178,7 +178,7 @@ in `selection_nodes.py`_ There are several fields with general information. Some of these store general information that's useful for reporting results and others store information -that Are used to checks whether results are plausible & can help avoid mistakes +that is used to check whether results are plausible & can help avoid mistakes. tree_id: A descriptive name for the tree that will be logged. @@ -193,15 +193,15 @@ that Are used to checks whether results are plausible & can help avoid mistakes Publications that should be referenced when this tree is used necessary_metrics: - Is a list of the necessary metrics in the component table that will be used + A list of the necessary metrics in the component table that will be used by the tree. If a metric doesn't exist then this will raise an error instead of executing a tree. (Depending on future code development, this could potentially be used to run ``tedana`` by specifying a decision tree and - metrics are calculated base on the contents of this field.) If a necessary + metrics are calculated based on the contents of this field.) If a necessary metric isn't used, there will be a warning. generated_metrics: - Is an optional initial field. It lists metrics that are to be calculated as + An optional initial field. It lists metrics that are to be calculated as part of the decision tree's execution. This is used similarly to necessary_metrics except, since the decision tree starts before these metrics exist, it won't raise an error when these metrics are not found. 
One might want to calculate a new metric @@ -246,15 +246,15 @@ There are several key fields for each node: - "parameters": Specifications of all required parameters for the function in functionname - "kwargs": Specifications for optional parameters for the function in functionname -The only parameter that is used in all functions is "decidecomps" which is used to +The only parameter that is used in all functions is "decidecomps", which is used to identify, based on their classifications, the components a function should be applied to. It can be a single classification, or a comma separated string of classifications. In addition to the intermediate and default ("accepted" "rejected" "unclassified") component classifications, this can be "all" for functions that should be applied to all components regardless of their classifications. -Most decision functions also include "if_true" and "if_false" which specify how to change -the classification of each component based on whether a the decision criterion is true +Most decision functions also include "if_true" and "if_false", which specify how to change +the classification of each component based on whether a decision criterion is true or false. In addition to the default and intermediate classification options, this can also be "nochange" (i.e. For components where a>b is true, "reject". For components where a>b is false, "nochange"). The optional parameters "tag_if_true" and "tag_if_false" @@ -316,7 +316,7 @@ Before any data are touched in the function, there should be an call. This will be useful to gather all metrics a tree will use without requiring a specific dataset. -Existing functions define ``function_name_idx = f"Step {selector.current_node_idx}: [text of function_name]`` +Existing functions define ``function_name_idx = f"Step {selector.current_node_idx}: [text of function_name]``. This is used in logging and is cleaner to initialize near the top of each function. @@ -331,9 +331,9 @@ and output a warning if the function overwrites an existing value Code that adds the text ``log_extra_info`` and ``log_extra_report`` into the appropriate logs (if they are provided by the user) -After the above information is included, all functions will call ``selectcomps2use`` +After the above information is included, all functions will call ``selectcomps2use``, which returns the components with classifications included in ``decide_comps`` -and then runs ``confirm_metrics_exist`` which is an added check to make sure the metrics +and then runs ``confirm_metrics_exist``, which is an added check to make sure the metrics used by this function exist in the component table. Nearly every function has a clause like: @@ -346,12 +346,12 @@ Nearly every function has a clause like: outputs["n_false"] = 0 else: -If there are no components with the classifications in ``decide_comps`` this logs that +If there are no components with the classifications in ``decide_comps``, this logs that there's nothing for the function to be run on, else continue. -For decision functions the key variable is ``decision_boolean`` which should be a pandas -dataframe column which is True or False for the components in ``decide_comps`` based on -the function's criteria. That column is an input to ``change_comptable_classifications`` +For decision functions, the key variable is ``decision_boolean``, which should be a pandas +dataframe column that is True or False for the components in ``decide_comps`` based on +the function's criteria. 
That column is an input to ``change_comptable_classifications``, which will update the component_table classifications, update the classification history in component_status_table, and update the component classification_tags. Components not in ``decide_comps`` retain their existing classifications and tags. diff --git a/docs/faq.rst b/docs/faq.rst index cd19533d1..db35b48ac 100644 --- a/docs/faq.rst +++ b/docs/faq.rst @@ -99,21 +99,21 @@ make sure to output the denoised time series into a separate directory. ************************************************************************************* The decision tree is the series of conditions through which each component is -classified as accepted or rejected. The kundu tree (`--tree kundu`) -was used in Prantik Kundu's MEICA v2.7 is the classification process that has long +classified as accepted or rejected. The kundu tree (`--tree kundu`), used in Prantik +Kundu's MEICA v2.5, is the classification process that has long been used by ``tedana`` and users have been generally content with the results. The -kundu tree used multiple intersecting metrics and rankings classify components. +kundu tree used multiple intersecting metrics and rankings to classify components. How these steps may interact on specific datasets is opaque. While there is a kappa (T2*-weighted) elbow threshold and a rho (S0-weighted) elbow threshold, as discussed in publications, no component is accepted or rejected because of those thresholds. -Users sometimes notice rejected components that clearly should been accepted. For +Users sometimes notice rejected components that clearly should have been accepted. For example, a component that included a clear T2*-weighted V1 response to a block design flashing checkerboard was sometimes rejected because the relatively large variance of that component interacted with a rejection criterion. The minimal tree (`--tree minimal`) is designed to be easier to understand and less -likely to reject T2* weighted components. There are a few other critiera, but components -with `kappa>kappa elbow` and `rhokappa elbow` and `rho=v3.7) so +validated this method. That is why ``tedana`` replicated the established and validated +MEICA v2.5 method and also includes options to integrate additional component selection +methods. Recently Prantik has started to work on `MEICA v3.3`_ (for python >=v3.7) so that this version of the selection process would again be possible to run. .. _shared code on bitbucket: https://bitbucket.org/prantikk/me-ica/src/experimental diff --git a/tedana/workflows/t2smap.py b/tedana/workflows/t2smap.py index 0a63b229b..29ca955bd 100644 --- a/tedana/workflows/t2smap.py +++ b/tedana/workflows/t2smap.py @@ -90,12 +90,14 @@ def _get_parser(): dest="fittype", action="store", choices=["loglin", "curvefit"], - help="Desired Fitting Method" - '"loglin" means that a linear model is fit' - " to the log of the data, default" - '"curvefit" means that a more computationally' - "demanding monoexponential model is fit" - "to the raw data", + help=( + "Desired T2*/S0 fitting method. " + '"loglin" means that a linear model is fit ' + "to the log of the data. " + '"curvefit" means that a more computationally ' + "demanding monoexponential model is fit " + "to the raw data. " + ), default="loglin", ) optional.add_argument( @@ -130,7 +132,7 @@ def _get_parser(): "threadpoolctl to set the parameter outside " "of the workflow function. Higher numbers of " "threads tend to slow down performance on " - "typical datasets. Default is 1." 
+ "typical datasets." ), default=1, ) diff --git a/tedana/workflows/tedana.py b/tedana/workflows/tedana.py index fb9683eb7..c9fe28c7d 100644 --- a/tedana/workflows/tedana.py +++ b/tedana/workflows/tedana.py @@ -122,7 +122,6 @@ def _get_parser(): '"curvefit" means that a more computationally ' "demanding monoexponential model is fit " "to the raw data. " - 'Default is "loglin".' ), default="loglin", ) @@ -131,7 +130,7 @@ def _get_parser(): dest="combmode", action="store", choices=["t2s"], - help=("Combination scheme for TEs: t2s (Posse 1999, default)"), + help=("Combination scheme for TEs: t2s (Posse 1999)"), default="t2s", ) optional.add_argument( @@ -149,8 +148,9 @@ def _get_parser(): "in which case components will be selected based on the " "cumulative variance explained or an integer greater than 1" "in which case the specificed number of components will be " - "selected. Default='aic'." + "selected." ), + choices=["mdl", "kic", "aic"], default="aic", ) optional.add_argument( @@ -174,7 +174,6 @@ def _get_parser(): "algorithm. Set to an integer value for " "reproducible ICA results. Set to -1 for " "varying results across ICA calls. " - "Default=42." ), default=42, ) @@ -215,12 +214,12 @@ def _get_parser(): nargs="+", help=( "Perform additional denoising to remove " - "spatially diffuse noise. Default is None. " + "spatially diffuse noise. " "This argument can be single value or a space " "delimited list" ), choices=["mir", "gsr"], - default=None, + default="", ) optional.add_argument( "--no-reports", @@ -265,7 +264,7 @@ def _get_parser(): "threadpoolctl to set the parameter outside " "of the workflow function. Higher numbers of " "threads tend to slow down performance on " - "typical datasets. Default is 1." + "typical datasets." ), default=1, ) @@ -305,7 +304,7 @@ def _get_parser(): "-f", dest="force", action="store_true", - help="Force overwriting of files. Default False.", + help="Force overwriting of files.", default=False, ) optional.add_argument("-v", "--version", action="version", version=verstr) parser._action_groups.append(optional) From a03b1203d5338e1dab2c7bc0005b8fdfdb795460 Mon Sep 17 00:00:00 2001 From: Dan Handwerker <7406227+handwerkerd@users.noreply.github.com> Date: Thu, 16 Feb 2023 18:26:30 -0500 Subject: [PATCH 159/177] Rename reclassify force (#32) * changed tedana_reclassify and force * Added default messages to CLI workflows * clean up CLI default messages * added t2smap to function from CLI * style fix --- docs/classification_output_descriptions.rst | 2 +- docs/faq.rst | 2 +- pyproject.toml | 3 ++- tedana/io.py | 14 +++++----- tedana/tests/test_integration.py | 14 +++++----- ...tedana_reclassify.py => ica_reclassify.py} | 26 +++++++++---------- tedana/workflows/t2smap.py | 4 +-- tedana/workflows/tedana.py | 17 ++++++------ 8 files changed, 42 insertions(+), 40 deletions(-) rename tedana/workflows/{tedana_reclassify.py => ica_reclassify.py} (96%) diff --git a/docs/classification_output_descriptions.rst b/docs/classification_output_descriptions.rst index d5e57dcb2..8a4705827 100644 --- a/docs/classification_output_descriptions.rst +++ b/docs/classification_output_descriptions.rst @@ -59,7 +59,7 @@ and `classification_tags`. `classification` should include `accepted` or through denoising. `classification_tags` provide more information on why components received a specific classification. Each component can receive more than one tag. The following tags are included depending if ``--tree`` -is minimal, kundu, or if ``tedana_reclassify`` is run. 
+is minimal, kundu, or if ``ica_reclassify`` is run. ===================== ================ ======================================== Tag Included in Tree Explanation diff --git a/docs/faq.rst b/docs/faq.rst index db35b48ac..a792fe77f 100644 --- a/docs/faq.rst +++ b/docs/faq.rst @@ -84,7 +84,7 @@ applying tedana, and you encounter this problem, please submit a question to `Ne [tedana] Can I manually reclassify components? ******************************************************************************** -``tedana_reclassify`` allows users to manually alter component classifications. +``ica_reclassify`` allows users to manually alter component classifications. This can both be used as a command line tool or as part of other interactive programs, such as `RICA`_. RICA creates a graphical interface that is similar to the build-in tedana reports that lets users interactively change component diff --git a/pyproject.toml b/pyproject.toml index 2da1d630b..ae066e418 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,8 @@ version = "0.0.12" [project.scripts] tedana = "tedana.workflows.tedana:_main" -tedana_reclassify = "tedana.workflows.tedana_reclassify:_main" +ica_reclassify = "tedana.workflows.ica_reclassify:_main" +t2smap = "tedana.workflows.t2smap:_main" [build-system] requires = ["setuptools>=64", "wheel"] diff --git a/tedana/io.py b/tedana/io.py index d3ad9aa12..1516df774 100644 --- a/tedana/io.py +++ b/tedana/io.py @@ -70,7 +70,7 @@ class OutputGenerator: descriptions. Default is "auto", which uses tedana's default configuration file. make_figures : bool, optional Whether or not to actually make a figures directory - force : bool, optional + overwrite : bool, optional Whether to force overwrites of data. Default False. Attributes @@ -88,7 +88,7 @@ class OutputGenerator: This will correspond to a "figures" subfolder of ``out_dir``. prefix : str Prefix to prepend to output filenames. - force : bool + overwrite : bool Whether to force file overwrites. verbose : bool Whether or not to generate verbose output. @@ -104,7 +104,7 @@ def __init__( prefix="", config="auto", make_figures=True, - force=False, + overwrite=False, verbose=False, old_registry=None, ): @@ -132,7 +132,7 @@ def __init__( self.out_dir = op.abspath(out_dir) self.figures_dir = op.join(out_dir, "figures") self.prefix = prefix + "_" if prefix != "" else "" - self.force = force + self.overwrite = overwrite self.verbose = verbose self.registry = {} if old_registry: @@ -252,11 +252,11 @@ def save_file(self, data, description, **kwargs): The full file path of the saved file. """ name = self.get_name(description, **kwargs) - if op.exists(name) and not self.force: + if op.exists(name) and not self.overwrite: raise RuntimeError( f"File {name} already exists. In order to allow overwrite " - "please use the --force option in the command line or the " - "force parameter in the Python API." + "please use the --overwrite option in the command line or the " + "overwrite parameter in the Python API." 
) if description.endswith("img"): diff --git a/tedana/tests/test_integration.py b/tedana/tests/test_integration.py index 5cb6c738b..83104eb99 100644 --- a/tedana/tests/test_integration.py +++ b/tedana/tests/test_integration.py @@ -21,7 +21,7 @@ from tedana.io import InputHarvester from tedana.workflows import t2smap as t2smap_cli from tedana.workflows import tedana as tedana_cli -from tedana.workflows.tedana_reclassify import post_tedana +from tedana.workflows.ica_reclassify import post_tedana # Need to see if a no BOLD warning occurred LOGGER = logging.getLogger(__name__) @@ -260,7 +260,7 @@ def test_integration_reclassify_insufficient_args(skip_integration): guarantee_reclassify_data() args = [ - "tedana_reclassify", + "ica_reclassify", os.path.join(reclassify_raw(), "desc-tedana_registry.json"), ] @@ -289,7 +289,7 @@ def test_integration_reclassify_quiet_csv(skip_integration): rej_df.to_csv(rej_csv_fname) args = [ - "tedana_reclassify", + "ica_reclassify", "--manacc", acc_csv_fname, "--manrej", @@ -315,7 +315,7 @@ def test_integration_reclassify_quiet_spaces(skip_integration): shutil.rmtree(out_dir) args = [ - "tedana_reclassify", + "ica_reclassify", "--manacc", "1", "2", @@ -345,7 +345,7 @@ def test_integration_reclassify_quiet_string(skip_integration): shutil.rmtree(out_dir) args = [ - "tedana_reclassify", + "ica_reclassify", "--manacc", "1,2,3", "--manrej", @@ -371,7 +371,7 @@ def test_integration_reclassify_debug(skip_integration): shutil.rmtree(out_dir) args = [ - "tedana_reclassify", + "ica_reclassify", "--manacc", "1", "2", @@ -435,7 +435,7 @@ def test_integration_reclassify_run_twice(skip_integration): reclassify_raw_registry(), accept=[1, 2, 3], out_dir=out_dir, - force=True, + overwrite=True, no_reports=True, ) fn = resource_filename("tedana", "tests/data/reclassify_run_twice.txt") diff --git a/tedana/workflows/tedana_reclassify.py b/tedana/workflows/ica_reclassify.py similarity index 96% rename from tedana/workflows/tedana_reclassify.py rename to tedana/workflows/ica_reclassify.py index 37c97951d..24559ca29 100644 --- a/tedana/workflows/tedana_reclassify.py +++ b/tedana/workflows/ica_reclassify.py @@ -27,8 +27,8 @@ def _main(): from tedana import __version__ - verstr = "tedana_reclassify v{}".format(__version__) - parser = argparse.ArgumentParser() + verstr = "ica_reclassify v{}".format(__version__) + parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument( "registry", help="File registry from a previous tedana run", @@ -58,7 +58,7 @@ def _main(): parser.add_argument( "--config", dest="config", - help="File naming configuration. Default auto (prepackaged).", + help="File naming configuration.", default="auto", ) parser.add_argument( @@ -120,11 +120,11 @@ def _main(): default=False, ) parser.add_argument( - "--force", + "--overwrite", "-f", - dest="force", + dest="overwrite", action="store_true", - help="Force overwriting of files. Default False.", + help="Force overwriting of files.", ) parser.add_argument( "--quiet", dest="quiet", help=argparse.SUPPRESS, action="store_true", default=False @@ -170,7 +170,7 @@ def _main(): mir=args.mir, no_reports=args.no_reports, png_cmap=args.png_cmap, - force=args.force, + overwrite=args.overwrite, debug=args.debug, quiet=args.quiet, ) @@ -188,7 +188,7 @@ def post_tedana( mir=False, no_reports=False, png_cmap="coolwarm", - force=False, + overwrite=False, debug=False, quiet=False, ): @@ -220,7 +220,7 @@ def post_tedana( Cannot be used with --no-png. Default is 'coolwarm'. 
debug : :obj:`bool`, optional Whether to run in debugging mode or not. Default is False. - force : :obj:`bool`, optional + overwrite : :obj:`bool`, optional Whether to force file overwrites. Default is False. quiet : :obj:`bool`, optional If True, suppresses logging/printing of messages. Default is False. @@ -314,7 +314,7 @@ def post_tedana( convention=convention, prefix=prefix, config=config, - force=force, + overwrite=overwrite, verbose=False, out_dir=out_dir, old_registry=ioh.registry, @@ -410,9 +410,9 @@ def post_tedana( ) if mir: - io_generator.force = True + io_generator.overwrite = True gsc.minimum_image_regression(data_oc, mmix, mask_denoise, comptable, io_generator) - io_generator.force = False + io_generator.overwrite = False # Write out BIDS-compatible description file derivative_metadata = { @@ -421,7 +421,7 @@ def post_tedana( "DatasetType": "derivative", "GeneratedBy": [ { - "Name": "tedana_reclassify", + "Name": "ica_reclassify", "Version": __version__, "Description": ( "A denoising pipeline for the identification and removal " diff --git a/tedana/workflows/t2smap.py b/tedana/workflows/t2smap.py index 29ca955bd..012a0e270 100644 --- a/tedana/workflows/t2smap.py +++ b/tedana/workflows/t2smap.py @@ -25,7 +25,7 @@ def _get_parser(): ------- parser.parse_args() : argparse dict """ - parser = argparse.ArgumentParser() + parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) # Argument parser follow templtate provided by RalphyZ # https://stackoverflow.com/a/43456577 optional = parser._action_groups.pop() @@ -119,7 +119,7 @@ def _get_parser(): dest="combmode", action="store", choices=["t2s", "paid"], - help=("Combination scheme for TEs: t2s (Posse 1999, default), paid (Poser)"), + help=("Combination scheme for TEs: t2s (Posse 1999), paid (Poser)"), default="t2s", ) optional.add_argument( diff --git a/tedana/workflows/tedana.py b/tedana/workflows/tedana.py index c9fe28c7d..c406478ba 100644 --- a/tedana/workflows/tedana.py +++ b/tedana/workflows/tedana.py @@ -47,7 +47,7 @@ def _get_parser(): from tedana import __version__ verstr = "tedana v{}".format(__version__) - parser = argparse.ArgumentParser() + parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) # Argument parser follow templtate provided by RalphyZ # https://stackoverflow.com/a/43456577 optional = parser._action_groups.pop() @@ -300,11 +300,12 @@ def _get_parser(): "--quiet", dest="quiet", help=argparse.SUPPRESS, action="store_true", default=False ) parser.add_argument( - "--force", + "--overwrite", "-f", - dest="force", + dest="overwrite", action="store_true", - help="Force overwriting of files.", default=False, + help="Force overwriting of files.", + default=False, ) optional.add_argument("-v", "--version", action="version", version=verstr) parser._action_groups.append(optional) @@ -334,7 +335,7 @@ def tedana_workflow( low_mem=False, debug=False, quiet=False, - force=False, + overwrite=False, t2smap=None, mixm=None, ): @@ -472,7 +473,7 @@ def tedana_workflow( out_dir=out_dir, prefix=prefix, config="auto", - force=force, + overwrite=overwrite, verbose=verbose, ) @@ -668,10 +669,10 @@ def tedana_workflow( # If we're going to restart, temporarily allow force overwrite if keep_restarting: - io_generator.force = True + io_generator.overwrite = True RepLGR.disabled = True # Disable the report to avoid duplicate text RepLGR.disabled = False # Re-enable the report after the while loop is escaped - io_generator.force = force # Re-enable original overwrite 
behavior + io_generator.overwrite = overwrite # Re-enable original overwrite behavior else: LGR.info("Using supplied mixing matrix from ICA") mixing_file = io_generator.get_name("ICA mixing tsv") From 532dc6a66faffc7109e555f4646551fec8cc7a72 Mon Sep 17 00:00:00 2001 From: Neha Reddy <58482773+n-reddy@users.noreply.github.com> Date: Thu, 16 Feb 2023 18:20:36 -0600 Subject: [PATCH 160/177] Add defaults to --help output (#31) From c43befb54b36fd7703752883bc45b670a369307b Mon Sep 17 00:00:00 2001 From: handwerkerd Date: Thu, 16 Feb 2023 20:06:08 -0500 Subject: [PATCH 161/177] added ica_reclassify to setup.cfg --- setup.cfg | 1 + 1 file changed, 1 insertion(+) diff --git a/setup.cfg b/setup.cfg index 2caa6c325..de0a940c2 100644 --- a/setup.cfg +++ b/setup.cfg @@ -59,6 +59,7 @@ all = [options.entry_points] console_scripts = t2smap = tedana.workflows.t2smap:_main + ica_reclassify = tedana.workflows.ica_reclassify:_main tedana = tedana.workflows.tedana:_main [options.package_data] From 2dc0a5a919d10ccaa32fa339fc455e1675ec3338 Mon Sep 17 00:00:00 2001 From: Dan Handwerker <7406227+handwerkerd@users.noreply.github.com> Date: Tue, 28 Feb 2023 14:34:52 -0500 Subject: [PATCH 162/177] Using a more persistent cache for the testing data (#33) * Cleans up how testing datasets are downloaded within test_integration.py. In Main & the current JT_DTM each dataset is downloaded in a slightly different way and the five-echo data are downloaded twice. * Added `data_for_testing_info` which gives the file hash location and local directory name for each of the four files we download. All tests are updated to use this function. * The local copy of testing data will now go into the `.testing_data_cache` subdirectory * The downloaded testing data will be in separate directories from the outputs so the downloaded directories can be completely static * When `download_test_data` is called, it will first download the metadata json to see if the last updated copy on osf.io is newer than the downloaded version and will only download if osf has a newer file. Downloading the metadata will happen frequently, but it will hopefully be fast. 
* The logger is now used to give a warning if osf.io cannot be accessed, but it will still run using cached data --- .gitignore | 1 + pyproject.toml | 1 + tedana/tests/test_integration.py | 256 ++++++++++++++++++++++--------- 3 files changed, 184 insertions(+), 74 deletions(-) diff --git a/.gitignore b/.gitignore index d7d4a4a82..620137e3d 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,7 @@ .DS_Store docs/generated/ .pytest_cache/ +.testing_data_cache/ # Byte-compiled / optimized / DLL files __pycache__/ diff --git a/pyproject.toml b/pyproject.toml index ae066e418..99af7c9f8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -23,6 +23,7 @@ exclude = ''' | \.github | \.hg | \.pytest_cache + | \.testing_data_cache | _build | build | dist diff --git a/tedana/tests/test_integration.py b/tedana/tests/test_integration.py index 83104eb99..03efe8e58 100644 --- a/tedana/tests/test_integration.py +++ b/tedana/tests/test_integration.py @@ -3,6 +3,7 @@ """ import glob +import json import logging import os import os.path as op @@ -10,6 +11,7 @@ import shutil import subprocess import tarfile +from datetime import datetime from gzip import GzipFile from io import BytesIO @@ -25,6 +27,8 @@ # Need to see if a no BOLD warning occurred LOGGER = logging.getLogger(__name__) +# Added a testing logger to output whether or not testing data were downlaoded +TestLGR = logging.getLogger("TESTING") def check_integration_outputs(fname, outpath, n_logs=1): @@ -61,60 +65,160 @@ def check_integration_outputs(fname, outpath, n_logs=1): assert sorted(tocheck) == sorted(existing) -def download_test_data(osf, outpath): +def data_for_testing_info(test_dataset=str): """ - Downloads tar.gz data stored at `osf` and unpacks into `outpath` + Get the path and download link for each dataset used for testing + + Also creates the base directories into which the data and output + directories are written Parameters ---------- - osf : str - URL to OSF file that contains data to be downloaded - outpath : str + test_dataset : str + References one of the datasets to download. It can be: + three-echo + three-echo-reclassify + four-echo + five-echo + + Returns + ------- + test_data_path : str + The path to the local directory where the data will be downloaded + osfID : str + The ID for the OSF file. 
+        Data download link would be https://osf.io/osfID/download
+        Metadata download link would be https://osf.io/osfID/metadata/?format=datacite-json
+    """
+
+    tedana_path = os.path.dirname(tedana_cli.__file__)
+    base_data_path = os.path.abspath(os.path.join(tedana_path, "../../.testing_data_cache"))
+    os.makedirs(base_data_path, exist_ok=True)
+    os.makedirs(os.path.join(base_data_path, "outputs"), exist_ok=True)
+    if test_dataset == "three-echo":
+        test_data_path = os.path.join(base_data_path, "three-echo/TED.three-echo")
+        osfID = "rqhfc"
+        os.makedirs(os.path.join(base_data_path, "three-echo"), exist_ok=True)
+        os.makedirs(os.path.join(base_data_path, "outputs/three-echo"), exist_ok=True)
+    elif test_dataset == "three-echo-reclassify":
+        test_data_path = os.path.join(base_data_path, "reclassify")
+        osfID = "f6g45"
+        os.makedirs(os.path.join(base_data_path, "outputs/reclassify"), exist_ok=True)
+    elif test_dataset == "four-echo":
+        test_data_path = os.path.join(base_data_path, "four-echo/TED.four-echo")
+        osfID = "gnj73"
+        os.makedirs(os.path.join(base_data_path, "four-echo"), exist_ok=True)
+        os.makedirs(os.path.join(base_data_path, "outputs/four-echo"), exist_ok=True)
+    elif test_dataset == "five-echo":
+        test_data_path = os.path.join(base_data_path, "five-echo/TED.five-echo")
+        osfID = "9c42e"
+        os.makedirs(os.path.join(base_data_path, "five-echo"), exist_ok=True)
+        os.makedirs(os.path.join(base_data_path, "outputs/five-echo"), exist_ok=True)
+    else:
+        raise ValueError(f"{test_dataset} is not a valid dataset string for data_for_testing_info")
+
+    return test_data_path, osfID
+
+
+def download_test_data(osfID, test_data_path):
+    """
+    If current data is not already available, downloads tar.gz data
+    stored at `https://osf.io/osfID/download`
+    and unpacks into `out_path`
+
+    Parameters
+    ----------
+    osfID : str
+        The ID for the OSF file.
+    out_path : str
         Path to directory where OSF data should be extracted
     """
-    req = requests.get(osf)
+    try:
+        datainfo = requests.get(f"https://osf.io/{osfID}/metadata/?format=datacite-json")
+    except Exception:
+        if len(os.listdir(test_data_path)) == 0:
+            raise ConnectionError(
+                f"Cannot access https://osf.io/{osfID} and testing data " "are not yet downloaded"
+            )
+        else:
+            TestLGR.warning(
+                f"Cannot access https://osf.io/{osfID}. "
+                f"Using local copy of testing data in {test_data_path} "
+                "but cannot validate that local copy is up-to-date"
+            )
+            return
+    datainfo.raise_for_status()
+    metadata = json.loads(datainfo.content)
+    # 'dates' is a list with all updates to the file; the last item in the list
+    # is the most recent, and the 'date' field in that item is the date of the
+    # last update.
+    osf_filedate = metadata["dates"][-1]["date"]
+
+    # Find the file with the most recent date for comparison with
+    # the last updated date for the osf file
+    if os.path.exists(test_data_path):
+        filelist = glob.glob(f"{test_data_path}/*")
+        most_recent_file = max(filelist, key=os.path.getctime)
+        if os.path.exists(most_recent_file):
+            local_filedate = os.path.getmtime(most_recent_file)
+            local_filedate_str = str(datetime.fromtimestamp(local_filedate).date())
+            local_data_exists = True
+        else:
+            local_data_exists = False
+    else:
+        local_data_exists = False
+    if local_data_exists:
+        if local_filedate_str == osf_filedate:
+            TestLGR.INFO(
+                f"Downloaded and up-to-date data already in {test_data_path}. Not redownloading"
+            )
+            return
+        else:
+            TestLGR.INFO(
+                f"Downloaded data in {test_data_path} was last modified on "
+                f"{local_filedate_str}. 
Data on https://osf.io/{osfID} " + f" was last updated on {osf_filedate}. Deleting and redownloading" + ) + shutil.rmtree(test_data_path) + req = requests.get(f"https://osf.io/{osfID}/download") req.raise_for_status() t = tarfile.open(fileobj=GzipFile(fileobj=BytesIO(req.content))) - os.makedirs(outpath, exist_ok=True) - t.extractall(outpath) - - -def reclassify_path() -> str: - """Get the path to the reclassify test data.""" - return "/tmp/data/reclassify/" + os.makedirs(test_data_path, exist_ok=True) + t.extractall(test_data_path) def reclassify_raw() -> str: - return os.path.join(reclassify_path(), "TED.three-echo") + test_data_path, _ = data_for_testing_info("three-echo-reclassify") + return os.path.join(test_data_path, "TED.three-echo") def reclassify_raw_registry() -> str: return os.path.join(reclassify_raw(), "desc-tedana_registry.json") -def reclassify_url() -> str: - """Get the URL to reclassify test data.""" - return "https://osf.io/f6g45/download" +def guarantee_reclassify_data() -> None: + """Ensures that the reclassify data exists at the expected path and return path.""" + test_data_path, osfID = data_for_testing_info("three-echo-reclassify") -def guarantee_reclassify_data() -> None: - """Ensures that the reclassify data exists at the expected path.""" - if not os.path.exists(reclassify_raw_registry()): - download_test_data(reclassify_url(), reclassify_path()) - else: - # Path exists, be sure that everything in registry exists - ioh = InputHarvester(os.path.join(reclassify_raw(), "desc-tedana_registry.json")) - all_present = True - for _, v in ioh.registry.items(): - if not isinstance(v, list): - if not os.path.exists(os.path.join(reclassify_raw(), v)): - all_present = False - break - if not all_present: - # Something was removed, need to re-download - shutil.rmtree(reclassify_raw()) - guarantee_reclassify_data() + # Should now be checking and not downloading for each test so don't see if statement here + # if not os.path.exists(reclassify_raw_registry()): + download_test_data(osfID, test_data_path) + # else: + # Path exists, be sure that everything in registry exists + ioh = InputHarvester(reclassify_raw_registry()) + all_present = True + for _, v in ioh.registry.items(): + if not isinstance(v, list): + if not os.path.exists(os.path.join(reclassify_raw(), v)): + all_present = False + break + if not all_present: + # Something was removed, need to re-download + shutil.rmtree(reclassify_raw()) + guarantee_reclassify_data() + return test_data_path def test_integration_five_echo(skip_integration): @@ -123,18 +227,19 @@ def test_integration_five_echo(skip_integration): if skip_integration: pytest.skip("Skipping five-echo integration test") - out_dir = "/tmp/data/five-echo/TED.five-echo" - out_dir_manual = "/tmp/data/five-echo/TED.five-echo-manual" + test_data_path, osfID = data_for_testing_info("five-echo") + out_dir = os.path.abspath(os.path.join(test_data_path, "../../outputs/five-echo")) + # out_dir_manual = f"{out_dir}-manual" if os.path.exists(out_dir): shutil.rmtree(out_dir) - if os.path.exists(out_dir_manual): - shutil.rmtree(out_dir_manual) + # if os.path.exists(out_dir_manual): + # shutil.rmtree(out_dir_manual) # download data and run the test - download_test_data("https://osf.io/9c42e/download", os.path.dirname(out_dir)) - prepend = "/tmp/data/five-echo/p06.SBJ01_S09_Task11_e" + download_test_data(osfID, test_data_path) + prepend = f"{test_data_path}/p06.SBJ01_S09_Task11_e" suffix = ".sm.nii.gz" datalist = [prepend + str(i + 1) + suffix for i in range(5)] echo_times = 
[15.4, 29.7, 44.0, 58.3, 72.6] @@ -165,8 +270,9 @@ def test_integration_four_echo(skip_integration): if skip_integration: pytest.skip("Skipping four-echo integration test") - out_dir = "/tmp/data/four-echo/TED.four-echo" - out_dir_manual = "/tmp/data/four-echo/TED.four-echo-manual" + test_data_path, osfID = data_for_testing_info("four-echo") + out_dir = os.path.abspath(os.path.join(test_data_path, "../../outputs/four-echo")) + out_dir_manual = f"{out_dir}-manual" if os.path.exists(out_dir): shutil.rmtree(out_dir) @@ -175,9 +281,8 @@ def test_integration_four_echo(skip_integration): shutil.rmtree(out_dir_manual) # download data and run the test - download_test_data("https://osf.io/gnj73/download", os.path.dirname(out_dir)) - prepend = "/tmp/data/four-echo/" - prepend += "sub-PILOT_ses-01_task-localizerDetection_run-01_echo-" + download_test_data(osfID, test_data_path) + prepend = f"{test_data_path}/sub-PILOT_ses-01_task-localizerDetection_run-01_echo-" suffix = "_space-sbref_desc-preproc_bold+orig.HEAD" datalist = [prepend + str(i + 1) + suffix for i in range(4)] tedana_cli.tedana_workflow( @@ -211,8 +316,9 @@ def test_integration_three_echo(skip_integration): if skip_integration: pytest.skip("Skipping three-echo integration test") - out_dir = "/tmp/data/three-echo/TED.three-echo" - out_dir_manual = "/tmp/data/three-echo/TED.three-echo-rerun" + test_data_path, osfID = data_for_testing_info("three-echo") + out_dir = os.path.abspath(os.path.join(test_data_path, "../../outputs/three-echo")) + out_dir_manual = f"{out_dir}-rerun" if os.path.exists(out_dir): shutil.rmtree(out_dir) @@ -221,9 +327,9 @@ def test_integration_three_echo(skip_integration): shutil.rmtree(out_dir_manual) # download data and run the test - download_test_data("https://osf.io/rqhfc/download", os.path.dirname(out_dir)) + download_test_data(osfID, test_data_path) tedana_cli.tedana_workflow( - data="/tmp/data/three-echo/three_echo_Cornell_zcat.nii.gz", + data=f"{test_data_path}/three_echo_Cornell_zcat.nii.gz", tes=[14.5, 38.5, 62.5], out_dir=out_dir, low_mem=True, @@ -233,7 +339,7 @@ def test_integration_three_echo(skip_integration): # Test re-running, but use the CLI args = [ "-d", - "/tmp/data/three-echo/three_echo_Cornell_zcat.nii.gz", + f"{test_data_path}/three_echo_Cornell_zcat.nii.gz", "-e", "14.5", "38.5", @@ -261,7 +367,7 @@ def test_integration_reclassify_insufficient_args(skip_integration): args = [ "ica_reclassify", - os.path.join(reclassify_raw(), "desc-tedana_registry.json"), + reclassify_raw_registry(), ] result = subprocess.run(args, capture_output=True) @@ -273,8 +379,8 @@ def test_integration_reclassify_quiet_csv(skip_integration): if skip_integration: pytest.skip("Skip reclassify quiet csv") - guarantee_reclassify_data() - out_dir = os.path.join(reclassify_path(), "quiet") + test_data_path = guarantee_reclassify_data() + out_dir = os.path.abspath(os.path.join(test_data_path, "../outputs/reclassify/quiet")) if os.path.exists(out_dir): shutil.rmtree(out_dir) @@ -296,7 +402,7 @@ def test_integration_reclassify_quiet_csv(skip_integration): rej_csv_fname, "--out-dir", out_dir, - os.path.join(reclassify_raw(), "desc-tedana_registry.json"), + reclassify_raw_registry(), ] results = subprocess.run(args, capture_output=True) @@ -309,8 +415,8 @@ def test_integration_reclassify_quiet_spaces(skip_integration): if skip_integration: pytest.skip("Skip reclassify quiet space-delimited integers") - guarantee_reclassify_data() - out_dir = os.path.join(reclassify_path(), "quiet") + test_data_path = guarantee_reclassify_data() + 
out_dir = os.path.abspath(os.path.join(test_data_path, "../outputs/reclassify/quiet")) if os.path.exists(out_dir): shutil.rmtree(out_dir) @@ -326,7 +432,7 @@ def test_integration_reclassify_quiet_spaces(skip_integration): "6", "--out-dir", out_dir, - os.path.join(reclassify_raw(), "desc-tedana_registry.json"), + reclassify_raw_registry(), ] results = subprocess.run(args, capture_output=True) @@ -339,8 +445,9 @@ def test_integration_reclassify_quiet_string(skip_integration): if skip_integration: pytest.skip("Skip reclassify quiet string of integers") - guarantee_reclassify_data() - out_dir = os.path.join(reclassify_path(), "quiet") + test_data_path = guarantee_reclassify_data() + out_dir = os.path.abspath(os.path.join(test_data_path, "../outputs/reclassify/quiet")) + if os.path.exists(out_dir): shutil.rmtree(out_dir) @@ -352,7 +459,7 @@ def test_integration_reclassify_quiet_string(skip_integration): "4,5,6,", "--out-dir", out_dir, - os.path.join(reclassify_raw(), "desc-tedana_registry.json"), + reclassify_raw_registry(), ] results = subprocess.run(args, capture_output=True) @@ -365,8 +472,8 @@ def test_integration_reclassify_debug(skip_integration): if skip_integration: pytest.skip("Skip reclassify debug") - guarantee_reclassify_data() - out_dir = os.path.join(reclassify_path(), "debug") + test_data_path = guarantee_reclassify_data() + out_dir = os.path.abspath(os.path.join(test_data_path, "../outputs/reclassify/debug")) if os.path.exists(out_dir): shutil.rmtree(out_dir) @@ -386,7 +493,7 @@ def test_integration_reclassify_debug(skip_integration): "--out-dir", out_dir, "--debug", - os.path.join(reclassify_raw(), "desc-tedana_registry.json"), + reclassify_raw_registry(), ] results = subprocess.run(args, capture_output=True) @@ -399,8 +506,8 @@ def test_integration_reclassify_both_rej_acc(skip_integration): if skip_integration: pytest.skip("Skip reclassify both rejected and accepted") - guarantee_reclassify_data() - out_dir = os.path.join(reclassify_path(), "both_rej_acc") + test_data_path = guarantee_reclassify_data() + out_dir = os.path.abspath(os.path.join(test_data_path, "../outputs/reclassify/both_rej_acc")) if os.path.exists(out_dir): shutil.rmtree(out_dir) @@ -420,8 +527,8 @@ def test_integration_reclassify_run_twice(skip_integration): if skip_integration: pytest.skip("Skip reclassify both rejected and accepted") - guarantee_reclassify_data() - out_dir = os.path.join(reclassify_path(), "run_twice") + test_data_path = guarantee_reclassify_data() + out_dir = os.path.abspath(os.path.join(test_data_path, "../outputs/reclassify/run_twice")) if os.path.exists(out_dir): shutil.rmtree(out_dir) @@ -446,8 +553,8 @@ def test_integration_reclassify_no_bold(skip_integration, caplog): if skip_integration: pytest.skip("Skip reclassify both rejected and accepted") - guarantee_reclassify_data() - out_dir = os.path.join(reclassify_path(), "no_bold") + test_data_path = guarantee_reclassify_data() + out_dir = os.path.abspath(os.path.join(test_data_path, "../outputs/reclassify/no_bold")) if os.path.exists(out_dir): shutil.rmtree(out_dir) @@ -471,8 +578,8 @@ def test_integration_reclassify_accrej_files(skip_integration, caplog): if skip_integration: pytest.skip("Skip reclassify both rejected and accepted") - guarantee_reclassify_data() - out_dir = os.path.join(reclassify_path(), "no_bold") + test_data_path = guarantee_reclassify_data() + out_dir = os.path.abspath(os.path.join(test_data_path, "../outputs/reclassify/no_bold")) if os.path.exists(out_dir): shutil.rmtree(out_dir) @@ -496,13 +603,14 @@ def 
test_integration_t2smap(skip_integration):
     """Integration test of the full t2smap workflow using five-echo test data"""
     if skip_integration:
         pytest.skip("Skipping t2smap integration test")
-    out_dir = "/tmp/data/five-echo/t2smap_five-echo"
+    test_data_path, osfID = data_for_testing_info("five-echo")
+    out_dir = os.path.abspath(os.path.join(test_data_path, "../../outputs/t2smap_five-echo"))
     if os.path.exists(out_dir):
         shutil.rmtree(out_dir)
 
     # download data and run the test
-    download_test_data("https://osf.io/9c42e/download", os.path.dirname(out_dir))
-    prepend = "/tmp/data/five-echo/p06.SBJ01_S09_Task11_e"
+    download_test_data(osfID, test_data_path)
+    prepend = f"{test_data_path}/p06.SBJ01_S09_Task11_e"
     suffix = ".sm.nii.gz"
     datalist = [prepend + str(i + 1) + suffix for i in range(5)]
     echo_times = [15.4, 29.7, 44.0, 58.3, 72.6]

From 2e45e8d36e00dc0b7fc068f5e4067a102f3d40d8 Mon Sep 17 00:00:00 2001
From: handwerkerd
Date: Tue, 28 Feb 2023 16:28:08 -0500
Subject: [PATCH 163/177] Change to TestLGR.info

---
 tedana/tests/test_integration.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tedana/tests/test_integration.py b/tedana/tests/test_integration.py
index 03efe8e58..7aca3402d 100644
--- a/tedana/tests/test_integration.py
+++ b/tedana/tests/test_integration.py
@@ -170,12 +170,12 @@ def download_test_data(osfID, test_data_path):
         local_data_exists = False
     if local_data_exists:
         if local_filedate_str == osf_filedate:
-            TestLGR.INFO(
+            TestLGR.info(
                 f"Downloaded and up-to-date data already in {test_data_path}. Not redownloading"
             )
             return
         else:
-            TestLGR.INFO(
+            TestLGR.info(
                 f"Downloaded data in {test_data_path} was last modified on "
                 f"{local_filedate_str}. Data on https://osf.io/{osfID} "
                 f" was last updated on {osf_filedate}. Deleting and redownloading"

From 8acf185f200b06eb34262b4f76472691130cc3b5 Mon Sep 17 00:00:00 2001
From: Dan Handwerker <7406227+handwerkerd@users.noreply.github.com>
Date: Tue, 14 Mar 2023 21:46:08 -0400
Subject: [PATCH 164/177] Fixing high variance classification mess (#34)

* Added dec_reclassify_high_var_comps plus
* clarified diff btwn rho_kundu and _liberal thresh
* Clarified docs for minimal tree

---
 docs/faq.rst                                  |  10 +-
 .../decision_trees/invalid_kundu_bkup.json    | 267 ------------------
 tedana/resources/decision_trees/kundu.json    |  61 ++--
 tedana/selection/selection_nodes.py           | 195 +++++++++++--
 tedana/selection/selection_utils.py           |  67 ++---
 tedana/tests/test_component_selector.py       |   4 +-
 tedana/tests/test_selection_nodes.py          | 105 +++++--
 tedana/tests/test_selection_utils.py          |  14 +-
 tedana/workflows/tedana.py                    |  61 ++--
 9 files changed, 391 insertions(+), 393 deletions(-)
 delete mode 100644 tedana/resources/decision_trees/invalid_kundu_bkup.json

diff --git a/docs/faq.rst b/docs/faq.rst
index a792fe77f..35c2af077 100644
--- a/docs/faq.rst
+++ b/docs/faq.rst
@@ -125,9 +125,13 @@ removed this way.
 range of datasets, but the primary benefit is that it is possible to describe what it does
 in a short paragraph. The minimal tree will retain some components that kundu
 appropriately classifies as noise, and it will reject some components that kundu
-accepts. On balance, we expect it to be a more conservative option that should not
-remove noise as aggressively as kundu, but will be less likely to reject components that
-clearly contain signal-of-interest.
+accepts. The goal for the minimal tree is to be a more conservative option that
+will be less likely to reject components that clearly contain signal-of-interest, but
+this has not yet been validated. 
The precise thresholds and steps in the minimal +tree may change as the results from running it are examined on a wider range of data. +The developers are looking for more people to compare results between the kundu and +minimal trees, but if someone values stability when processing a large dataset, +the minimal tree might not be the best option until it is tested and validated more. It is also possible for users to view both decision trees and `make their own`_. This might be useful for general methods development and also for using ``tedana`` diff --git a/tedana/resources/decision_trees/invalid_kundu_bkup.json b/tedana/resources/decision_trees/invalid_kundu_bkup.json deleted file mode 100644 index e2373b4be..000000000 --- a/tedana/resources/decision_trees/invalid_kundu_bkup.json +++ /dev/null @@ -1,267 +0,0 @@ -{ - "tree_id": "kundu_MEICA27_decision_tree", - "info": "Following the full decision tree designed by Prantik Kundu", - "report": "This is based on the minimal criteria of the original MEICA decision tree without the more agressive noise removal steps", - "refs": "Kundu 2013", - "necessary_metrics": [ - "kappa", - "rho", - "countsigFS0", - "countsigFT2", - "dice_FS0", - "dice_FT2", - "signal-noise_t", - "variance explained", - "d_table_score" - ], - "nodes": [ - { - "functionname": "manual_classify", - "parameters": { - "new_classification": "unclassified", - "decide_comps": "all" - }, - "kwargs": { - "log_extra_info": "Initializing all classifications as unclassified and all rationales as blank", - "log_extra_report": "", - "clear_rationale": true - } - }, - { - "functionname": "metric1_greaterthan_metric2", - "parameters": { - "if_true": "rejected", - "if_false": "nochange", - "decide_comps": "all", - "metric1": "rho", - "metric2": "kappa" - }, - "kwargs": { - "log_extra_info": "Reject if Kappa>Rho", - "log_extra_report": "", - "metric2_scale": 1 - } - }, - { - "functionname": "metric1_greaterthan_metric2", - "parameters": { - "if_true": "rejected", - "if_false": "nochange", - "decide_comps": "all", - "metric1": "countsigFS0", - "metric2": "countsigFT2" - }, - "kwargs": { - "log_extra_info": "Reject if countsig_in S0clusters > T2clusters", - "log_extra_report": "", - "metric2_scale": 1 - } - }, - { - "functionname": "metric1_greaterthan_metric2", - "parameters": { - "if_true": "rejected", - "if_false": "nochange", - "decide_comps": "all", - "metric1": "dice_FS0", - "metric2": "dice_FT2" - }, - "kwargs": { - "log_extra_info": "Reject if DICE S0>T2", - "log_extra_report": "", - "metric2_scale": 1 - } - }, - { - "functionname": "metric1_greaterthan_metric2", - "parameters": { - "if_true": "rejected", - "if_false": "nochange", - "decide_comps": "all", - "metric1": 0, - "metric2": "signal-noise_t" - }, - "kwargs": { - "log_extra_info": "Reject if T2fitdiff_invsout_ICAmap_Tstat<0", - "log_extra_report": "", - "metric2_scale": 1 - } - }, - { - "functionname": "kappa_rho_elbow_cutoffs_kundu", - "parameters": { - "if_true": "provisionalaccept", - "if_false": "provisionalreject", - "decide_comps": "unclassified", - "n_echos": null - }, - "kwargs": { - "log_extra_info": "", - "log_extra_report": "" - } - }, - { - "functionname": "classification_exists", - "parameters": { - "if_true": "nochange", - "if_false": "ignored", - "decide_comps": [ - "provisionalaccept", - "provisionalreject" - ], - "class_comp_exists": "provisionalaccept" - }, - "kwargs": { - "log_extra_info": "", - "log_extra_report": "" - } - }, - { - "functionname": "meanmetricrank_and_variance_greaterthan_thresh", - 
"parameters": { - "if_true": "rejected", - "if_false": "nochange", - "decide_comps": [ - "provisionalaccept", - "provisionalreject" - ], - "n_vols": null - }, - "kwargs": { - "high_perc": 90, - "log_extra_info": "", - "log_extra_report": "" - } - }, - { - "functionname": "lowvariance_highmeanmetricrank_lowkappa", - "parameters": { - "if_true": "ignored", - "if_false": "nochange", - "decide_comps": [ - "provisionalaccept", - "provisionalreject" - ], - "n_echos": null, - "n_vols": null - }, - "kwargs": { - "low_perc": 25, - "log_extra_info": "", - "log_extra_report": "" - } - }, - { - "functionname": "classification_exists", - "parameters": { - "if_true": "nochange", - "if_false": "accepted", - "decide_comps": [ - "provisionalaccept", - "provisionalreject" - ], - "class_comp_exists": "provisionalreject" - }, - "kwargs": { - "log_extra_info": "", - "log_extra_report": "" - } - }, - { - "functionname": "highvariance_highmeanmetricrank_highkapparatio", - "parameters": { - "if_true": "rejected", - "if_false": "nochange", - "decide_comps": [ - "provisionalaccept", - "provisionalreject" - ], - "n_echos": null, - "n_vols": null - }, - "kwargs": { - "prev_X_steps": 3, - "log_extra_info": "", - "log_extra_report": "" - } - }, - { - "functionname": "highvariance_highmeanmetricrank", - "parameters": { - "if_true": "rejected", - "if_false": "nochange", - "decide_comps": [ - "provisionalaccept", - "provisionalreject" - ], - "n_echos": null, - "n_vols": null - }, - "kwargs": { - "prev_X_steps": 1, - "log_extra_info": "", - "log_extra_report": "" - } - }, - { - "functionname": "highvariance_highmeanmetricrank", - "parameters": { - "if_true": "ignored", - "if_false": "nochange", - "decide_comps": [ - "provisionalaccept", - "provisionalreject" - ], - "n_echos": null - }, - "kwargs": { - "prev_X_steps": 2, - "high_perc": 100, - "extend_factor": 1, - "recalc_varex_lower_thresh": true, - "log_extra_info": "", - "log_extra_report": "" - } - }, - { - "functionname": "highvariance_lowkappa", - "parameters": { - "if_true": "ignored", - "if_false": "nochange", - "decide_comps": [ - "provisionalaccept", - "provisionalreject" - ], - "n_echos": null - }, - "kwargs": { - "log_extra_info": "", - "log_extra_report": "" - } - }, - { - "functionname": "manual_classify", - "parameters": { - "new_classification": "accepted", - "decide_comps": "provisionalaccept" - }, - "kwargs": { - "log_extra_info": "", - "log_extra_report": "", - "clear_rationale": false - } - }, - { - "functionname": "manual_classify", - "parameters": { - "new_classification": "rejected", - "decide_comps": "provisionalreject" - }, - "kwargs": { - "log_extra_info": "", - "log_extra_report": "", - "clear_rationale": false - } - } - ] -} diff --git a/tedana/resources/decision_trees/kundu.json b/tedana/resources/decision_trees/kundu.json index 9ed9c57ec..72f0c5770 100644 --- a/tedana/resources/decision_trees/kundu.json +++ b/tedana/resources/decision_trees/kundu.json @@ -16,11 +16,12 @@ "countnoise" ], "generated_metrics": [ - "d_table_score_node19", + "d_table_score_node20", "varex kappa ratio" ], "intermediate_classifications": [ - "provisionalaccept" + "provisionalaccept", + "unclass_highvar" ], "classification_tags": [ "Likely BOLD", @@ -131,6 +132,18 @@ }, "_comment": "" }, + { + "functionname": "dec_reclassify_high_var_comps", + "parameters": { + "decide_comps": "unclassified", + "new_classification": "unclass_highvar" + }, + "kwargs": { + "log_extra_info": "", + "log_extra_report": "" + }, + "_comment": "" + }, { "functionname": "calc_rho_elbow", 
"parameters": { @@ -180,7 +193,8 @@ "new_classification": "accepted", "decide_comps": [ "provisionalaccept", - "unclassified" + "unclassified", + "unclass_highvar" ], "class_comp_exists": "provisionalaccept" }, @@ -239,7 +253,8 @@ "if_false": "nochange", "decide_comps": [ "provisionalaccept", - "unclassified" + "unclassified", + "unclass_highvar" ], "op": ">", "left": "d_table_score", @@ -262,7 +277,8 @@ "if_false": "nochange", "decide_comps": [ "provisionalaccept", - "unclassified" + "unclassified", + "unclass_highvar" ], "op": ">", "left": "d_table_score", @@ -286,9 +302,13 @@ "new_classification": "accepted", "decide_comps": [ "provisionalaccept", - "unclassified" + "unclassified", + "unclass_highvar" ], - "class_comp_exists": "unclassified" + "class_comp_exists": [ + "unclassified", + "unclass_highvar" + ] }, "kwargs": { "tag": "Likely BOLD", @@ -302,7 +322,8 @@ "parameters": { "decide_comps": [ "provisionalaccept", - "unclassified" + "unclassified", + "unclass_highvar" ] }, "kwargs": {}, @@ -315,10 +336,11 @@ "if_false": "nochange", "decide_comps": [ "provisionalaccept", - "unclassified" + "unclassified", + "unclass_highvar" ], "op": ">", - "left": "d_table_score_node19", + "left": "d_table_score_node20", "right": "conservative_guess" }, "kwargs": { @@ -342,10 +364,11 @@ "if_false": "nochange", "decide_comps": [ "provisionalaccept", - "unclassified" + "unclassified", + "unclass_highvar" ], "op": ">", - "left": "d_table_score_node19", + "left": "d_table_score_node20", "right": "num_acc_guess" }, "kwargs": { @@ -364,7 +387,8 @@ "parameters": { "decide_comps": [ "provisionalaccept", - "unclassified" + "unclassified", + "unclass_highvar" ], "thresh_label": "new_lower", "percentile_thresh": 25 @@ -380,10 +404,11 @@ "if_false": "nochange", "decide_comps": [ "provisionalaccept", - "unclassified" + "unclassified", + "unclass_highvar" ], "op": ">", - "left": "d_table_score_node19", + "left": "d_table_score_node20", "right": "num_acc_guess" }, "kwargs": { @@ -402,7 +427,8 @@ "if_false": "nochange", "decide_comps": [ "provisionalaccept", - "unclassified" + "unclassified", + "unclass_highvar" ], "op": "<=", "left": "kappa", @@ -423,7 +449,8 @@ "new_classification": "accepted", "decide_comps": [ "provisionalaccept", - "unclassified" + "unclassified", + "unclass_highvar" ] }, "kwargs": { diff --git a/tedana/selection/selection_nodes.py b/tedana/selection/selection_nodes.py index 00228e3d9..4a91c22a0 100644 --- a/tedana/selection/selection_nodes.py +++ b/tedana/selection/selection_nodes.py @@ -721,6 +721,12 @@ def calc_kappa_elbow( a kappa elbow calculation on all components and on a subset of kappa values below a significance threshold. To get the same functionality as in MEICA v2.5, decide_comps must be 'all'. + + varex_upper_p isn't used for anything in this function, but it is calculated + on kappa values and is used in rho_elbow_kundu_liberal and + dec_reclassify_high_var_comps. For several reasons it made more sense to calculate here. 
+ This also means the kappa elbow should be calculated before those two other functions + are called """ outputs = { "decision_node_idx": selector.current_node_idx, @@ -731,10 +737,12 @@ def calc_kappa_elbow( "kappa_elbow_kundu", "kappa_allcomps_elbow", "kappa_nonsig_elbow", + "varex_upper_p", ], "kappa_elbow_kundu": None, "kappa_allcomps_elbow": None, "kappa_nonsig_elbow": None, + "varex_upper_p": None, } if only_used_metrics: @@ -750,6 +758,11 @@ def calc_kappa_elbow( f"Overwriting previous value in {function_name_idx}" ) + if "varex_upper_p" in selector.cross_component_metrics: + LGR.warning( + f"varex_upper_p already calculated. Overwriting previous value in {function_name_idx}" + ) + if custom_node_label: outputs["node_label"] = custom_node_label else: @@ -777,10 +790,12 @@ def calc_kappa_elbow( outputs["kappa_elbow_kundu"], outputs["kappa_allcomps_elbow"], outputs["kappa_nonsig_elbow"], + outputs["varex_upper_p"], ) = kappa_elbow_kundu(selector.component_table, selector.n_echos, comps2use=comps2use) selector.cross_component_metrics["kappa_elbow_kundu"] = outputs["kappa_elbow_kundu"] selector.cross_component_metrics["kappa_allcomps_elbow"] = outputs["kappa_allcomps_elbow"] selector.cross_component_metrics["kappa_nonsig_elbow"] = outputs["kappa_nonsig_elbow"] + selector.cross_component_metrics["varex_upper_p"] = outputs["varex_upper_p"] log_decision_tree_step(function_name_idx, comps2use, calc_outputs=outputs) @@ -851,14 +866,12 @@ def calc_rho_elbow( "n_echos": selector.n_echos, "calc_cross_comp_metrics": [ elbow_name, - "varex_upper_p", "rho_allcomps_elbow", "rho_unclassified_elbow", "elbow_f05", ], "used_metrics": set(["kappa", "rho", "variance explained"]), elbow_name: None, - "varex_upper_p": None, "rho_allcomps_elbow": None, "rho_unclassified_elbow": None, "elbow_f05": None, @@ -875,11 +888,6 @@ def calc_rho_elbow( f"Overwriting previous value in {function_name_idx}" ) - if "varex_upper_p" in selector.cross_component_metrics: - LGR.warning( - f"varex_upper_p already calculated. Overwriting previous value in {function_name_idx}" - ) - if custom_node_label: outputs["node_label"] = custom_node_label else: @@ -907,7 +915,6 @@ def calc_rho_elbow( else: ( outputs[elbow_name], - outputs["varex_upper_p"], outputs["rho_allcomps_elbow"], outputs["rho_unclassified_elbow"], outputs["elbow_f05"], @@ -919,7 +926,6 @@ def calc_rho_elbow( subset_comps2use=subset_comps2use, ) selector.cross_component_metrics[elbow_name] = outputs[elbow_name] - selector.cross_component_metrics["varex_upper_p"] = outputs["varex_upper_p"] selector.cross_component_metrics["rho_allcomps_elbow"] = outputs["rho_allcomps_elbow"] selector.cross_component_metrics["rho_unclassified_elbow"] = outputs[ "rho_unclassified_elbow" @@ -1065,6 +1071,145 @@ def dec_classification_doesnt_exist( return selector +@fill_doc +def dec_reclassify_high_var_comps( + selector, + new_classification, + decide_comps, + log_extra_report="", + log_extra_info="", + custom_node_label="", + only_used_metrics=False, + tag=None, +): + """ + Identifies and reclassifies a couple components with the largest gaps in variance + + Parameters + ---------- + %(selector)s + new_classification: :obj:`str` + Assign all components identified in decide_comps the classification + in new_classification. + %(decide_comps)s + %(log_extra_info)s + %(log_extra_report)s + %(custom_node_label)s + %(only_used_metrics)s + tag: :obj:`str` + A classification tag to assign to all components being reclassified. 
+ This should be one of the tags defined by classification_tags in + the decision tree specification. Default="". + + Returns + ------- + %(selector)s + %(used_metrics)s + + Note + ---- + This function should not exist, but with the goal of maintaining the results of + the original MEICA decision tree it is necessary, so here it is. + It is a quirky and brittle step that is used to remove a few higher variance + components from the calculation of the rho elbow. In the kundu decision tree, + these components are also excluded from being provisionally accepted if + kappa>kappa_elbow and rho len(comps2use): + # NOTE: This was originally an error, but the original tedana code has no check + # at all and it looks like sorted_varex[:num_highest_var_comps] does not + # crash and always maxes out at the length of sorted_varex. Since this is + # an edge case, decided to print an info message and change the value even + # if this won't affect functionality + LGR.info( + f"{function_name_idx}: num_highest_var_comps ({num_highest_var_comps}) > " + f"len(comps2use) ({len(comps2use)}). Setting to equal len(comps2use) since " + "selection should not use more components than exist" ) + num_highest_var_comps = len(comps2use) + + sorted_varex = np.flip( + np.sort((selector.component_table.loc[comps2use, "variance explained"]).to_numpy()) + ) + outputs[varex_name] = scoreatpercentile( + sorted_varex[:num_highest_var_comps], percentile_thresh + ) + selector.cross_component_metrics[varex_name] = outputs[varex_name] log_decision_tree_step(function_name_idx, comps2use, calc_outputs=outputs) diff --git a/tedana/selection/selection_utils.py b/tedana/selection/selection_utils.py index f1a619584..5ab9b884f 100644 --- a/tedana/selection/selection_utils.py +++ b/tedana/selection/selection_utils.py @@ -598,6 +598,9 @@ def kappa_elbow_kundu(component_table, n_echos, comps2use=None): kappa_nonsig_elbow : :obj:`float` The elbow for kappa values excluding kappa values above a threshold None if there are fewer than 6 values remaining after thresholding + varex_upper_p : :obj:`float` + This is the median "variance explained" across components with kappa values + greater than the kappa_elbow calculated using all components Note ---- @@ -608,6 +611,10 @@ def kappa_elbow_kundu(component_table, n_echos, comps2use=None): includes all component numbers. If comps2use includes indices for only a subset of components then the kappa values from just those components will be used for both elbow calculations. + + varex_upper_p isn't used for anything in this function, but it is calculated + on kappa values and is used in rho_elbow_kundu_liberal. For several reasons + it made more sense to calculate here. """ # If comps2use is None then set to a list of all component numbers @@ -637,7 +644,19 @@ def kappa_elbow_kundu(component_table, n_echos, comps2use=None): kappa_nonsig_elbow = None LGR.info(("Calculating kappa elbow based on all components.")) - return kappa_elbow, kappa_allcomps_elbow, kappa_nonsig_elbow + # Calculating varex_upper_p + # Upper limit for variance explained is median across components with high + # Kappa values. High Kappa is defined as Kappa above Kappa elbow. 
+    high_kappa_idx = np.squeeze(np.argwhere(kappas2use > kappa_allcomps_elbow))
+    # list(kappa_comps2use.index[kappas2use > kappa_allcomps_elbow])
+    varex_upper_p = np.median(
+        component_table.loc[
+            high_kappa_idx,
+            "variance explained",
+        ]
+    )
+
+    return kappa_elbow, kappa_allcomps_elbow, kappa_nonsig_elbow, varex_upper_p
 
 
 def rho_elbow_kundu_liberal(
@@ -672,10 +691,6 @@
     rho_elbow : :obj:`float`
         The 'elbow' value for rho values, above which components are considered
         more likely to contain S0 weighted signals
-    varex_upper_p : :obj:`float`
-        This is the median "variance explained" across components with kappa values
-        greater than the kappa_elbow calculated using all components
-        None if subset_comps2use is None
     rho_allcomps_elbow : :obj:`float`
        rho elbow calculated using all components in comps2use
     rho_unclassified_elbow : :obj:`float`
@@ -689,7 +704,7 @@
     ----
     The rho elbow calculation in Kundu's original meica code calculates one
     elbow using all components' rho values, one elbow using only
-    unclassified components (plus some quirky stuff with high variance components),
+    unclassified components (excluding 2-3 remaining high variance components),
     one threshold based on the number of echos, and takes the mean of those 3 values.
     To replicate the original code, comps2use should include indices for all components
     and subset_comps2use should include indices for unclassified components
@@ -700,7 +715,15 @@
     elbows based on rho values. The assumption is that the threshold on
     unclassified components is always lower and can likely be excluded. Both
     rho elbows are now logged so that it will be possible to confirm this with
-    data & make additional adjustments to this threshold
+    data & make additional adjustments to this threshold.
+
+    Additionally, the liberal threshold does not exclude 2-3 high variance components
+    from the unclassified threshold. This was done as a practical matter because
+    those components are now removed in a separate node, dec_reclassify_high_var_comps,
+    and adding that separate node to the minimal tree would make it less minimal, but
+    it also seems reasonable since there was no clear reason why the elbow with them
+    removed was reliably better than the elbow containing them. More direct comparisons
+    between these two arbitrary thresholds might be useful at some point.
     """
     if rho_elbow_type not in ["kundu", "liberal"]:
         raise ValueError(
@@ -735,38 +758,10 @@
             "No unclassified components for rho elbow calculation only elbow based "
             "on all components is used"
         )
-        varex_upper_p = None
         rho_unclassified_elbow = None
         rho_elbow = rho_allcomps_elbow
     else:
-        # Calculating varex_upper_p
-        # Upper limit for variance explained is median across components with high
-        # Kappa values. High Kappa is defined as Kappa above Kappa elbow.
- kappa_comps2use = component_table.loc[comps2use, "kappa"] - high_kappa_idx = list( - kappa_comps2use.index[ - kappa_comps2use - > getelbow(component_table.loc[comps2use, "kappa"], return_val=True) - ] - ) - varex_upper_p = np.median( - component_table.loc[ - high_kappa_idx, - "variance explained", - ] - ) - - # Removing large gaps in variance in the subset_comps2use before - # calculating this subset elbow threshold - for i_loop in range(3): - temp_comptable = component_table.loc[subset_comps2use].sort_values( - by=["variance explained"], ascending=False - ) - diff_vals = temp_comptable["variance explained"].diff(-1) - diff_vals = diff_vals.fillna(0) - subset_comps2use = temp_comptable.loc[diff_vals < varex_upper_p].index.values - rho_unclassified_elbow = getelbow( component_table.loc[subset_comps2use, "rho"], return_val=True ) @@ -776,7 +771,7 @@ def rho_elbow_kundu_liberal( else: # rho_elbow_type == 'liberal' rho_elbow = np.maximum(rho_allcomps_elbow, rho_unclassified_elbow) - return rho_elbow, varex_upper_p, rho_allcomps_elbow, rho_unclassified_elbow, elbow_f05 + return rho_elbow, rho_allcomps_elbow, rho_unclassified_elbow, elbow_f05 def get_extend_factor(n_vols=None, extend_factor=None): diff --git a/tedana/tests/test_component_selector.py b/tedana/tests/test_component_selector.py index 50f53960d..5f9a3da52 100644 --- a/tedana/tests/test_component_selector.py +++ b/tedana/tests/test_component_selector.py @@ -175,7 +175,7 @@ def test_minimal(): selector = component_selector.ComponentSelector( "minimal", sample_comptable(), - cross_component_metrics=xcomp, + cross_component_metrics=xcomp.copy(), ) selector.select() @@ -183,7 +183,7 @@ def test_minimal(): selector = component_selector.ComponentSelector( "minimal", sample_comptable(), - cross_component_metrics=xcomp, + cross_component_metrics=xcomp.copy(), ) selector.component_table = selector.component_table.drop(columns="classification_tags") selector.select() diff --git a/tedana/tests/test_selection_nodes.py b/tedana/tests/test_selection_nodes.py index 26a716b41..909af2b21 100644 --- a/tedana/tests/test_selection_nodes.py +++ b/tedana/tests/test_selection_nodes.py @@ -437,6 +437,7 @@ def test_calc_kappa_elbow(): "kappa_elbow_kundu", "kappa_allcomps_elbow", "kappa_nonsig_elbow", + "varex_upper_p", } output_calc_cross_comp_metrics = set( selector.tree["nodes"][selector.current_node_idx]["outputs"]["calc_cross_comp_metrics"] @@ -446,6 +447,7 @@ def test_calc_kappa_elbow(): assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["kappa_elbow_kundu"] > 0 assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["kappa_allcomps_elbow"] > 0 assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["kappa_nonsig_elbow"] > 0 + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["varex_upper_p"] > 0 # Using a subset of components for decide_comps. 
selector = selection_nodes.calc_kappa_elbow( @@ -459,6 +461,7 @@ def test_calc_kappa_elbow(): "kappa_elbow_kundu", "kappa_allcomps_elbow", "kappa_nonsig_elbow", + "varex_upper_p", } output_calc_cross_comp_metrics = set( selector.tree["nodes"][selector.current_node_idx]["outputs"]["calc_cross_comp_metrics"] @@ -468,6 +471,7 @@ def test_calc_kappa_elbow(): assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["kappa_elbow_kundu"] > 0 assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["kappa_allcomps_elbow"] > 0 assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["kappa_nonsig_elbow"] > 0 + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["varex_upper_p"] > 0 # No components with "NotALabel" classification so nothing selected selector = sample_selector() @@ -485,6 +489,7 @@ def test_calc_kappa_elbow(): assert ( selector.tree["nodes"][selector.current_node_idx]["outputs"]["kappa_nonsig_elbow"] is None ) + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["varex_upper_p"] is None def test_calc_rho_elbow(): @@ -507,7 +512,6 @@ def test_calc_rho_elbow(): ) calc_cross_comp_metrics = { "rho_elbow_kundu", - "varex_upper_p", "rho_allcomps_elbow", "rho_unclassified_elbow", "elbow_f05", @@ -517,7 +521,6 @@ def test_calc_rho_elbow(): ) # Confirming the intended metrics are added to outputs and they have non-zero values assert len(output_calc_cross_comp_metrics - calc_cross_comp_metrics) == 0 - assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["varex_upper_p"] > 0 assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["rho_elbow_kundu"] > 0 assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["rho_allcomps_elbow"] > 0 assert ( @@ -536,7 +539,6 @@ def test_calc_rho_elbow(): ) calc_cross_comp_metrics = { "rho_elbow_liberal", - "varex_upper_p", "rho_allcomps_elbow", "rho_unclassified_elbow", "elbow_f05", @@ -546,7 +548,6 @@ def test_calc_rho_elbow(): ) # Confirming the intended metrics are added to outputs and they have non-zero values assert len(output_calc_cross_comp_metrics - calc_cross_comp_metrics) == 0 - assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["varex_upper_p"] > 0 assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["rho_elbow_liberal"] > 0 assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["rho_allcomps_elbow"] > 0 assert ( @@ -564,7 +565,6 @@ def test_calc_rho_elbow(): ) calc_cross_comp_metrics = { "rho_elbow_kundu", - "varex_upper_p", "rho_allcomps_elbow", "rho_unclassified_elbow", "elbow_f05", @@ -574,7 +574,6 @@ def test_calc_rho_elbow(): ) # Confirming the intended metrics are added to outputs and they have non-zero values assert len(output_calc_cross_comp_metrics - calc_cross_comp_metrics) == 0 - assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["varex_upper_p"] > 0 assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["rho_elbow_kundu"] > 0 assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["rho_allcomps_elbow"] > 0 assert ( @@ -591,7 +590,6 @@ def test_calc_rho_elbow(): # Outputs just the metrics used in this function selector = selection_nodes.calc_rho_elbow(selector, decide_comps) - assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["varex_upper_p"] is None assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["rho_elbow_kundu"] is None assert ( 
selector.tree["nodes"][selector.current_node_idx]["outputs"]["rho_allcomps_elbow"] is None @@ -766,6 +764,69 @@ def test_dec_classification_doesnt_exist_smoke(): assert f"Node {selector.current_node_idx}" in selector.component_status_table +def test_dec_reclassify_high_var_comps(): + """tests for dec_reclassify_high_var_comps""" + + selector = sample_selector(options="unclass") + decide_comps = "unclassified" + + # Outputs just the metrics used in this function {"variance explained"} + used_metrics = selection_nodes.dec_reclassify_high_var_comps( + selector, + "unclass_highvar", + decide_comps, + only_used_metrics=True, + ) + assert len(used_metrics - set(["variance explained"])) == 0 + + # Raises an error since varex_upper_p not in cross_component_metrics + # & there are components in decide_comps + with pytest.raises(ValueError): + selection_nodes.dec_reclassify_high_var_comps( + selector, + "unclass_highvar", + decide_comps, + ) + + # varex_upper_p not in cross_component_metrics, + # but doesn't raise an error because no components in decide_comps + selection_nodes.dec_reclassify_high_var_comps( + selector, + "unclass_highvar", + "NotAClassification", + ) + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["n_true"] == 0 + assert f"Node {selector.current_node_idx}" not in selector.component_status_table + + # Add varex_upper_p to cross component_metrics to run normal test + selector = sample_selector(options="unclass") + selector.cross_component_metrics["varex_upper_p"] = 0.97 + + # Standard execution where with all extra logging code and options changed from defaults + selection_nodes.dec_reclassify_high_var_comps( + selector, + "unclass_highvar", + decide_comps, + log_extra_report="report log", + log_extra_info="info log", + custom_node_label="custom label", + tag="test true tag", + ) + # Lists the number of components in decide_comps in n_true or n_false + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["n_true"] == 3 + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["n_false"] == 10 + assert f"Node {selector.current_node_idx}" in selector.component_status_table + + # No components with "NotALabel" classification so nothing selected and no + # Node 1 column is created in component_status_table + selector.current_node_idx = 1 + selector = selection_nodes.dec_reclassify_high_var_comps( + selector, "unclass_highvar", "NotAClassification" + ) + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["n_true"] == 0 + assert f"Node {selector.current_node_idx}" not in selector.component_status_table + + def test_calc_varex_thresh_smoke(): """Smoke tests for calc_varex_thresh""" @@ -892,15 +953,27 @@ def test_calc_varex_thresh_smoke(): num_highest_var_comps=9.5, ) - # Raise error if num_highest_var_comps is larger than the number of selected components - with pytest.raises(ValueError): - selector = selection_nodes.calc_varex_thresh( - selector, - decide_comps, - thresh_label="new_lower", - percentile_thresh=25, - num_highest_var_comps=55, - ) + # Still run num_highest_var_comps is larger than the number of selected components + # NOTE: To match original functionaly this will run but add an info message + # and set num_highest_var_comps to the number of selected components + # + selector = selection_nodes.calc_varex_thresh( + selector, + decide_comps, + thresh_label="new_lower", + percentile_thresh=25, + num_highest_var_comps=55, + ) + calc_cross_comp_metrics = {"varex_new_lower_thresh", "new_lower_perc"} + 
output_calc_cross_comp_metrics = set( + selector.tree["nodes"][selector.current_node_idx]["outputs"]["calc_cross_comp_metrics"] + ) + # Confirming the intended metrics are added to outputs and they have non-zero values + assert len(output_calc_cross_comp_metrics - calc_cross_comp_metrics) == 0 + assert ( + selector.tree["nodes"][selector.current_node_idx]["outputs"]["varex_new_lower_thresh"] > 0 + ) + assert selector.tree["nodes"][selector.current_node_idx]["outputs"]["new_lower_perc"] == 25 # Run warning logging code to see if any of the cross_component_metrics # already exists and would be over-written diff --git a/tedana/tests/test_selection_utils.py b/tedana/tests/test_selection_utils.py index 14fd63399..2f25baea8 100644 --- a/tedana/tests/test_selection_utils.py +++ b/tedana/tests/test_selection_utils.py @@ -361,10 +361,12 @@ def test_kappa_elbow_kundu_smoke(): kappa_elbow_kundu, kappa_allcomps_elbow, kappa_nonsig_elbow, + varex_upper_p, ) = selection_utils.kappa_elbow_kundu(component_table, n_echos=5) assert isinstance(kappa_elbow_kundu, float) assert isinstance(kappa_allcomps_elbow, float) assert isinstance(kappa_nonsig_elbow, float) + assert isinstance(varex_upper_p, float) # For the sample component_table, when n_echos=6, there are fewer than 5 components # that are greater than an f01 threshold and a different condition in kappa_elbow_kundu is run @@ -372,16 +374,19 @@ def test_kappa_elbow_kundu_smoke(): kappa_elbow_kundu, kappa_allcomps_elbow, kappa_nonsig_elbow, + varex_upper_p, ) = selection_utils.kappa_elbow_kundu(component_table, n_echos=6) assert isinstance(kappa_elbow_kundu, float) assert isinstance(kappa_allcomps_elbow, float) assert isinstance(kappa_nonsig_elbow, type(None)) + assert isinstance(varex_upper_p, float) # Run using only a subset of components ( kappa_elbow_kundu, kappa_allcomps_elbow, kappa_nonsig_elbow, + varex_upper_p, ) = selection_utils.kappa_elbow_kundu( component_table, n_echos=5, @@ -390,6 +395,7 @@ def test_kappa_elbow_kundu_smoke(): assert isinstance(kappa_elbow_kundu, float) assert isinstance(kappa_allcomps_elbow, float) assert isinstance(kappa_nonsig_elbow, float) + assert isinstance(varex_upper_p, float) def test_rho_elbow_kundu_liberal_smoke(): @@ -399,13 +405,11 @@ def test_rho_elbow_kundu_liberal_smoke(): # Normal execution with default kundu threshold ( rho_elbow_kundu, - varex_upper_p, rho_allcomps_elbow, rho_unclassified_elbow, elbow_f05, ) = selection_utils.rho_elbow_kundu_liberal(component_table, n_echos=3) assert isinstance(rho_elbow_kundu, float) - assert isinstance(varex_upper_p, float) assert isinstance(rho_allcomps_elbow, float) assert isinstance(rho_unclassified_elbow, float) assert isinstance(elbow_f05, float) @@ -413,7 +417,6 @@ def test_rho_elbow_kundu_liberal_smoke(): # Normal execution with liberal threshold ( rho_elbow_kundu, - varex_upper_p, rho_allcomps_elbow, rho_unclassified_elbow, elbow_f05, @@ -421,7 +424,6 @@ def test_rho_elbow_kundu_liberal_smoke(): component_table, n_echos=3, rho_elbow_type="liberal" ) assert isinstance(rho_elbow_kundu, float) - assert isinstance(varex_upper_p, float) assert isinstance(rho_allcomps_elbow, float) assert isinstance(rho_unclassified_elbow, float) assert isinstance(elbow_f05, float) @@ -429,7 +431,6 @@ def test_rho_elbow_kundu_liberal_smoke(): # Run using only a subset of components ( rho_elbow_kundu, - varex_upper_p, rho_allcomps_elbow, rho_unclassified_elbow, elbow_f05, @@ -441,7 +442,6 @@ def test_rho_elbow_kundu_liberal_smoke(): subset_comps2use=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 
         13, 14, 18, 20],
     )
     assert isinstance(rho_elbow_kundu, float)
-    assert isinstance(varex_upper_p, float)
     assert isinstance(rho_allcomps_elbow, float)
     assert isinstance(rho_unclassified_elbow, float)
     assert isinstance(elbow_f05, float)
@@ -450,13 +450,11 @@
     component_table = sample_component_table()
     (
         rho_elbow_kundu,
-        varex_upper_p,
         rho_allcomps_elbow,
         rho_unclassified_elbow,
         elbow_f05,
     ) = selection_utils.rho_elbow_kundu_liberal(component_table, n_echos=3)
     assert isinstance(rho_elbow_kundu, float)
-    assert isinstance(varex_upper_p, type(None))
     assert isinstance(rho_allcomps_elbow, float)
     assert isinstance(rho_unclassified_elbow, type(None))
     assert isinstance(elbow_f05, float)
diff --git a/tedana/workflows/tedana.py b/tedana/workflows/tedana.py
index c406478ba..cfc6dfcf1 100644
--- a/tedana/workflows/tedana.py
+++ b/tedana/workflows/tedana.py
@@ -160,7 +160,8 @@ def _get_parser():
             "Decision tree to use. You may use a "
             "packaged tree (kundu, minimal) or supply a JSON "
             "file which matches the decision tree file "
-            "specification."
+            "specification. Minimal is still being tested, with more "
+            "details in the docs."
         ),
         default="kundu",
     )
@@ -351,6 +352,9 @@ def tedana_workflow(
         list of echo-specific files, in ascending order.
     tes : :obj:`list`
         List of echo times associated with data in milliseconds.
+
+    Other Parameters
+    ----------------
     out_dir : :obj:`str`, optional
         Output directory.
     mask : :obj:`str` or None, optional
         Binary mask of voxels to include in TE Dependent ANAlysis. Must be
         spatially aligned with `data`. If an explicit mask is not provided,
         then Nilearn's compute_epi_mask function will be used to derive a mask
         from the first echo's data.
+    convention : {'bids', 'orig'}, optional
+        Filenaming convention. bids uses the latest BIDS derivatives version (1.5.0).
+        Default is 'bids'.
+    prefix : :obj:`str` or None, optional
+        Prefix for filenames generated.
+        Default is ""
     fittype : {'loglin', 'curvefit'}, optional
         Monoexponential fitting method. 'loglin' uses the default linear
         fit to the log of the data. 'curvefit' uses a monoexponential fit to
@@ -365,52 +375,59 @@ def tedana_workflow(
         Default is 'loglin'.
     combmode : {'t2s'}, optional
         Combination scheme for TEs: 't2s' (Posse 1999, default).
+    tree : {'kundu', 'minimal', 'json file'}, optional
+        Decision tree to use for component selection. Can be a
+        packaged tree (kundu, minimal) or a user-supplied JSON file that
+        matches the decision tree file specification. Minimal is intended
+        to be a simpler process that is a bit more conservative, but it
+        accepts and rejects some distinct components compared to kundu.
+        Testing to better understand the effects of the differences is ongoing.
+        Default is 'kundu'.
     tedpca : {'mdl', 'aic', 'kic', 'kundu', 'kundu-stabilize', float}, optional
         Method with which to select components in TEDPCA.
         If a float is provided, then it is assumed to represent percentage of variance
         explained (0-1) to retain from PCA.
         Default is 'aic'.
+    fixed_seed : :obj:`int`, optional
+        Value passed to ``mdp.numx_rand.seed()``.
+        Set to a positive integer value for reproducible ICA results;
+        otherwise, set to -1 for varying results across calls.
+    maxit : :obj:`int`, optional
+        Maximum number of iterations for ICA. Default is 500.
+    maxrestart : :obj:`int`, optional
+        Maximum number of attempts for ICA. If ICA fails to converge, the
+        fixed seed will be updated and ICA will be run again. If convergence
+        is achieved before maxrestart attempts, ICA will finish early.
+        Default is 10.
     tedort : :obj:`bool`, optional
         Orthogonalize rejected components w.r.t. accepted ones prior to
         denoising. Default is False.
     gscontrol : {None, 'mir', 'gsr'} or :obj:`list`, optional
         Perform additional denoising to remove spatially diffuse noise. Default
         is None.
-    verbose : :obj:`bool`, optional
-        Generate intermediate and additional files. Default is False.
     no_reports : obj:'bool', optional
         Do not generate .html reports and .png plots. Default is false such
         that reports are generated.
     png_cmap : obj:'str', optional
         Name of a matplotlib colormap to be used when generating figures. Cannot
         be used with --no-png. Default is 'coolwarm'.
+    verbose : :obj:`bool`, optional
+        Generate intermediate and additional files. Default is False.
+    low_mem : :obj:`bool`, optional
+        Enables low-memory processing, including the use of IncrementalPCA.
+        May increase workflow duration. Default is False.
+    debug : :obj:`bool`, optional
+        Whether to run in debugging mode or not. Default is False.
     t2smap : :obj:`str`, optional
         Precalculated T2* map in the same space as the input data. Values in
         the map must be in seconds.
     mixm : :obj:`str` or None, optional
         File containing mixing matrix, to be used when re-running the workflow.
         If not provided, ME-PCA and ME-ICA are done. Default is None.
-
-    Other Parameters
-    ----------------
-    fixed_seed : :obj:`int`, optional
-        Value passed to ``mdp.numx_rand.seed()``.
-        Set to a positive integer value for reproducible ICA results;
-        otherwise, set to -1 for varying results across calls.
-    maxit : :obj:`int`, optional
-        Maximum number of iterations for ICA. Default is 500.
-    maxrestart : :obj:`int`, optional
-        Maximum number of attempts for ICA. If ICA fails to converge, the
-        fixed seed will be updated and ICA will be run again. If convergence
-        is achieved before maxrestart attempts, ICA will finish early.
-        Default is 10.
-    low_mem : :obj:`bool`, optional
-        Enables low-memory processing, including the use of IncrementalPCA.
-        May increase workflow duration. Default is False.
-    debug : :obj:`bool`, optional
-        Whether to run in debugging mode or not. Default is False.
     quiet : :obj:`bool`, optional
         If True, suppresses logging/printing of messages. Default is False.
+    overwrite : :obj:`bool`, optional
+        If True, force overwriting of files. Default is False.
 
     Notes
     -----

From a2a30fa2a2292aa4782fed88f609554a4bbec58c Mon Sep 17 00:00:00 2001
From: Taylor Salo
Date: Thu, 16 Mar 2023 12:13:52 -0400
Subject: [PATCH 165/177] Replace versioneer with hatch (#35)

* Update gitignore.
* Delete _version.py
* Adopt new packaging.
* Ignore the _version.py file.
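For readers unfamiliar with the new tooling: hatch-vcs derives the package
version from git tags at build time and writes it into `tedana/_version.py`,
per the pyproject.toml configuration in the diff below. A minimal sketch of
the import pattern this enables follows; it assumes the generated file
defines `__version__` (the setuptools-scm convention) and is illustrative
only, not the literal contents of the new `tedana/__about__.py`, which this
patch does not show here:

    # Illustrative sketch only, not tedana's actual __about__.py.
    # hatch-vcs (via setuptools-scm) writes tedana/_version.py at build or
    # install time, so a raw source checkout may lack the file entirely.
    try:
        from tedana._version import __version__
    except ImportError:  # running from an un-built source tree
        __version__ = "0+unknown"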
--- .git_archival.txt | 4 + .gitattributes | 3 +- .gitignore | 3 + .readthedocs.yml | 17 +- pyproject.toml | 124 ++- setup.cfg | 90 --- tedana/__about__.py | 18 + tedana/__init__.py | 16 +- tedana/_version.py | 543 ------------- versioneer.py | 1822 ------------------------------------------- 10 files changed, 162 insertions(+), 2478 deletions(-) create mode 100644 .git_archival.txt delete mode 100644 setup.cfg create mode 100644 tedana/__about__.py delete mode 100644 tedana/_version.py delete mode 100644 versioneer.py diff --git a/.git_archival.txt b/.git_archival.txt new file mode 100644 index 000000000..8fb235d70 --- /dev/null +++ b/.git_archival.txt @@ -0,0 +1,4 @@ +node: $Format:%H$ +node-date: $Format:%cI$ +describe-name: $Format:%(describe:tags=true,match=*[0-9]*)$ +ref-names: $Format:%D$ diff --git a/.gitattributes b/.gitattributes index d04993667..c42bb4796 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1,4 +1,5 @@ -tedana/_version.py export-subst +.git_archival.txt export-subst + # Set the default behavior, in case people don't have core.autocrlf set. * text=auto diff --git a/.gitignore b/.gitignore index 620137e3d..a52ae4872 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,6 @@ +# setuptools-scm +tedana/_version.py + .DS_Store docs/generated/ .pytest_cache/ diff --git a/.readthedocs.yml b/.readthedocs.yml index f994a7eb1..6404ca6b3 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -1,16 +1,17 @@ -# .readthedocs.yml -# Read the Docs configuration file -# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details - -# Required version: 2 -# Build documentation in the docs/ directory with Sphinx sphinx: - configuration: docs/conf.py + configuration: docs/conf.py + +build: + os: ubuntu-22.04 + tools: + python: "3.8" + jobs: + post_checkout: + - git fetch --unshallow python: - version: 3.7 install: - method: pip path: . diff --git a/pyproject.toml b/pyproject.toml index 99af7c9f8..14779950e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,14 +1,98 @@ +[build-system] +requires = ["hatchling", "hatch-vcs"] +build-backend = "hatchling.build" + [project] name = "tedana" -version = "0.0.12" +description = "TE-Dependent Analysis (tedana) of multi-echo functional magnetic resonance imaging (fMRI) data." 
+readme = "README.md" +authors = [{name = "tedana developers"}] +classifiers = [ + "Development Status :: 4 - Beta", + "Intended Audience :: Science/Research", + "Topic :: Scientific/Engineering :: Information Analysis", + "License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)", + "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", +] +license = {file = "LICENSE"} +requires-python = ">=3.7" +dependencies = [ + "bokeh<2.3.0", + "mapca>=0.0.3", + "matplotlib", + "nibabel>=2.5.1", + "nilearn>=0.7", + "numpy>=1.16", + "pandas>=0.24", + "scikit-learn>=0.21", + "scipy>=1.2.0", + "threadpoolctl", + "jinja2==3.0.1", +] +dynamic = ["version"] + +[project.urls] +Homepage = "https://github.com/ME-ICA/tedana" +Documentation = "https://www.tedana.readthedocs.io" +Paper = "https://joss.theoj.org/papers/10.21105/joss.03669" + +[project.optional-dependencies] +doc = [ + "sphinx>=1.5.3", + "sphinx_copybutton", + "sphinx_rtd_theme", + "sphinx-argparse", + "sphinxcontrib-bibtex", +] +tests = [ + "codecov", + "coverage<5.0", + "flake8>=3.7", + "flake8-black", + "flake8-isort", + "pytest", + "pytest-cov", + "requests", +] + +# Aliases +all = ["tedana[doc,tests]"] [project.scripts] -tedana = "tedana.workflows.tedana:_main" ica_reclassify = "tedana.workflows.ica_reclassify:_main" t2smap = "tedana.workflows.t2smap:_main" +tedana = "tedana.workflows.tedana:_main" -[build-system] -requires = ["setuptools>=64", "wheel"] +# +# Hatch configurations +# + +[tool.hatch.metadata] +allow-direct-references = true + +[tool.hatch.build.targets.sdist] +exclude = [".git_archival.txt"] # No longer needed in sdist + +[tool.hatch.build.targets.wheel] +packages = ["tedana"] +exclude = [ + "tedana/tests/data", # Large test data directory +] + +## The following two sections configure setuptools_scm in the hatch way + +[tool.hatch.version] +source = "vcs" + +[tool.hatch.build.hooks.vcs] +version-file = "tedana/_version.py" + +# +# Developer tool configurations +# [tool.black] line-length = 99 @@ -28,8 +112,7 @@ exclude = ''' | build | dist )/ - | get_version.py - | versioneer.py + | tedana/_version.py ) ''' @@ -37,4 +120,33 @@ exclude = ''' profile = "black" multi_line_output = 3 +[tool.flake8] +max-line-length = 99 +exclude = [ + "*build/", + "tedana/_version.py", +] +ignore = ["E203", "E402", "W503"] +per-file-ignores = [ + "*/__init__.py: F401", +] +docstring-convention = "numpy" + +[tool.coverage.run] +branch = true +omit = [ + "*/tests/*", + "*/__init__.py", + "*/conftest.py", + "tedana/_version.py", +] + +[tool.coverage.report] +# Regexes for lines to exclude from consideration +exclude_lines = [ + 'raise NotImplementedError', + 'warnings\.warn', +] +[tool.pytest] +log_cli = true diff --git a/setup.cfg b/setup.cfg deleted file mode 100644 index de0a940c2..000000000 --- a/setup.cfg +++ /dev/null @@ -1,90 +0,0 @@ -[metadata] -url = https://github.com/ME-ICA/tedana -license = LGPL 2.1 -author = tedana developers -author_email = emd222@cornell.edu -maintainer = Elizabeth DuPre -maintainer_email = emd222@cornell.edu -description = TE-Dependent Analysis (tedana) of multi-echo functional magnetic resonance imaging (fMRI) data. 
-description_file = README.md
-long_description = file:README.md
-long_description_content_type = text/markdown
-classifiers =
-    Development Status :: 4 - Beta
-    Intended Audience :: Science/Research
-    Topic :: Scientific/Engineering :: Information Analysis
-    License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)
-    Programming Language :: Python :: 3.7
-    Programming Language :: Python :: 3.8
-    Programming Language :: Python :: 3.9
-    Programming Language :: Python :: 3.10
-
-[options]
-python_requires = >= 3.7
-install_requires =
-    bokeh<2.3.0
-    mapca>=0.0.3
-    matplotlib
-    nibabel>=2.5.1
-    nilearn>=0.7
-    numpy>=1.16
-    pandas>=0.24
-    scikit-learn>=0.21
-    scipy>=1.2.0
-    threadpoolctl
-    jinja2==3.0.1
-packages = find:
-include_package_data = False
-
-[options.extras_require]
-doc =
-    sphinx>=1.5.3
-    sphinx_copybutton
-    sphinx_rtd_theme
-    sphinx-argparse
-    sphinxcontrib-bibtex
-tests =
-    codecov
-    coverage<5.0
-    flake8>=3.7
-    flake8-black
-    flake8-isort
-    pytest
-    pytest-cov
-    requests
-all =
-    %(doc)s
-    %(tests)s
-
-[options.entry_points]
-console_scripts =
-    t2smap = tedana.workflows.t2smap:_main
-    ica_reclassify = tedana.workflows.ica_reclassify:_main
-    tedana = tedana.workflows.tedana:_main
-
-[options.package_data]
-* =
-    resources/references.bib
-    resources/config/*
-    # Includes all integration test output text files
-    reporting/data/*
-    reporting/data/html/*
-
-[versioneer]
-VCS = git
-style = pep440
-versionfile_source = tedana/_version.py
-versionfile_build = tedana/_version.py
-tag_prefix =
-parentdir_prefix =
-
-[flake8]
-max-line-length = 99
-exclude=*build/
-ignore = E203,E402,W503
-per-file-ignores =
-    */__init__.py:F401
-docstring-convention = numpy
-
-[tool:pytest]
-log_cli = true
diff --git a/tedana/__about__.py b/tedana/__about__.py
new file mode 100644
index 000000000..9403eacc0
--- /dev/null
+++ b/tedana/__about__.py
@@ -0,0 +1,18 @@
+# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
+# vi: set ft=python sts=4 ts=4 sw=4 et:
+"""Base module variables."""
+
+try:
+    from tedana._version import __version__
+except ImportError:
+    __version__ = "0+unknown"
+
+__packagename__ = "tedana"
+__copyright__ = "Copyright 2023, The ME-ICA Developers"
+__credits__ = (
+    "Contributors: please check the ``.zenodo.json`` file at the top-level folder "
+    "of the repository"
+)
+__url__ = "https://github.com/ME-ICA/tedana"
+
+DOWNLOAD_URL = f"https://github.com/ME-ICA/{__packagename__}/archive/{__version__}.tar.gz"
diff --git a/tedana/__init__.py b/tedana/__init__.py
index a26b9ef10..cec68b396 100644
--- a/tedana/__init__.py
+++ b/tedana/__init__.py
@@ -1,17 +1,17 @@
 # -*- coding: utf-8 -*-
 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
 # vi: set ft=python sts=4 ts=4 sw=4 et:
-"""
-tedana: A Python package for TE-dependent analysis of multi-echo data.
-""" - +"""tedana: A Python package for TE-dependent analysis of multi-echo data.""" import warnings -from ._version import get_versions - -__version__ = get_versions()["version"] +from tedana.__about__ import __copyright__, __credits__, __packagename__, __version__ # cmp is not used, so ignore nipype-generated warnings warnings.filterwarnings("ignore", r"cmp not installed") -del get_versions +__all__ = [ + "__copyright__", + "__credits__", + "__packagename__", + "__version__", +] diff --git a/tedana/_version.py b/tedana/_version.py deleted file mode 100644 index bede1575c..000000000 --- a/tedana/_version.py +++ /dev/null @@ -1,543 +0,0 @@ -# This file helps to compute a version number in source trees obtained from -# git-archive tarball (such as those provided by githubs download-from-tag -# feature). Distribution tarballs (built by setup.py sdist) and build -# directories (produced by setup.py build) will contain a much shorter file -# that just contains the computed version number. - -# This file is released into the public domain. Generated by -# versioneer-0.18 (https://github.com/warner/python-versioneer) - -"""Git implementation of _version.py.""" - -import errno -import os -import re -import subprocess -import sys - - -def get_keywords(): - """Get the keywords needed to look up the version information.""" - # these strings will be replaced by git during git-archive. - # setup.py/versioneer.py will grep for the variable names, so they must - # each be defined on a line of their own. _version.py will just call - # get_keywords(). - git_refnames = "$Format:%d$" - git_full = "$Format:%H$" - git_date = "$Format:%ci$" - keywords = {"refnames": git_refnames, "full": git_full, "date": git_date} - return keywords - - -class VersioneerConfig: - """Container for Versioneer configuration parameters.""" - - -def get_config(): - """Create, populate and return the VersioneerConfig() object.""" - # these strings are filled in when 'setup.py versioneer' creates - # _version.py - cfg = VersioneerConfig() - cfg.VCS = "git" - cfg.style = "pep440" - cfg.tag_prefix = "" - cfg.parentdir_prefix = "" - cfg.versionfile_source = "tedana/_version.py" - cfg.verbose = False - return cfg - - -class NotThisMethod(Exception): - """Exception raised if a method is not valid for the current scenario.""" - - -LONG_VERSION_PY = {} -HANDLERS = {} - - -def register_vcs_handler(vcs, method): # decorator - """Decorator to mark a method as the handler for a particular VCS.""" - - def decorate(f): - """Store f in HANDLERS[vcs][method].""" - if vcs not in HANDLERS: - HANDLERS[vcs] = {} - HANDLERS[vcs][method] = f - return f - - return decorate - - -def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None): - """Call the given command(s).""" - assert isinstance(commands, list) - p = None - for c in commands: - try: - dispcmd = str([c] + args) - # remember shell=False, so use git.cmd on windows, not just git - p = subprocess.Popen( - [c] + args, - cwd=cwd, - env=env, - stdout=subprocess.PIPE, - stderr=(subprocess.PIPE if hide_stderr else None), - ) - break - except EnvironmentError: - e = sys.exc_info()[1] - if e.errno == errno.ENOENT: - continue - if verbose: - print("unable to run %s" % dispcmd) - print(e) - return None, None - else: - if verbose: - print("unable to find command, tried %s" % (commands,)) - return None, None - stdout = p.communicate()[0].strip() - if sys.version_info[0] >= 3: - stdout = stdout.decode() - if p.returncode != 0: - if verbose: - print("unable to run %s (error)" % 
dispcmd) - print("stdout was %s" % stdout) - return None, p.returncode - return stdout, p.returncode - - -def versions_from_parentdir(parentdir_prefix, root, verbose): - """Try to determine the version from the parent directory name. - - Source tarballs conventionally unpack into a directory that includes both - the project name and a version string. We will also support searching up - two directory levels for an appropriately named parent directory - """ - rootdirs = [] - - for i in range(3): - dirname = os.path.basename(root) - if dirname.startswith(parentdir_prefix): - return { - "version": dirname[len(parentdir_prefix) :], - "full-revisionid": None, - "dirty": False, - "error": None, - "date": None, - } - else: - rootdirs.append(root) - root = os.path.dirname(root) # up a level - - if verbose: - print( - "Tried directories %s but none started with prefix %s" - % (str(rootdirs), parentdir_prefix) - ) - raise NotThisMethod("rootdir doesn't start with parentdir_prefix") - - -@register_vcs_handler("git", "get_keywords") -def git_get_keywords(versionfile_abs): - """Extract version information from the given file.""" - # the code embedded in _version.py can just fetch the value of these - # keywords. When used from setup.py, we don't want to import _version.py, - # so we do it with a regexp instead. This function is not used from - # _version.py. - keywords = {} - try: - f = open(versionfile_abs, "r") - for line in f.readlines(): - if line.strip().startswith("git_refnames ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["refnames"] = mo.group(1) - if line.strip().startswith("git_full ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["full"] = mo.group(1) - if line.strip().startswith("git_date ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["date"] = mo.group(1) - f.close() - except EnvironmentError: - pass - return keywords - - -@register_vcs_handler("git", "keywords") -def git_versions_from_keywords(keywords, tag_prefix, verbose): - """Get version information from git keywords.""" - if not keywords: - raise NotThisMethod("no keywords at all, weird") - date = keywords.get("date") - if date is not None: - # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant - # datestamp. However we prefer "%ci" (which expands to an "ISO-8601 - # -like" string, which we must then edit to make compliant), because - # it's been around since git-1.5.3, and it's too difficult to - # discover which version we're using, or to work around using an - # older one. - date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) - refnames = keywords["refnames"].strip() - if refnames.startswith("$Format"): - if verbose: - print("keywords are unexpanded, not using") - raise NotThisMethod("unexpanded keywords, not a git-archive tarball") - refs = set([r.strip() for r in refnames.strip("()").split(",")]) - # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of - # just "foo-1.0". If we see a "tag: " prefix, prefer those. - TAG = "tag: " - tags = set([r[len(TAG) :] for r in refs if r.startswith(TAG)]) - if not tags: - # Either we're using git < 1.8.3, or there really are no tags. We use - # a heuristic: assume all version tags have a digit. The old git %d - # expansion behaves like git log --decorate=short and strips out the - # refs/heads/ and refs/tags/ prefixes that would let us distinguish - # between branches and tags. 
By ignoring refnames without digits, we - # filter out many common branch names like "release" and - # "stabilization", as well as "HEAD" and "master". - tags = set([r for r in refs if re.search(r"\d", r)]) - if verbose: - print("discarding '%s', no digits" % ",".join(refs - tags)) - if verbose: - print("likely tags: %s" % ",".join(sorted(tags))) - for ref in sorted(tags): - # sorting will prefer e.g. "2.0" over "2.0rc1" - if ref.startswith(tag_prefix): - r = ref[len(tag_prefix) :] - if verbose: - print("picking %s" % r) - return { - "version": r, - "full-revisionid": keywords["full"].strip(), - "dirty": False, - "error": None, - "date": date, - } - # no suitable tags, so version is "0+unknown", but full hex is still there - if verbose: - print("no suitable tags, using unknown + full revision id") - return { - "version": "0+unknown", - "full-revisionid": keywords["full"].strip(), - "dirty": False, - "error": "no suitable tags", - "date": None, - } - - -@register_vcs_handler("git", "pieces_from_vcs") -def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): - """Get version from 'git describe' in the root of the source tree. - - This only gets called if the git-archive 'subst' keywords were *not* - expanded, and _version.py hasn't already been rewritten with a short - version string, meaning we're inside a checked out source tree. - """ - GITS = ["git"] - if sys.platform == "win32": - GITS = ["git.cmd", "git.exe"] - - out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True) - if rc != 0: - if verbose: - print("Directory %s not under git control" % root) - raise NotThisMethod("'git rev-parse --git-dir' returned error") - - # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] - # if there isn't one, this yields HEX[-dirty] (no NUM) - describe_out, rc = run_command( - GITS, - ["describe", "--tags", "--dirty", "--always", "--long", "--match", "%s*" % tag_prefix], - cwd=root, - ) - # --long was added in git-1.5.5 - if describe_out is None: - raise NotThisMethod("'git describe' failed") - describe_out = describe_out.strip() - full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) - if full_out is None: - raise NotThisMethod("'git rev-parse' failed") - full_out = full_out.strip() - - pieces = {} - pieces["long"] = full_out - pieces["short"] = full_out[:7] # maybe improved later - pieces["error"] = None - - # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] - # TAG might have hyphens. - git_describe = describe_out - - # look for -dirty suffix - dirty = git_describe.endswith("-dirty") - pieces["dirty"] = dirty - if dirty: - git_describe = git_describe[: git_describe.rindex("-dirty")] - - # now we have TAG-NUM-gHEX or HEX - - if "-" in git_describe: - # TAG-NUM-gHEX - mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe) - if not mo: - # unparseable. Maybe git-describe is misbehaving? 
- pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out - return pieces - - # tag - full_tag = mo.group(1) - if not full_tag.startswith(tag_prefix): - if verbose: - fmt = "tag '%s' doesn't start with prefix '%s'" - print(fmt % (full_tag, tag_prefix)) - pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % (full_tag, tag_prefix) - return pieces - pieces["closest-tag"] = full_tag[len(tag_prefix) :] - - # distance: number of commits since tag - pieces["distance"] = int(mo.group(2)) - - # commit: short hex revision ID - pieces["short"] = mo.group(3) - - else: - # HEX: no tags - pieces["closest-tag"] = None - count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root) - pieces["distance"] = int(count_out) # total number of commits - - # commit date: see ISO-8601 comment in git_versions_from_keywords() - date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip() - pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) - - return pieces - - -def plus_or_dot(pieces): - """Return a + if we don't already have one, else return a .""" - if "+" in pieces.get("closest-tag", ""): - return "." - return "+" - - -def render_pep440(pieces): - """Build up version string, with post-release "local version identifier". - - Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you - get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty - - Exceptions: - 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += plus_or_dot(pieces) - rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) - if pieces["dirty"]: - rendered += ".dirty" - else: - # exception #1 - rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"]) - if pieces["dirty"]: - rendered += ".dirty" - return rendered - - -def render_pep440_pre(pieces): - """TAG[.post.devDISTANCE] -- No -dirty. - - Exceptions: - 1: no tags. 0.post.devDISTANCE - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"]: - rendered += ".post.dev%d" % pieces["distance"] - else: - # exception #1 - rendered = "0.post.dev%d" % pieces["distance"] - return rendered - - -def render_pep440_post(pieces): - """TAG[.postDISTANCE[.dev0]+gHEX] . - - The ".dev0" means dirty. Note that .dev0 sorts backwards - (a dirty tree will appear "older" than the corresponding clean one), - but you shouldn't be releasing software with -dirty anyways. - - Exceptions: - 1: no tags. 0.postDISTANCE[.dev0] - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += ".post%d" % pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - rendered += plus_or_dot(pieces) - rendered += "g%s" % pieces["short"] - else: - # exception #1 - rendered = "0.post%d" % pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - rendered += "+g%s" % pieces["short"] - return rendered - - -def render_pep440_old(pieces): - """TAG[.postDISTANCE[.dev0]] . - - The ".dev0" means dirty. - - Eexceptions: - 1: no tags. 
0.postDISTANCE[.dev0] - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += ".post%d" % pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - else: - # exception #1 - rendered = "0.post%d" % pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - return rendered - - -def render_git_describe(pieces): - """TAG[-DISTANCE-gHEX][-dirty]. - - Like 'git describe --tags --dirty --always'. - - Exceptions: - 1: no tags. HEX[-dirty] (note: no 'g' prefix) - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"]: - rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) - else: - # exception #1 - rendered = pieces["short"] - if pieces["dirty"]: - rendered += "-dirty" - return rendered - - -def render_git_describe_long(pieces): - """TAG-DISTANCE-gHEX[-dirty]. - - Like 'git describe --tags --dirty --always -long'. - The distance/hash is unconditional. - - Exceptions: - 1: no tags. HEX[-dirty] (note: no 'g' prefix) - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) - else: - # exception #1 - rendered = pieces["short"] - if pieces["dirty"]: - rendered += "-dirty" - return rendered - - -def render(pieces, style): - """Render the given version pieces into the requested style.""" - if pieces["error"]: - return { - "version": "unknown", - "full-revisionid": pieces.get("long"), - "dirty": None, - "error": pieces["error"], - "date": None, - } - - if not style or style == "default": - style = "pep440" # the default - - if style == "pep440": - rendered = render_pep440(pieces) - elif style == "pep440-pre": - rendered = render_pep440_pre(pieces) - elif style == "pep440-post": - rendered = render_pep440_post(pieces) - elif style == "pep440-old": - rendered = render_pep440_old(pieces) - elif style == "git-describe": - rendered = render_git_describe(pieces) - elif style == "git-describe-long": - rendered = render_git_describe_long(pieces) - else: - raise ValueError("unknown style '%s'" % style) - - return { - "version": rendered, - "full-revisionid": pieces["long"], - "dirty": pieces["dirty"], - "error": None, - "date": pieces.get("date"), - } - - -def get_versions(): - """Get version information or return default if unable to do so.""" - # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have - # __file__, we can work backwards from there to the root. Some - # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which - # case we can only use expanded keywords. - - cfg = get_config() - verbose = cfg.verbose - - try: - return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose) - except NotThisMethod: - pass - - try: - root = os.path.realpath(__file__) - # versionfile_source is the relative path from the top of the source - # tree (where the .git directory might live) to this file. Invert - # this to find the root from __file__. 
- for i in cfg.versionfile_source.split("/"): - root = os.path.dirname(root) - except NameError: - return { - "version": "0+unknown", - "full-revisionid": None, - "dirty": None, - "error": "unable to find root of source tree", - "date": None, - } - - try: - pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) - return render(pieces, cfg.style) - except NotThisMethod: - pass - - try: - if cfg.parentdir_prefix: - return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) - except NotThisMethod: - pass - - return { - "version": "0+unknown", - "full-revisionid": None, - "dirty": None, - "error": "unable to compute version", - "date": None, - } diff --git a/versioneer.py b/versioneer.py deleted file mode 100644 index 64fea1c89..000000000 --- a/versioneer.py +++ /dev/null @@ -1,1822 +0,0 @@ - -# Version: 0.18 - -"""The Versioneer - like a rocketeer, but for versions. - -The Versioneer -============== - -* like a rocketeer, but for versions! -* https://github.com/warner/python-versioneer -* Brian Warner -* License: Public Domain -* Compatible With: python2.6, 2.7, 3.2, 3.3, 3.4, 3.5, 3.6, and pypy -* [![Latest Version] -(https://pypip.in/version/versioneer/badge.svg?style=flat) -](https://pypi.python.org/pypi/versioneer/) -* [![Build Status] -(https://travis-ci.org/warner/python-versioneer.png?branch=master) -](https://travis-ci.org/warner/python-versioneer) - -This is a tool for managing a recorded version number in distutils-based -python projects. The goal is to remove the tedious and error-prone "update -the embedded version string" step from your release process. Making a new -release should be as easy as recording a new tag in your version-control -system, and maybe making new tarballs. - - -## Quick Install - -* `pip install versioneer` to somewhere to your $PATH -* add a `[versioneer]` section to your setup.cfg (see below) -* run `versioneer install` in your source tree, commit the results - -## Version Identifiers - -Source trees come from a variety of places: - -* a version-control system checkout (mostly used by developers) -* a nightly tarball, produced by build automation -* a snapshot tarball, produced by a web-based VCS browser, like github's - "tarball from tag" feature -* a release tarball, produced by "setup.py sdist", distributed through PyPI - -Within each source tree, the version identifier (either a string or a number, -this tool is format-agnostic) can come from a variety of places: - -* ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows - about recent "tags" and an absolute revision-id -* the name of the directory into which the tarball was unpacked -* an expanded VCS keyword ($Id$, etc) -* a `_version.py` created by some earlier build step - -For released software, the version identifier is closely related to a VCS -tag. Some projects use tag names that include more than just the version -string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool -needs to strip the tag prefix to extract the version identifier. For -unreleased software (between tags), the version identifier should provide -enough information to help developers recreate the same tree, while also -giving them an idea of roughly how old the tree is (after version 1.2, before -version 1.3). 
Many VCS systems can report a description that captures this, -for example `git describe --tags --dirty --always` reports things like -"0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the -0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has -uncommitted changes. - -The version identifier is used for multiple purposes: - -* to allow the module to self-identify its version: `myproject.__version__` -* to choose a name and prefix for a 'setup.py sdist' tarball - -## Theory of Operation - -Versioneer works by adding a special `_version.py` file into your source -tree, where your `__init__.py` can import it. This `_version.py` knows how to -dynamically ask the VCS tool for version information at import time. - -`_version.py` also contains `$Revision$` markers, and the installation -process marks `_version.py` to have this marker rewritten with a tag name -during the `git archive` command. As a result, generated tarballs will -contain enough information to get the proper version. - -To allow `setup.py` to compute a version too, a `versioneer.py` is added to -the top level of your source tree, next to `setup.py` and the `setup.cfg` -that configures it. This overrides several distutils/setuptools commands to -compute the version when invoked, and changes `setup.py build` and `setup.py -sdist` to replace `_version.py` with a small static file that contains just -the generated version data. - -## Installation - -See [INSTALL.md](./INSTALL.md) for detailed installation instructions. - -## Version-String Flavors - -Code which uses Versioneer can learn about its version string at runtime by -importing `_version` from your main `__init__.py` file and running the -`get_versions()` function. From the "outside" (e.g. in `setup.py`), you can -import the top-level `versioneer.py` and run `get_versions()`. - -Both functions return a dictionary with different flavors of version -information: - -* `['version']`: A condensed version string, rendered using the selected - style. This is the most commonly used value for the project's version - string. The default "pep440" style yields strings like `0.11`, - `0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section - below for alternative styles. - -* `['full-revisionid']`: detailed revision identifier. For Git, this is the - full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac". - -* `['date']`: Date and time of the latest `HEAD` commit. For Git, it is the - commit date in ISO 8601 format. This will be None if the date is not - available. - -* `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that - this is only accurate if run in a VCS checkout, otherwise it is likely to - be False or None - -* `['error']`: if the version string could not be computed, this will be set - to a string describing the problem, otherwise it will be None. It may be - useful to throw an exception in setup.py if this is set, to avoid e.g. - creating tarballs with a version string of "unknown". - -Some variants are more useful than others. Including `full-revisionid` in a -bug report should allow developers to reconstruct the exact code being tested -(or indicate the presence of local changes that should be shared with the -developers). `version` is suitable for display in an "about" box or a CLI -`--version` output: it can be easily compared against release notes and lists -of bugs fixed in various releases. 
- -The installer adds the following text to your `__init__.py` to place a basic -version in `YOURPROJECT.__version__`: - - from ._version import get_versions - __version__ = get_versions()['version'] - del get_versions - -## Styles - -The setup.cfg `style=` configuration controls how the VCS information is -rendered into a version string. - -The default style, "pep440", produces a PEP440-compliant string, equal to the -un-prefixed tag name for actual releases, and containing an additional "local -version" section with more detail for in-between builds. For Git, this is -TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags ---dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the -tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and -that this commit is two revisions ("+2") beyond the "0.11" tag. For released -software (exactly equal to a known tag), the identifier will only contain the -stripped tag, e.g. "0.11". - -Other styles are available. See [details.md](details.md) in the Versioneer -source tree for descriptions. - -## Debugging - -Versioneer tries to avoid fatal errors: if something goes wrong, it will tend -to return a version of "0+unknown". To investigate the problem, run `setup.py -version`, which will run the version-lookup code in a verbose mode, and will -display the full contents of `get_versions()` (including the `error` string, -which may help identify what went wrong). - -## Known Limitations - -Some situations are known to cause problems for Versioneer. This details the -most significant ones. More can be found on Github -[issues page](https://github.com/warner/python-versioneer/issues). - -### Subprojects - -Versioneer has limited support for source trees in which `setup.py` is not in -the root directory (e.g. `setup.py` and `.git/` are *not* siblings). The are -two common reasons why `setup.py` might not be in the root: - -* Source trees which contain multiple subprojects, such as - [Buildbot](https://github.com/buildbot/buildbot), which contains both - "master" and "slave" subprojects, each with their own `setup.py`, - `setup.cfg`, and `tox.ini`. Projects like these produce multiple PyPI - distributions (and upload multiple independently-installable tarballs). -* Source trees whose main purpose is to contain a C library, but which also - provide bindings to Python (and perhaps other langauges) in subdirectories. - -Versioneer will look for `.git` in parent directories, and most operations -should get the right version string. However `pip` and `setuptools` have bugs -and implementation details which frequently cause `pip install .` from a -subproject directory to fail to find a correct version string (so it usually -defaults to `0+unknown`). - -`pip install --editable .` should work correctly. `setup.py install` might -work too. - -Pip-8.1.1 is known to have this problem, but hopefully it will get fixed in -some later version. - -[Bug #38](https://github.com/warner/python-versioneer/issues/38) is tracking -this issue. The discussion in -[PR #61](https://github.com/warner/python-versioneer/pull/61) describes the -issue from the Versioneer side in more detail. -[pip PR#3176](https://github.com/pypa/pip/pull/3176) and -[pip PR#3615](https://github.com/pypa/pip/pull/3615) contain work to improve -pip to let Versioneer work correctly. - -Versioneer-0.16 and earlier only looked for a `.git` directory next to the -`setup.cfg`, so subprojects were completely unsupported with those releases. 
- -### Editable installs with setuptools <= 18.5 - -`setup.py develop` and `pip install --editable .` allow you to install a -project into a virtualenv once, then continue editing the source code (and -test) without re-installing after every change. - -"Entry-point scripts" (`setup(entry_points={"console_scripts": ..})`) are a -convenient way to specify executable scripts that should be installed along -with the python package. - -These both work as expected when using modern setuptools. When using -setuptools-18.5 or earlier, however, certain operations will cause -`pkg_resources.DistributionNotFound` errors when running the entrypoint -script, which must be resolved by re-installing the package. This happens -when the install happens with one version, then the egg_info data is -regenerated while a different version is checked out. Many setup.py commands -cause egg_info to be rebuilt (including `sdist`, `wheel`, and installing into -a different virtualenv), so this can be surprising. - -[Bug #83](https://github.com/warner/python-versioneer/issues/83) describes -this one, but upgrading to a newer version of setuptools should probably -resolve it. - -### Unicode version strings - -While Versioneer works (and is continually tested) with both Python 2 and -Python 3, it is not entirely consistent with bytes-vs-unicode distinctions. -Newer releases probably generate unicode version strings on py2. It's not -clear that this is wrong, but it may be surprising for applications when then -write these strings to a network connection or include them in bytes-oriented -APIs like cryptographic checksums. - -[Bug #71](https://github.com/warner/python-versioneer/issues/71) investigates -this question. - - -## Updating Versioneer - -To upgrade your project to a new release of Versioneer, do the following: - -* install the new Versioneer (`pip install -U versioneer` or equivalent) -* edit `setup.cfg`, if necessary, to include any new configuration settings - indicated by the release notes. See [UPGRADING](./UPGRADING.md) for details. -* re-run `versioneer install` in your source tree, to replace - `SRC/_version.py` -* commit any changed files - -## Future Directions - -This tool is designed to make it easily extended to other version-control -systems: all VCS-specific components are in separate directories like -src/git/ . The top-level `versioneer.py` script is assembled from these -components by running make-versioneer.py . In the future, make-versioneer.py -will take a VCS name as an argument, and will construct a version of -`versioneer.py` that is specific to the given VCS. It might also take the -configuration arguments that are currently provided manually during -installation by editing setup.py . Alternatively, it might go the other -direction and include code from all supported VCS systems, reducing the -number of intermediate scripts. - - -## License - -To make Versioneer easier to embed, all its code is dedicated to the public -domain. The `_version.py` that it creates is also in the public domain. -Specifically, both are released under the Creative Commons "Public Domain -Dedication" license (CC0-1.0), as described in -https://creativecommons.org/publicdomain/zero/1.0/ . 
- -""" - -from __future__ import print_function -try: - import configparser -except ImportError: - import ConfigParser as configparser -import errno -import json -import os -import re -import subprocess -import sys - - -class VersioneerConfig: - """Container for Versioneer configuration parameters.""" - - -def get_root(): - """Get the project root directory. - - We require that all commands are run from the project root, i.e. the - directory that contains setup.py, setup.cfg, and versioneer.py . - """ - root = os.path.realpath(os.path.abspath(os.getcwd())) - setup_py = os.path.join(root, "setup.py") - versioneer_py = os.path.join(root, "versioneer.py") - if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)): - # allow 'python path/to/setup.py COMMAND' - root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0]))) - setup_py = os.path.join(root, "setup.py") - versioneer_py = os.path.join(root, "versioneer.py") - if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)): - err = ("Versioneer was unable to run the project root directory. " - "Versioneer requires setup.py to be executed from " - "its immediate directory (like 'python setup.py COMMAND'), " - "or in a way that lets it use sys.argv[0] to find the root " - "(like 'python path/to/setup.py COMMAND').") - raise VersioneerBadRootError(err) - try: - # Certain runtime workflows (setup.py install/develop in a setuptools - # tree) execute all dependencies in a single python process, so - # "versioneer" may be imported multiple times, and python's shared - # module-import table will cache the first one. So we can't use - # os.path.dirname(__file__), as that will find whichever - # versioneer.py was first imported, even in later projects. - me = os.path.realpath(os.path.abspath(__file__)) - me_dir = os.path.normcase(os.path.splitext(me)[0]) - vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0]) - if me_dir != vsr_dir: - print("Warning: build in %s is using versioneer.py from %s" - % (os.path.dirname(me), versioneer_py)) - except NameError: - pass - return root - - -def get_config_from_root(root): - """Read the project setup.cfg file to determine Versioneer config.""" - # This might raise EnvironmentError (if setup.cfg is missing), or - # configparser.NoSectionError (if it lacks a [versioneer] section), or - # configparser.NoOptionError (if it lacks "VCS="). See the docstring at - # the top of versioneer.py for instructions on writing your setup.cfg . 
- setup_cfg = os.path.join(root, "setup.cfg") - parser = configparser.SafeConfigParser() - with open(setup_cfg, "r") as f: - parser.readfp(f) - VCS = parser.get("versioneer", "VCS") # mandatory - - def get(parser, name): - if parser.has_option("versioneer", name): - return parser.get("versioneer", name) - return None - cfg = VersioneerConfig() - cfg.VCS = VCS - cfg.style = get(parser, "style") or "" - cfg.versionfile_source = get(parser, "versionfile_source") - cfg.versionfile_build = get(parser, "versionfile_build") - cfg.tag_prefix = get(parser, "tag_prefix") - if cfg.tag_prefix in ("''", '""'): - cfg.tag_prefix = "" - cfg.parentdir_prefix = get(parser, "parentdir_prefix") - cfg.verbose = get(parser, "verbose") - return cfg - - -class NotThisMethod(Exception): - """Exception raised if a method is not valid for the current scenario.""" - - -# these dictionaries contain VCS-specific tools -LONG_VERSION_PY = {} -HANDLERS = {} - - -def register_vcs_handler(vcs, method): # decorator - """Decorator to mark a method as the handler for a particular VCS.""" - def decorate(f): - """Store f in HANDLERS[vcs][method].""" - if vcs not in HANDLERS: - HANDLERS[vcs] = {} - HANDLERS[vcs][method] = f - return f - return decorate - - -def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, - env=None): - """Call the given command(s).""" - assert isinstance(commands, list) - p = None - for c in commands: - try: - dispcmd = str([c] + args) - # remember shell=False, so use git.cmd on windows, not just git - p = subprocess.Popen([c] + args, cwd=cwd, env=env, - stdout=subprocess.PIPE, - stderr=(subprocess.PIPE if hide_stderr - else None)) - break - except EnvironmentError: - e = sys.exc_info()[1] - if e.errno == errno.ENOENT: - continue - if verbose: - print("unable to run %s" % dispcmd) - print(e) - return None, None - else: - if verbose: - print("unable to find command, tried %s" % (commands,)) - return None, None - stdout = p.communicate()[0].strip() - if sys.version_info[0] >= 3: - stdout = stdout.decode() - if p.returncode != 0: - if verbose: - print("unable to run %s (error)" % dispcmd) - print("stdout was %s" % stdout) - return None, p.returncode - return stdout, p.returncode - - -LONG_VERSION_PY['git'] = ''' -# This file helps to compute a version number in source trees obtained from -# git-archive tarball (such as those provided by githubs download-from-tag -# feature). Distribution tarballs (built by setup.py sdist) and build -# directories (produced by setup.py build) will contain a much shorter file -# that just contains the computed version number. - -# This file is released into the public domain. Generated by -# versioneer-0.18 (https://github.com/warner/python-versioneer) - -"""Git implementation of _version.py.""" - -import errno -import os -import re -import subprocess -import sys - - -def get_keywords(): - """Get the keywords needed to look up the version information.""" - # these strings will be replaced by git during git-archive. - # setup.py/versioneer.py will grep for the variable names, so they must - # each be defined on a line of their own. _version.py will just call - # get_keywords(). 
- git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s" - git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s" - git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s" - keywords = {"refnames": git_refnames, "full": git_full, "date": git_date} - return keywords - - -class VersioneerConfig: - """Container for Versioneer configuration parameters.""" - - -def get_config(): - """Create, populate and return the VersioneerConfig() object.""" - # these strings are filled in when 'setup.py versioneer' creates - # _version.py - cfg = VersioneerConfig() - cfg.VCS = "git" - cfg.style = "%(STYLE)s" - cfg.tag_prefix = "%(TAG_PREFIX)s" - cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s" - cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s" - cfg.verbose = False - return cfg - - -class NotThisMethod(Exception): - """Exception raised if a method is not valid for the current scenario.""" - - -LONG_VERSION_PY = {} -HANDLERS = {} - - -def register_vcs_handler(vcs, method): # decorator - """Decorator to mark a method as the handler for a particular VCS.""" - def decorate(f): - """Store f in HANDLERS[vcs][method].""" - if vcs not in HANDLERS: - HANDLERS[vcs] = {} - HANDLERS[vcs][method] = f - return f - return decorate - - -def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, - env=None): - """Call the given command(s).""" - assert isinstance(commands, list) - p = None - for c in commands: - try: - dispcmd = str([c] + args) - # remember shell=False, so use git.cmd on windows, not just git - p = subprocess.Popen([c] + args, cwd=cwd, env=env, - stdout=subprocess.PIPE, - stderr=(subprocess.PIPE if hide_stderr - else None)) - break - except EnvironmentError: - e = sys.exc_info()[1] - if e.errno == errno.ENOENT: - continue - if verbose: - print("unable to run %%s" %% dispcmd) - print(e) - return None, None - else: - if verbose: - print("unable to find command, tried %%s" %% (commands,)) - return None, None - stdout = p.communicate()[0].strip() - if sys.version_info[0] >= 3: - stdout = stdout.decode() - if p.returncode != 0: - if verbose: - print("unable to run %%s (error)" %% dispcmd) - print("stdout was %%s" %% stdout) - return None, p.returncode - return stdout, p.returncode - - -def versions_from_parentdir(parentdir_prefix, root, verbose): - """Try to determine the version from the parent directory name. - - Source tarballs conventionally unpack into a directory that includes both - the project name and a version string. We will also support searching up - two directory levels for an appropriately named parent directory - """ - rootdirs = [] - - for i in range(3): - dirname = os.path.basename(root) - if dirname.startswith(parentdir_prefix): - return {"version": dirname[len(parentdir_prefix):], - "full-revisionid": None, - "dirty": False, "error": None, "date": None} - else: - rootdirs.append(root) - root = os.path.dirname(root) # up a level - - if verbose: - print("Tried directories %%s but none started with prefix %%s" %% - (str(rootdirs), parentdir_prefix)) - raise NotThisMethod("rootdir doesn't start with parentdir_prefix") - - -@register_vcs_handler("git", "get_keywords") -def git_get_keywords(versionfile_abs): - """Extract version information from the given file.""" - # the code embedded in _version.py can just fetch the value of these - # keywords. When used from setup.py, we don't want to import _version.py, - # so we do it with a regexp instead. This function is not used from - # _version.py. 
- keywords = {} - try: - f = open(versionfile_abs, "r") - for line in f.readlines(): - if line.strip().startswith("git_refnames ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["refnames"] = mo.group(1) - if line.strip().startswith("git_full ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["full"] = mo.group(1) - if line.strip().startswith("git_date ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["date"] = mo.group(1) - f.close() - except EnvironmentError: - pass - return keywords - - -@register_vcs_handler("git", "keywords") -def git_versions_from_keywords(keywords, tag_prefix, verbose): - """Get version information from git keywords.""" - if not keywords: - raise NotThisMethod("no keywords at all, weird") - date = keywords.get("date") - if date is not None: - # git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant - # datestamp. However we prefer "%%ci" (which expands to an "ISO-8601 - # -like" string, which we must then edit to make compliant), because - # it's been around since git-1.5.3, and it's too difficult to - # discover which version we're using, or to work around using an - # older one. - date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) - refnames = keywords["refnames"].strip() - if refnames.startswith("$Format"): - if verbose: - print("keywords are unexpanded, not using") - raise NotThisMethod("unexpanded keywords, not a git-archive tarball") - refs = set([r.strip() for r in refnames.strip("()").split(",")]) - # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of - # just "foo-1.0". If we see a "tag: " prefix, prefer those. - TAG = "tag: " - tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) - if not tags: - # Either we're using git < 1.8.3, or there really are no tags. We use - # a heuristic: assume all version tags have a digit. The old git %%d - # expansion behaves like git log --decorate=short and strips out the - # refs/heads/ and refs/tags/ prefixes that would let us distinguish - # between branches and tags. By ignoring refnames without digits, we - # filter out many common branch names like "release" and - # "stabilization", as well as "HEAD" and "master". - tags = set([r for r in refs if re.search(r'\d', r)]) - if verbose: - print("discarding '%%s', no digits" %% ",".join(refs - tags)) - if verbose: - print("likely tags: %%s" %% ",".join(sorted(tags))) - for ref in sorted(tags): - # sorting will prefer e.g. "2.0" over "2.0rc1" - if ref.startswith(tag_prefix): - r = ref[len(tag_prefix):] - if verbose: - print("picking %%s" %% r) - return {"version": r, - "full-revisionid": keywords["full"].strip(), - "dirty": False, "error": None, - "date": date} - # no suitable tags, so version is "0+unknown", but full hex is still there - if verbose: - print("no suitable tags, using unknown + full revision id") - return {"version": "0+unknown", - "full-revisionid": keywords["full"].strip(), - "dirty": False, "error": "no suitable tags", "date": None} - - -@register_vcs_handler("git", "pieces_from_vcs") -def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): - """Get version from 'git describe' in the root of the source tree. - - This only gets called if the git-archive 'subst' keywords were *not* - expanded, and _version.py hasn't already been rewritten with a short - version string, meaning we're inside a checked out source tree. 
- """ - GITS = ["git"] - if sys.platform == "win32": - GITS = ["git.cmd", "git.exe"] - - out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, - hide_stderr=True) - if rc != 0: - if verbose: - print("Directory %%s not under git control" %% root) - raise NotThisMethod("'git rev-parse --git-dir' returned error") - - # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] - # if there isn't one, this yields HEX[-dirty] (no NUM) - describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty", - "--always", "--long", - "--match", "%%s*" %% tag_prefix], - cwd=root) - # --long was added in git-1.5.5 - if describe_out is None: - raise NotThisMethod("'git describe' failed") - describe_out = describe_out.strip() - full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) - if full_out is None: - raise NotThisMethod("'git rev-parse' failed") - full_out = full_out.strip() - - pieces = {} - pieces["long"] = full_out - pieces["short"] = full_out[:7] # maybe improved later - pieces["error"] = None - - # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] - # TAG might have hyphens. - git_describe = describe_out - - # look for -dirty suffix - dirty = git_describe.endswith("-dirty") - pieces["dirty"] = dirty - if dirty: - git_describe = git_describe[:git_describe.rindex("-dirty")] - - # now we have TAG-NUM-gHEX or HEX - - if "-" in git_describe: - # TAG-NUM-gHEX - mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) - if not mo: - # unparseable. Maybe git-describe is misbehaving? - pieces["error"] = ("unable to parse git-describe output: '%%s'" - %% describe_out) - return pieces - - # tag - full_tag = mo.group(1) - if not full_tag.startswith(tag_prefix): - if verbose: - fmt = "tag '%%s' doesn't start with prefix '%%s'" - print(fmt %% (full_tag, tag_prefix)) - pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'" - %% (full_tag, tag_prefix)) - return pieces - pieces["closest-tag"] = full_tag[len(tag_prefix):] - - # distance: number of commits since tag - pieces["distance"] = int(mo.group(2)) - - # commit: short hex revision ID - pieces["short"] = mo.group(3) - - else: - # HEX: no tags - pieces["closest-tag"] = None - count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], - cwd=root) - pieces["distance"] = int(count_out) # total number of commits - - # commit date: see ISO-8601 comment in git_versions_from_keywords() - date = run_command(GITS, ["show", "-s", "--format=%%ci", "HEAD"], - cwd=root)[0].strip() - pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) - - return pieces - - -def plus_or_dot(pieces): - """Return a + if we don't already have one, else return a .""" - if "+" in pieces.get("closest-tag", ""): - return "." - return "+" - - -def render_pep440(pieces): - """Build up version string, with post-release "local version identifier". - - Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you - get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty - - Exceptions: - 1: no tags. git_describe was just HEX. 
0+untagged.DISTANCE.gHEX[.dirty] - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += plus_or_dot(pieces) - rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"]) - if pieces["dirty"]: - rendered += ".dirty" - else: - # exception #1 - rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"], - pieces["short"]) - if pieces["dirty"]: - rendered += ".dirty" - return rendered - - -def render_pep440_pre(pieces): - """TAG[.post.devDISTANCE] -- No -dirty. - - Exceptions: - 1: no tags. 0.post.devDISTANCE - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"]: - rendered += ".post.dev%%d" %% pieces["distance"] - else: - # exception #1 - rendered = "0.post.dev%%d" %% pieces["distance"] - return rendered - - -def render_pep440_post(pieces): - """TAG[.postDISTANCE[.dev0]+gHEX] . - - The ".dev0" means dirty. Note that .dev0 sorts backwards - (a dirty tree will appear "older" than the corresponding clean one), - but you shouldn't be releasing software with -dirty anyways. - - Exceptions: - 1: no tags. 0.postDISTANCE[.dev0] - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += ".post%%d" %% pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - rendered += plus_or_dot(pieces) - rendered += "g%%s" %% pieces["short"] - else: - # exception #1 - rendered = "0.post%%d" %% pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - rendered += "+g%%s" %% pieces["short"] - return rendered - - -def render_pep440_old(pieces): - """TAG[.postDISTANCE[.dev0]] . - - The ".dev0" means dirty. - - Eexceptions: - 1: no tags. 0.postDISTANCE[.dev0] - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += ".post%%d" %% pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - else: - # exception #1 - rendered = "0.post%%d" %% pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - return rendered - - -def render_git_describe(pieces): - """TAG[-DISTANCE-gHEX][-dirty]. - - Like 'git describe --tags --dirty --always'. - - Exceptions: - 1: no tags. HEX[-dirty] (note: no 'g' prefix) - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"]: - rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"]) - else: - # exception #1 - rendered = pieces["short"] - if pieces["dirty"]: - rendered += "-dirty" - return rendered - - -def render_git_describe_long(pieces): - """TAG-DISTANCE-gHEX[-dirty]. - - Like 'git describe --tags --dirty --always -long'. - The distance/hash is unconditional. - - Exceptions: - 1: no tags. 
HEX[-dirty] (note: no 'g' prefix) - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"]) - else: - # exception #1 - rendered = pieces["short"] - if pieces["dirty"]: - rendered += "-dirty" - return rendered - - -def render(pieces, style): - """Render the given version pieces into the requested style.""" - if pieces["error"]: - return {"version": "unknown", - "full-revisionid": pieces.get("long"), - "dirty": None, - "error": pieces["error"], - "date": None} - - if not style or style == "default": - style = "pep440" # the default - - if style == "pep440": - rendered = render_pep440(pieces) - elif style == "pep440-pre": - rendered = render_pep440_pre(pieces) - elif style == "pep440-post": - rendered = render_pep440_post(pieces) - elif style == "pep440-old": - rendered = render_pep440_old(pieces) - elif style == "git-describe": - rendered = render_git_describe(pieces) - elif style == "git-describe-long": - rendered = render_git_describe_long(pieces) - else: - raise ValueError("unknown style '%%s'" %% style) - - return {"version": rendered, "full-revisionid": pieces["long"], - "dirty": pieces["dirty"], "error": None, - "date": pieces.get("date")} - - -def get_versions(): - """Get version information or return default if unable to do so.""" - # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have - # __file__, we can work backwards from there to the root. Some - # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which - # case we can only use expanded keywords. - - cfg = get_config() - verbose = cfg.verbose - - try: - return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, - verbose) - except NotThisMethod: - pass - - try: - root = os.path.realpath(__file__) - # versionfile_source is the relative path from the top of the source - # tree (where the .git directory might live) to this file. Invert - # this to find the root from __file__. - for i in cfg.versionfile_source.split('/'): - root = os.path.dirname(root) - except NameError: - return {"version": "0+unknown", "full-revisionid": None, - "dirty": None, - "error": "unable to find root of source tree", - "date": None} - - try: - pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) - return render(pieces, cfg.style) - except NotThisMethod: - pass - - try: - if cfg.parentdir_prefix: - return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) - except NotThisMethod: - pass - - return {"version": "0+unknown", "full-revisionid": None, - "dirty": None, - "error": "unable to compute version", "date": None} -''' - - -@register_vcs_handler("git", "get_keywords") -def git_get_keywords(versionfile_abs): - """Extract version information from the given file.""" - # the code embedded in _version.py can just fetch the value of these - # keywords. When used from setup.py, we don't want to import _version.py, - # so we do it with a regexp instead. This function is not used from - # _version.py. 
- keywords = {} - try: - f = open(versionfile_abs, "r") - for line in f.readlines(): - if line.strip().startswith("git_refnames ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["refnames"] = mo.group(1) - if line.strip().startswith("git_full ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["full"] = mo.group(1) - if line.strip().startswith("git_date ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["date"] = mo.group(1) - f.close() - except EnvironmentError: - pass - return keywords - - -@register_vcs_handler("git", "keywords") -def git_versions_from_keywords(keywords, tag_prefix, verbose): - """Get version information from git keywords.""" - if not keywords: - raise NotThisMethod("no keywords at all, weird") - date = keywords.get("date") - if date is not None: - # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant - # datestamp. However we prefer "%ci" (which expands to an "ISO-8601 - # -like" string, which we must then edit to make compliant), because - # it's been around since git-1.5.3, and it's too difficult to - # discover which version we're using, or to work around using an - # older one. - date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) - refnames = keywords["refnames"].strip() - if refnames.startswith("$Format"): - if verbose: - print("keywords are unexpanded, not using") - raise NotThisMethod("unexpanded keywords, not a git-archive tarball") - refs = set([r.strip() for r in refnames.strip("()").split(",")]) - # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of - # just "foo-1.0". If we see a "tag: " prefix, prefer those. - TAG = "tag: " - tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) - if not tags: - # Either we're using git < 1.8.3, or there really are no tags. We use - # a heuristic: assume all version tags have a digit. The old git %d - # expansion behaves like git log --decorate=short and strips out the - # refs/heads/ and refs/tags/ prefixes that would let us distinguish - # between branches and tags. By ignoring refnames without digits, we - # filter out many common branch names like "release" and - # "stabilization", as well as "HEAD" and "master". - tags = set([r for r in refs if re.search(r'\d', r)]) - if verbose: - print("discarding '%s', no digits" % ",".join(refs - tags)) - if verbose: - print("likely tags: %s" % ",".join(sorted(tags))) - for ref in sorted(tags): - # sorting will prefer e.g. "2.0" over "2.0rc1" - if ref.startswith(tag_prefix): - r = ref[len(tag_prefix):] - if verbose: - print("picking %s" % r) - return {"version": r, - "full-revisionid": keywords["full"].strip(), - "dirty": False, "error": None, - "date": date} - # no suitable tags, so version is "0+unknown", but full hex is still there - if verbose: - print("no suitable tags, using unknown + full revision id") - return {"version": "0+unknown", - "full-revisionid": keywords["full"].strip(), - "dirty": False, "error": "no suitable tags", "date": None} - - -@register_vcs_handler("git", "pieces_from_vcs") -def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): - """Get version from 'git describe' in the root of the source tree. - - This only gets called if the git-archive 'subst' keywords were *not* - expanded, and _version.py hasn't already been rewritten with a short - version string, meaning we're inside a checked out source tree. 
- """ - GITS = ["git"] - if sys.platform == "win32": - GITS = ["git.cmd", "git.exe"] - - out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, - hide_stderr=True) - if rc != 0: - if verbose: - print("Directory %s not under git control" % root) - raise NotThisMethod("'git rev-parse --git-dir' returned error") - - # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] - # if there isn't one, this yields HEX[-dirty] (no NUM) - describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty", - "--always", "--long", - "--match", "%s*" % tag_prefix], - cwd=root) - # --long was added in git-1.5.5 - if describe_out is None: - raise NotThisMethod("'git describe' failed") - describe_out = describe_out.strip() - full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) - if full_out is None: - raise NotThisMethod("'git rev-parse' failed") - full_out = full_out.strip() - - pieces = {} - pieces["long"] = full_out - pieces["short"] = full_out[:7] # maybe improved later - pieces["error"] = None - - # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] - # TAG might have hyphens. - git_describe = describe_out - - # look for -dirty suffix - dirty = git_describe.endswith("-dirty") - pieces["dirty"] = dirty - if dirty: - git_describe = git_describe[:git_describe.rindex("-dirty")] - - # now we have TAG-NUM-gHEX or HEX - - if "-" in git_describe: - # TAG-NUM-gHEX - mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) - if not mo: - # unparseable. Maybe git-describe is misbehaving? - pieces["error"] = ("unable to parse git-describe output: '%s'" - % describe_out) - return pieces - - # tag - full_tag = mo.group(1) - if not full_tag.startswith(tag_prefix): - if verbose: - fmt = "tag '%s' doesn't start with prefix '%s'" - print(fmt % (full_tag, tag_prefix)) - pieces["error"] = ("tag '%s' doesn't start with prefix '%s'" - % (full_tag, tag_prefix)) - return pieces - pieces["closest-tag"] = full_tag[len(tag_prefix):] - - # distance: number of commits since tag - pieces["distance"] = int(mo.group(2)) - - # commit: short hex revision ID - pieces["short"] = mo.group(3) - - else: - # HEX: no tags - pieces["closest-tag"] = None - count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], - cwd=root) - pieces["distance"] = int(count_out) # total number of commits - - # commit date: see ISO-8601 comment in git_versions_from_keywords() - date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], - cwd=root)[0].strip() - pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) - - return pieces - - -def do_vcs_install(manifest_in, versionfile_source, ipy): - """Git-specific installation logic for Versioneer. - - For Git, this means creating/changing .gitattributes to mark _version.py - for export-subst keyword substitution. 
- """ - GITS = ["git"] - if sys.platform == "win32": - GITS = ["git.cmd", "git.exe"] - files = [manifest_in, versionfile_source] - if ipy: - files.append(ipy) - try: - me = __file__ - if me.endswith(".pyc") or me.endswith(".pyo"): - me = os.path.splitext(me)[0] + ".py" - versioneer_file = os.path.relpath(me) - except NameError: - versioneer_file = "versioneer.py" - files.append(versioneer_file) - present = False - try: - f = open(".gitattributes", "r") - for line in f.readlines(): - if line.strip().startswith(versionfile_source): - if "export-subst" in line.strip().split()[1:]: - present = True - f.close() - except EnvironmentError: - pass - if not present: - f = open(".gitattributes", "a+") - f.write("%s export-subst\n" % versionfile_source) - f.close() - files.append(".gitattributes") - run_command(GITS, ["add", "--"] + files) - - -def versions_from_parentdir(parentdir_prefix, root, verbose): - """Try to determine the version from the parent directory name. - - Source tarballs conventionally unpack into a directory that includes both - the project name and a version string. We will also support searching up - two directory levels for an appropriately named parent directory - """ - rootdirs = [] - - for i in range(3): - dirname = os.path.basename(root) - if dirname.startswith(parentdir_prefix): - return {"version": dirname[len(parentdir_prefix):], - "full-revisionid": None, - "dirty": False, "error": None, "date": None} - else: - rootdirs.append(root) - root = os.path.dirname(root) # up a level - - if verbose: - print("Tried directories %s but none started with prefix %s" % - (str(rootdirs), parentdir_prefix)) - raise NotThisMethod("rootdir doesn't start with parentdir_prefix") - - -SHORT_VERSION_PY = """ -# This file was generated by 'versioneer.py' (0.18) from -# revision-control system data, or from the parent directory name of an -# unpacked source archive. Distribution tarballs contain a pre-generated copy -# of this file. - -import json - -version_json = ''' -%s -''' # END VERSION_JSON - - -def get_versions(): - return json.loads(version_json) -""" - - -def versions_from_file(filename): - """Try to determine the version from _version.py if present.""" - try: - with open(filename) as f: - contents = f.read() - except EnvironmentError: - raise NotThisMethod("unable to read _version.py") - mo = re.search(r"version_json = '''\n(.*)''' # END VERSION_JSON", - contents, re.M | re.S) - if not mo: - mo = re.search(r"version_json = '''\r\n(.*)''' # END VERSION_JSON", - contents, re.M | re.S) - if not mo: - raise NotThisMethod("no version_json in _version.py") - return json.loads(mo.group(1)) - - -def write_to_version_file(filename, versions): - """Write the given version number to the given _version.py file.""" - os.unlink(filename) - contents = json.dumps(versions, sort_keys=True, - indent=1, separators=(",", ": ")) - with open(filename, "w") as f: - f.write(SHORT_VERSION_PY % contents) - - print("set %s to '%s'" % (filename, versions["version"])) - - -def plus_or_dot(pieces): - """Return a + if we don't already have one, else return a .""" - if "+" in pieces.get("closest-tag", ""): - return "." - return "+" - - -def render_pep440(pieces): - """Build up version string, with post-release "local version identifier". - - Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you - get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty - - Exceptions: - 1: no tags. git_describe was just HEX. 
0+untagged.DISTANCE.gHEX[.dirty] - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += plus_or_dot(pieces) - rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) - if pieces["dirty"]: - rendered += ".dirty" - else: - # exception #1 - rendered = "0+untagged.%d.g%s" % (pieces["distance"], - pieces["short"]) - if pieces["dirty"]: - rendered += ".dirty" - return rendered - - -def render_pep440_pre(pieces): - """TAG[.post.devDISTANCE] -- No -dirty. - - Exceptions: - 1: no tags. 0.post.devDISTANCE - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"]: - rendered += ".post.dev%d" % pieces["distance"] - else: - # exception #1 - rendered = "0.post.dev%d" % pieces["distance"] - return rendered - - -def render_pep440_post(pieces): - """TAG[.postDISTANCE[.dev0]+gHEX] . - - The ".dev0" means dirty. Note that .dev0 sorts backwards - (a dirty tree will appear "older" than the corresponding clean one), - but you shouldn't be releasing software with -dirty anyways. - - Exceptions: - 1: no tags. 0.postDISTANCE[.dev0] - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += ".post%d" % pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - rendered += plus_or_dot(pieces) - rendered += "g%s" % pieces["short"] - else: - # exception #1 - rendered = "0.post%d" % pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - rendered += "+g%s" % pieces["short"] - return rendered - - -def render_pep440_old(pieces): - """TAG[.postDISTANCE[.dev0]] . - - The ".dev0" means dirty. - - Eexceptions: - 1: no tags. 0.postDISTANCE[.dev0] - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += ".post%d" % pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - else: - # exception #1 - rendered = "0.post%d" % pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - return rendered - - -def render_git_describe(pieces): - """TAG[-DISTANCE-gHEX][-dirty]. - - Like 'git describe --tags --dirty --always'. - - Exceptions: - 1: no tags. HEX[-dirty] (note: no 'g' prefix) - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"]: - rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) - else: - # exception #1 - rendered = pieces["short"] - if pieces["dirty"]: - rendered += "-dirty" - return rendered - - -def render_git_describe_long(pieces): - """TAG-DISTANCE-gHEX[-dirty]. - - Like 'git describe --tags --dirty --always -long'. - The distance/hash is unconditional. - - Exceptions: - 1: no tags. 
HEX[-dirty] (note: no 'g' prefix) - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) - else: - # exception #1 - rendered = pieces["short"] - if pieces["dirty"]: - rendered += "-dirty" - return rendered - - -def render(pieces, style): - """Render the given version pieces into the requested style.""" - if pieces["error"]: - return {"version": "unknown", - "full-revisionid": pieces.get("long"), - "dirty": None, - "error": pieces["error"], - "date": None} - - if not style or style == "default": - style = "pep440" # the default - - if style == "pep440": - rendered = render_pep440(pieces) - elif style == "pep440-pre": - rendered = render_pep440_pre(pieces) - elif style == "pep440-post": - rendered = render_pep440_post(pieces) - elif style == "pep440-old": - rendered = render_pep440_old(pieces) - elif style == "git-describe": - rendered = render_git_describe(pieces) - elif style == "git-describe-long": - rendered = render_git_describe_long(pieces) - else: - raise ValueError("unknown style '%s'" % style) - - return {"version": rendered, "full-revisionid": pieces["long"], - "dirty": pieces["dirty"], "error": None, - "date": pieces.get("date")} - - -class VersioneerBadRootError(Exception): - """The project root directory is unknown or missing key files.""" - - -def get_versions(verbose=False): - """Get the project version from whatever source is available. - - Returns dict with two keys: 'version' and 'full'. - """ - if "versioneer" in sys.modules: - # see the discussion in cmdclass.py:get_cmdclass() - del sys.modules["versioneer"] - - root = get_root() - cfg = get_config_from_root(root) - - assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg" - handlers = HANDLERS.get(cfg.VCS) - assert handlers, "unrecognized VCS '%s'" % cfg.VCS - verbose = verbose or cfg.verbose - assert cfg.versionfile_source is not None, \ - "please set versioneer.versionfile_source" - assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix" - - versionfile_abs = os.path.join(root, cfg.versionfile_source) - - # extract version from first of: _version.py, VCS command (e.g. 'git - # describe'), parentdir. This is meant to work for developers using a - # source checkout, for users of a tarball created by 'setup.py sdist', - # and for users of a tarball/zipball created by 'git archive' or github's - # download-from-tag feature or the equivalent in other VCSes. 
- - get_keywords_f = handlers.get("get_keywords") - from_keywords_f = handlers.get("keywords") - if get_keywords_f and from_keywords_f: - try: - keywords = get_keywords_f(versionfile_abs) - ver = from_keywords_f(keywords, cfg.tag_prefix, verbose) - if verbose: - print("got version from expanded keyword %s" % ver) - return ver - except NotThisMethod: - pass - - try: - ver = versions_from_file(versionfile_abs) - if verbose: - print("got version from file %s %s" % (versionfile_abs, ver)) - return ver - except NotThisMethod: - pass - - from_vcs_f = handlers.get("pieces_from_vcs") - if from_vcs_f: - try: - pieces = from_vcs_f(cfg.tag_prefix, root, verbose) - ver = render(pieces, cfg.style) - if verbose: - print("got version from VCS %s" % ver) - return ver - except NotThisMethod: - pass - - try: - if cfg.parentdir_prefix: - ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose) - if verbose: - print("got version from parentdir %s" % ver) - return ver - except NotThisMethod: - pass - - if verbose: - print("unable to compute version") - - return {"version": "0+unknown", "full-revisionid": None, - "dirty": None, "error": "unable to compute version", - "date": None} - - -def get_version(): - """Get the short version string for this project.""" - return get_versions()["version"] - - -def get_cmdclass(): - """Get the custom setuptools/distutils subclasses used by Versioneer.""" - if "versioneer" in sys.modules: - del sys.modules["versioneer"] - # this fixes the "python setup.py develop" case (also 'install' and - # 'easy_install .'), in which subdependencies of the main project are - # built (using setup.py bdist_egg) in the same python process. Assume - # a main project A and a dependency B, which use different versions - # of Versioneer. A's setup.py imports A's Versioneer, leaving it in - # sys.modules by the time B's setup.py is executed, causing B to run - # with the wrong versioneer. Setuptools wraps the sub-dep builds in a - # sandbox that restores sys.modules to it's pre-build state, so the - # parent is protected against the child's "import versioneer". By - # removing ourselves from sys.modules here, before the child build - # happens, we protect the child from the parent's versioneer too. - # Also see https://github.com/warner/python-versioneer/issues/52 - - cmds = {} - - # we add "version" to both distutils and setuptools - from distutils.core import Command - - class cmd_version(Command): - description = "report generated version string" - user_options = [] - boolean_options = [] - - def initialize_options(self): - pass - - def finalize_options(self): - pass - - def run(self): - vers = get_versions(verbose=True) - print("Version: %s" % vers["version"]) - print(" full-revisionid: %s" % vers.get("full-revisionid")) - print(" dirty: %s" % vers.get("dirty")) - print(" date: %s" % vers.get("date")) - if vers["error"]: - print(" error: %s" % vers["error"]) - cmds["version"] = cmd_version - - # we override "build_py" in both distutils and setuptools - # - # most invocation pathways end up running build_py: - # distutils/build -> build_py - # distutils/install -> distutils/build ->.. - # setuptools/bdist_wheel -> distutils/install ->.. - # setuptools/bdist_egg -> distutils/install_lib -> build_py - # setuptools/install -> bdist_egg ->.. - # setuptools/develop -> ? 
- # pip install: - # copies source tree to a tempdir before running egg_info/etc - # if .git isn't copied too, 'git describe' will fail - # then does setup.py bdist_wheel, or sometimes setup.py install - # setup.py egg_info -> ? - - # we override different "build_py" commands for both environments - if "setuptools" in sys.modules: - from setuptools.command.build_py import build_py as _build_py - else: - from distutils.command.build_py import build_py as _build_py - - class cmd_build_py(_build_py): - def run(self): - root = get_root() - cfg = get_config_from_root(root) - versions = get_versions() - _build_py.run(self) - # now locate _version.py in the new build/ directory and replace - # it with an updated value - if cfg.versionfile_build: - target_versionfile = os.path.join(self.build_lib, - cfg.versionfile_build) - print("UPDATING %s" % target_versionfile) - write_to_version_file(target_versionfile, versions) - cmds["build_py"] = cmd_build_py - - if "cx_Freeze" in sys.modules: # cx_freeze enabled? - from cx_Freeze.dist import build_exe as _build_exe - # nczeczulin reports that py2exe won't like the pep440-style string - # as FILEVERSION, but it can be used for PRODUCTVERSION, e.g. - # setup(console=[{ - # "version": versioneer.get_version().split("+", 1)[0], # FILEVERSION - # "product_version": versioneer.get_version(), - # ... - - class cmd_build_exe(_build_exe): - def run(self): - root = get_root() - cfg = get_config_from_root(root) - versions = get_versions() - target_versionfile = cfg.versionfile_source - print("UPDATING %s" % target_versionfile) - write_to_version_file(target_versionfile, versions) - - _build_exe.run(self) - os.unlink(target_versionfile) - with open(cfg.versionfile_source, "w") as f: - LONG = LONG_VERSION_PY[cfg.VCS] - f.write(LONG % - {"DOLLAR": "$", - "STYLE": cfg.style, - "TAG_PREFIX": cfg.tag_prefix, - "PARENTDIR_PREFIX": cfg.parentdir_prefix, - "VERSIONFILE_SOURCE": cfg.versionfile_source, - }) - cmds["build_exe"] = cmd_build_exe - del cmds["build_py"] - - if 'py2exe' in sys.modules: # py2exe enabled? 
- try: - from py2exe.distutils_buildexe import py2exe as _py2exe # py3 - except ImportError: - from py2exe.build_exe import py2exe as _py2exe # py2 - - class cmd_py2exe(_py2exe): - def run(self): - root = get_root() - cfg = get_config_from_root(root) - versions = get_versions() - target_versionfile = cfg.versionfile_source - print("UPDATING %s" % target_versionfile) - write_to_version_file(target_versionfile, versions) - - _py2exe.run(self) - os.unlink(target_versionfile) - with open(cfg.versionfile_source, "w") as f: - LONG = LONG_VERSION_PY[cfg.VCS] - f.write(LONG % - {"DOLLAR": "$", - "STYLE": cfg.style, - "TAG_PREFIX": cfg.tag_prefix, - "PARENTDIR_PREFIX": cfg.parentdir_prefix, - "VERSIONFILE_SOURCE": cfg.versionfile_source, - }) - cmds["py2exe"] = cmd_py2exe - - # we override different "sdist" commands for both environments - if "setuptools" in sys.modules: - from setuptools.command.sdist import sdist as _sdist - else: - from distutils.command.sdist import sdist as _sdist - - class cmd_sdist(_sdist): - def run(self): - versions = get_versions() - self._versioneer_generated_versions = versions - # unless we update this, the command will keep using the old - # version - self.distribution.metadata.version = versions["version"] - return _sdist.run(self) - - def make_release_tree(self, base_dir, files): - root = get_root() - cfg = get_config_from_root(root) - _sdist.make_release_tree(self, base_dir, files) - # now locate _version.py in the new base_dir directory - # (remembering that it may be a hardlink) and replace it with an - # updated value - target_versionfile = os.path.join(base_dir, cfg.versionfile_source) - print("UPDATING %s" % target_versionfile) - write_to_version_file(target_versionfile, - self._versioneer_generated_versions) - cmds["sdist"] = cmd_sdist - - return cmds - - -CONFIG_ERROR = """ -setup.cfg is missing the necessary Versioneer configuration. You need -a section like: - - [versioneer] - VCS = git - style = pep440 - versionfile_source = src/myproject/_version.py - versionfile_build = myproject/_version.py - tag_prefix = - parentdir_prefix = myproject- - -You will also need to edit your setup.py to use the results: - - import versioneer - setup(version=versioneer.get_version(), - cmdclass=versioneer.get_cmdclass(), ...) - -Please read the docstring in ./versioneer.py for configuration instructions, -edit setup.cfg, and re-run the installer or 'python versioneer.py setup'. -""" - -SAMPLE_CONFIG = """ -# See the docstring in versioneer.py for instructions. Note that you must -# re-run 'versioneer.py setup' after changing this section, and commit the -# resulting files. 
- -[versioneer] -#VCS = git -#style = pep440 -#versionfile_source = -#versionfile_build = -#tag_prefix = -#parentdir_prefix = - -""" - -INIT_PY_SNIPPET = """ -from ._version import get_versions -__version__ = get_versions()['version'] -del get_versions -""" - - -def do_setup(): - """Main VCS-independent setup function for installing Versioneer.""" - root = get_root() - try: - cfg = get_config_from_root(root) - except (EnvironmentError, configparser.NoSectionError, - configparser.NoOptionError) as e: - if isinstance(e, (EnvironmentError, configparser.NoSectionError)): - print("Adding sample versioneer config to setup.cfg", - file=sys.stderr) - with open(os.path.join(root, "setup.cfg"), "a") as f: - f.write(SAMPLE_CONFIG) - print(CONFIG_ERROR, file=sys.stderr) - return 1 - - print(" creating %s" % cfg.versionfile_source) - with open(cfg.versionfile_source, "w") as f: - LONG = LONG_VERSION_PY[cfg.VCS] - f.write(LONG % {"DOLLAR": "$", - "STYLE": cfg.style, - "TAG_PREFIX": cfg.tag_prefix, - "PARENTDIR_PREFIX": cfg.parentdir_prefix, - "VERSIONFILE_SOURCE": cfg.versionfile_source, - }) - - ipy = os.path.join(os.path.dirname(cfg.versionfile_source), - "__init__.py") - if os.path.exists(ipy): - try: - with open(ipy, "r") as f: - old = f.read() - except EnvironmentError: - old = "" - if INIT_PY_SNIPPET not in old: - print(" appending to %s" % ipy) - with open(ipy, "a") as f: - f.write(INIT_PY_SNIPPET) - else: - print(" %s unmodified" % ipy) - else: - print(" %s doesn't exist, ok" % ipy) - ipy = None - - # Make sure both the top-level "versioneer.py" and versionfile_source - # (PKG/_version.py, used by runtime code) are in MANIFEST.in, so - # they'll be copied into source distributions. Pip won't be able to - # install the package without this. - manifest_in = os.path.join(root, "MANIFEST.in") - simple_includes = set() - try: - with open(manifest_in, "r") as f: - for line in f: - if line.startswith("include "): - for include in line.split()[1:]: - simple_includes.add(include) - except EnvironmentError: - pass - # That doesn't cover everything MANIFEST.in can do - # (http://docs.python.org/2/distutils/sourcedist.html#commands), so - # it might give some false negatives. Appending redundant 'include' - # lines is safe, though. - if "versioneer.py" not in simple_includes: - print(" appending 'versioneer.py' to MANIFEST.in") - with open(manifest_in, "a") as f: - f.write("include versioneer.py\n") - else: - print(" 'versioneer.py' already in MANIFEST.in") - if cfg.versionfile_source not in simple_includes: - print(" appending versionfile_source ('%s') to MANIFEST.in" % - cfg.versionfile_source) - with open(manifest_in, "a") as f: - f.write("include %s\n" % cfg.versionfile_source) - else: - print(" versionfile_source already in MANIFEST.in") - - # Make VCS-specific changes. For git, this means creating/changing - # .gitattributes to mark _version.py for export-subst keyword - # substitution. 
- do_vcs_install(manifest_in, cfg.versionfile_source, ipy) - return 0 - - -def scan_setup_py(): - """Validate the contents of setup.py against Versioneer's expectations.""" - found = set() - setters = False - errors = 0 - with open("setup.py", "r") as f: - for line in f.readlines(): - if "import versioneer" in line: - found.add("import") - if "versioneer.get_cmdclass()" in line: - found.add("cmdclass") - if "versioneer.get_version()" in line: - found.add("get_version") - if "versioneer.VCS" in line: - setters = True - if "versioneer.versionfile_source" in line: - setters = True - if len(found) != 3: - print("") - print("Your setup.py appears to be missing some important items") - print("(but I might be wrong). Please make sure it has something") - print("roughly like the following:") - print("") - print(" import versioneer") - print(" setup( version=versioneer.get_version(),") - print(" cmdclass=versioneer.get_cmdclass(), ...)") - print("") - errors += 1 - if setters: - print("You should remove lines like 'versioneer.VCS = ' and") - print("'versioneer.versionfile_source = ' . This configuration") - print("now lives in setup.cfg, and should be removed from setup.py") - print("") - errors += 1 - return errors - - -if __name__ == "__main__": - cmd = sys.argv[1] - if cmd == "setup": - errors = do_setup() - errors += scan_setup_py() - if errors: - sys.exit(1) From 71298547a5fb7ddd387a886847a4a140515c6681 Mon Sep 17 00:00:00 2001 From: Taylor Salo Date: Thu, 16 Mar 2023 13:15:32 -0400 Subject: [PATCH 166/177] Fix CI (#36) * Base the cache on pyproject.toml, not setup.cfg. * Also drop use of setup.py in publishing action. --- .circleci/config.yml | 32 ++++++++++++++-------------- .github/workflows/python-publish.yml | 2 +- 2 files changed, 17 insertions(+), 17 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 1437ece74..b5468324d 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -14,7 +14,7 @@ jobs: steps: - checkout - restore_cache: - key: conda-py37-v2-{{ checksum "setup.cfg" }} + key: conda-py37-v2-{{ checksum "pyproject.toml" }} - run: name: Generate environment command: | @@ -24,7 +24,7 @@ jobs: pip install -e .[tests] fi - save_cache: - key: conda-py37-v2-{{ checksum "setup.cfg" }} + key: conda-py37-v2-{{ checksum "pyproject.toml" }} paths: - /opt/conda/envs/tedana_py37 @@ -35,7 +35,7 @@ jobs: steps: - checkout - restore_cache: - key: conda-py37-v2-{{ checksum "setup.cfg" }} + key: conda-py37-v2-{{ checksum "pyproject.toml" }} - run: name: Running unit tests command: | @@ -57,7 +57,7 @@ jobs: steps: - checkout - restore_cache: - key: conda-py38-v2-{{ checksum "setup.cfg" }} + key: conda-py38-v2-{{ checksum "pyproject.toml" }} - run: name: Generate environment command: | @@ -76,7 +76,7 @@ jobs: mkdir /tmp/src/coverage mv /tmp/src/tedana/.coverage /tmp/src/coverage/.coverage.py38 - save_cache: - key: conda-py38-v2-{{ checksum "setup.cfg" }} + key: conda-py38-v2-{{ checksum "pyproject.toml" }} paths: - /opt/conda/envs/tedana_py38 - persist_to_workspace: @@ -91,7 +91,7 @@ jobs: steps: - checkout - restore_cache: - key: conda-py39-v2-{{ checksum "setup.cfg" }} + key: conda-py39-v2-{{ checksum "pyproject.toml" }} - run: name: Generate environment command: | @@ -110,7 +110,7 @@ jobs: mkdir /tmp/src/coverage mv /tmp/src/tedana/.coverage /tmp/src/coverage/.coverage.py39 - save_cache: - key: conda-py39-v2-{{ checksum "setup.cfg" }} + key: conda-py39-v2-{{ checksum "pyproject.toml" }} paths: - /opt/conda/envs/tedana_py39 - persist_to_workspace: @@ -125,7 +125,7 
@@ jobs: steps: - checkout - restore_cache: - key: conda-py310-v1-{{ checksum "setup.cfg" }} + key: conda-py310-v1-{{ checksum "pyproject.toml" }} - run: name: Generate environment command: | @@ -144,7 +144,7 @@ jobs: mkdir /tmp/src/coverage mv /tmp/src/tedana/.coverage /tmp/src/coverage/.coverage.py310 - save_cache: - key: conda-py310-v1-{{ checksum "setup.cfg" }} + key: conda-py310-v1-{{ checksum "pyproject.toml" }} paths: - /opt/conda/envs/tedana_py310 - persist_to_workspace: @@ -160,7 +160,7 @@ jobs: steps: - checkout - restore_cache: - key: conda-py37-v2-{{ checksum "setup.cfg" }} + key: conda-py37-v2-{{ checksum "pyproject.toml" }} - run: name: Style check command: | @@ -176,7 +176,7 @@ jobs: steps: - checkout - restore_cache: - key: conda-py37-v2-{{ checksum "setup.cfg" }} + key: conda-py37-v2-{{ checksum "pyproject.toml" }} - run: name: Run integration tests no_output_timeout: 40m @@ -201,7 +201,7 @@ jobs: steps: - checkout - restore_cache: - key: conda-py37-v2-{{ checksum "setup.cfg" }} + key: conda-py37-v2-{{ checksum "pyproject.toml" }} - run: name: Run integration tests no_output_timeout: 40m @@ -226,7 +226,7 @@ jobs: steps: - checkout - restore_cache: - key: conda-py37-v2-{{ checksum "setup.cfg" }} + key: conda-py37-v2-{{ checksum "pyproject.toml" }} - run: name: Run integration tests no_output_timeout: 40m @@ -251,7 +251,7 @@ jobs: steps: - checkout - restore_cache: - key: conda-py37-v2-{{ checksum "setup.cfg" }} + key: conda-py37-v2-{{ checksum "pyproject.toml" }} - run: name: Run integration tests no_output_timeout: 40m @@ -277,7 +277,7 @@ jobs: steps: - checkout - restore_cache: - key: conda-py37-v2-{{ checksum "setup.cfg" }} + key: conda-py37-v2-{{ checksum "pyproject.toml" }} - run: name: Run integration tests no_output_timeout: 40m @@ -304,7 +304,7 @@ jobs: at: /tmp - checkout - restore_cache: - key: conda-py37-v2-{{ checksum "setup.cfg" }} + key: conda-py37-v2-{{ checksum "pyproject.toml" }} - run: name: Merge coverage files command: | diff --git a/.github/workflows/python-publish.yml b/.github/workflows/python-publish.yml index 561debfc0..dc678799e 100644 --- a/.github/workflows/python-publish.yml +++ b/.github/workflows/python-publish.yml @@ -27,5 +27,5 @@ jobs: TWINE_USERNAME: ${{ secrets.PYPI_USERNAME }} TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }} run: | - python setup.py sdist bdist_wheel + python -m build --sdist --wheel --outdir dist/ . twine upload dist/* From df57b56d5e7f846d2296769dc93a176ff09a8932 Mon Sep 17 00:00:00 2001 From: Taylor Salo Date: Tue, 28 Mar 2023 10:38:17 -0400 Subject: [PATCH 167/177] Add flake8-pyproject as a requirement. (#37) --- pyproject.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/pyproject.toml b/pyproject.toml index 14779950e..d884f6c28 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -53,6 +53,7 @@ tests = [ "flake8>=3.7", "flake8-black", "flake8-isort", + "flake8-pyproject", "pytest", "pytest-cov", "requests", From f7cf82152e0f5553c65e30c72c638fc36a9f007b Mon Sep 17 00:00:00 2001 From: Taylor Salo Date: Tue, 28 Mar 2023 12:37:00 -0400 Subject: [PATCH 168/177] Try fixing coverage. 
(#38) --- .codecov.yml | 1 - MANIFEST.in | 2 -- pyproject.toml | 4 ++-- 3 files changed, 2 insertions(+), 5 deletions(-) diff --git a/.codecov.yml b/.codecov.yml index d5fe2b824..ef3135e15 100644 --- a/.codecov.yml +++ b/.codecov.yml @@ -16,4 +16,3 @@ coverage: ignore: - "tedana/tests/" - - "tedana/_version.py" diff --git a/MANIFEST.in b/MANIFEST.in index bfe1a3926..e69de29bb 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,2 +0,0 @@ -include versioneer.py -include tedana/_version.py diff --git a/pyproject.toml b/pyproject.toml index d884f6c28..035c3cf2b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -49,13 +49,13 @@ doc = [ ] tests = [ "codecov", - "coverage<5.0", + "coverage", "flake8>=3.7", "flake8-black", "flake8-isort", "flake8-pyproject", "pytest", - "pytest-cov", + "pytest-cov>=4.0.0", "requests", ] From ac52721895f6610d7d6051ede7910d3cb5deebc6 Mon Sep 17 00:00:00 2001 From: Dan Handwerker <7406227+handwerkerd@users.noreply.github.com> Date: Thu, 20 Apr 2023 10:17:11 -0400 Subject: [PATCH 169/177] Improving ica_reclassify (#39) * ica_reclassify docs now rendering in usage.html * moves file parsing to ica_reclassify_workflow * added error checks and tests --- docs/api.rst | 1 + docs/usage.rst | 20 +++- tedana/selection/selection_utils.py | 1 - tedana/tests/test_integration.py | 46 +++++++-- tedana/workflows/__init__.py | 4 +- tedana/workflows/ica_reclassify.py | 144 +++++++++++++++++++--------- tedana/workflows/t2smap.py | 2 +- tedana/workflows/tedana.py | 2 +- 8 files changed, 162 insertions(+), 58 deletions(-) diff --git a/docs/api.rst b/docs/api.rst index fce7d27bc..c91211989 100644 --- a/docs/api.rst +++ b/docs/api.rst @@ -24,6 +24,7 @@ API :toctree: generated/ tedana.workflows.tedana_workflow + tedana.workflows.ica_reclassify_workflow tedana.workflows.t2smap_workflow diff --git a/docs/usage.rst b/docs/usage.rst index 39d76a3ef..8037a80ff 100644 --- a/docs/usage.rst +++ b/docs/usage.rst @@ -7,8 +7,8 @@ Using tedana from the command line #. Acquired echo times (in milliseconds) #. Functional datasets equal to the number of acquired echoes -But you can supply many other options, viewable with ``tedana -h`` or -``t2smap -h``. +But you can supply many other options, viewable with ``tedana -h``, +``ica_reclassify -h``, or ``t2smap -h``. For most use cases, we recommend that users call tedana from within existing fMRI preprocessing pipelines such as `fMRIPrep`_ or `afni_proc.py`_. @@ -51,6 +51,22 @@ https://tedana.readthedocs.io/en/latest/outputs.html To examine regions-of-interest with multi-echo data, apply masks after TE Dependent ANAlysis. +.. _ica_reclassify cli: + +*********************************** +Running the ica_reclassify workflow +*********************************** + +``ica_reclassify`` takes the output of ``tedana`` and can be used to manually +reclassify components, re-save denoised classifications following the new +classifications, and log the changes in all relevant output files. The +output files are the same as for ``tedana``: +https://tedana.readthedocs.io/en/latest/outputs.html + +.. argparse:: + :ref: tedana.workflows.ica_reclassify._get_parser + :prog: ica_reclassify + :func: _get_parser .. 
_t2smap cli: diff --git a/tedana/selection/selection_utils.py b/tedana/selection/selection_utils.py index 5ab9b884f..c4188c3e9 100644 --- a/tedana/selection/selection_utils.py +++ b/tedana/selection/selection_utils.py @@ -815,7 +815,6 @@ def get_extend_factor(n_vols=None, extend_factor=None): LGR.info(f"extend_factor={extend_factor}, based on number of fMRI volumes") else: error_msg = "get_extend_factor need n_vols or extend_factor as an input" - LGR.error(error_msg) raise ValueError(error_msg) return extend_factor diff --git a/tedana/tests/test_integration.py b/tedana/tests/test_integration.py index 7aca3402d..4f1ad9d4e 100644 --- a/tedana/tests/test_integration.py +++ b/tedana/tests/test_integration.py @@ -23,7 +23,7 @@ from tedana.io import InputHarvester from tedana.workflows import t2smap as t2smap_cli from tedana.workflows import tedana as tedana_cli -from tedana.workflows.ica_reclassify import post_tedana +from tedana.workflows.ica_reclassify import ica_reclassify_workflow # Need to see if a no BOLD warning occurred LOGGER = logging.getLogger(__name__) @@ -296,7 +296,7 @@ def test_integration_four_echo(skip_integration): verbose=True, ) - post_tedana( + ica_reclassify_workflow( op.join(out_dir, "desc-tedana_registry.json"), accept=[1, 2, 3], reject=[4, 5, 6], @@ -515,7 +515,7 @@ def test_integration_reclassify_both_rej_acc(skip_integration): ValueError, match=r"The following components were both accepted and", ): - post_tedana( + ica_reclassify_workflow( reclassify_raw_registry(), accept=[1, 2, 3], reject=[1, 2, 3], @@ -532,13 +532,13 @@ def test_integration_reclassify_run_twice(skip_integration): if os.path.exists(out_dir): shutil.rmtree(out_dir) - post_tedana( + ica_reclassify_workflow( reclassify_raw_registry(), accept=[1, 2, 3], out_dir=out_dir, no_reports=True, ) - post_tedana( + ica_reclassify_workflow( reclassify_raw_registry(), accept=[1, 2, 3], out_dir=out_dir, @@ -562,7 +562,7 @@ def test_integration_reclassify_no_bold(skip_integration, caplog): comptable = ioh.get_file_contents("ICA metrics tsv") to_accept = [i for i in range(len(comptable))] - post_tedana( + ica_reclassify_workflow( reclassify_raw_registry(), reject=to_accept, out_dir=out_dir, @@ -587,7 +587,7 @@ def test_integration_reclassify_accrej_files(skip_integration, caplog): comptable = ioh.get_file_contents("ICA metrics tsv") to_accept = [i for i in range(len(comptable))] - post_tedana( + ica_reclassify_workflow( reclassify_raw_registry(), reject=to_accept, out_dir=out_dir, @@ -599,6 +599,38 @@ def test_integration_reclassify_accrej_files(skip_integration, caplog): check_integration_outputs(fn, out_dir) +def test_integration_reclassify_index_failures(skip_integration, caplog): + if skip_integration: + pytest.skip("Skip reclassify index failures") + + test_data_path = guarantee_reclassify_data() + out_dir = os.path.abspath(os.path.join(test_data_path, "../outputs/reclassify/index_failures")) + if os.path.exists(out_dir): + shutil.rmtree(out_dir) + + with pytest.raises( + ValueError, + match=r"_parse_manual_list expected a list of integers, but the input is", + ): + ica_reclassify_workflow( + reclassify_raw_registry(), + accept=[1, 2.5, 3], + out_dir=out_dir, + no_reports=True, + ) + + with pytest.raises( + ValueError, + match=r"_parse_manual_list expected integers or a filename, but the input is", + ): + ica_reclassify_workflow( + reclassify_raw_registry(), + accept=[2.5], + out_dir=out_dir, + no_reports=True, + ) + + def test_integration_t2smap(skip_integration): """Integration test of the full t2smap workflow 
using five-echo test data""" if skip_integration: diff --git a/tedana/workflows/__init__.py b/tedana/workflows/__init__.py index c83313efd..571a2d567 100644 --- a/tedana/workflows/__init__.py +++ b/tedana/workflows/__init__.py @@ -1,9 +1,9 @@ # emacs: -*- mode: python-mode; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*- # ex: set sts=4 ts=4 sw=4 et: - +from .ica_reclassify import ica_reclassify_workflow from .t2smap import t2smap_workflow # Overrides submodules with their functions. from .tedana import tedana_workflow -__all__ = ["tedana_workflow", "t2smap_workflow"] +__all__ = ["tedana_workflow", "t2smap_workflow", "ica_reclassify_workflow"] diff --git a/tedana/workflows/ica_reclassify.py b/tedana/workflows/ica_reclassify.py index 24559ca29..332c40339 100644 --- a/tedana/workflows/ica_reclassify.py +++ b/tedana/workflows/ica_reclassify.py @@ -24,16 +24,29 @@ RepLGR = logging.getLogger("REPORT") -def _main(): +def _get_parser(): + """ + Parses command line inputs for ica_reclassify + + Returns + ------- + parser.parse_args() : argparse dict + """ + from tedana import __version__ verstr = "ica_reclassify v{}".format(__version__) + parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) - parser.add_argument( + # Argument parser follow template provided by RalphyZ + # https://stackoverflow.com/a/43456577 + optional = parser._action_groups.pop() + required = parser.add_argument_group("Required Arguments") + required.add_argument( "registry", help="File registry from a previous tedana run", ) - parser.add_argument( + optional.add_argument( "--manacc", dest="manual_accept", nargs="+", @@ -43,8 +56,9 @@ "as a csv file, or as a text file with an allowed " f"delimiter {repr(ALLOWED_COMPONENT_DELIMITERS)}." ), + default=[], ) - parser.add_argument( + optional.add_argument( "--manrej", dest="manual_reject", nargs="+", @@ -54,14 +68,15 @@ "as a csv file, or as a text file with an allowed " f"delimiter {repr(ALLOWED_COMPONENT_DELIMITERS)}." ), + default=[], ) - parser.add_argument( + optional.add_argument( "--config", dest="config", help="File naming configuration.", default="auto", ) - parser.add_argument( + optional.add_argument( "--out-dir", dest="out_dir", type=str, @@ -69,10 +84,10 @@ help="Output directory.", default=".", ) - parser.add_argument( + optional.add_argument( "--prefix", dest="prefix", type=str, help="Prefix for filenames generated.", default="" ) - parser.add_argument( + optional.add_argument( "--convention", dest="convention", action="store", @@ -80,20 +95,21 @@ help=("Filenaming convention. bids will use the latest BIDS derivatives version."), default="bids", ) - parser.add_argument( + optional.add_argument( "--tedort", dest="tedort", action="store_true", help=("Orthogonalize rejected components w.r.t.
accepted components prior to denoising."), default=False, ) - parser.add_argument( + optional.add_argument( "--mir", dest="mir", action="store_true", help="Run minimum image regression.", + default=False, ) - parser.add_argument( + optional.add_argument( "--no-reports", dest="no_reports", action="store_true", @@ -105,10 +121,10 @@ def _main(): ), default=False, ) - parser.add_argument( + optional.add_argument( "--png-cmap", dest="png_cmap", type=str, help="Colormap for figures", default="coolwarm" ) - parser.add_argument( + optional.add_argument( "--debug", dest="debug", action="store_true", @@ -119,49 +135,32 @@ def _main(): ), default=False, ) - parser.add_argument( + optional.add_argument( "--overwrite", "-f", dest="overwrite", action="store_true", help="Force overwriting of files.", ) - parser.add_argument( + optional.add_argument( "--quiet", dest="quiet", help=argparse.SUPPRESS, action="store_true", default=False ) - parser.add_argument("-v", "--version", action="version", version=verstr) + optional.add_argument("-v", "--version", action="version", version=verstr) - args = parser.parse_args() + parser._action_groups.append(optional) + return parser - if not args.manual_accept: - manual_accept = [] - elif len(args.manual_accept) > 1: - # We should assume that this is a list of integers - manual_accept = [int(x) for x in args.manual_accept] - elif op.exists(args.manual_accept[0]): - # filename was given - manual_accept = fname_to_component_list(args.manual_accept[0]) - else: - # arbitrary string was given, length of list is 1 - manual_accept = str_to_component_list(args.manual_accept[0]) - if not args.manual_reject: - manual_reject = [] - elif len(args.manual_reject) > 1: - # We should assume that this is a list of integers - manual_reject = [int(x) for x in args.manual_reject] - elif op.exists(args.manual_reject[0]): - # filename was given - manual_reject = fname_to_component_list(args.manual_reject[0]) - else: - # arbitrary string - manual_reject = str_to_component_list(args.manual_reject[0]) +def _main(argv=None): + """ica_reclassify entry point""" + + args = _get_parser().parse_args(argv) - # Run post-tedana - post_tedana( + # Run ica_reclassify_workflow + ica_reclassify_workflow( args.registry, - accept=manual_accept, - reject=manual_reject, + accept=args.manual_accept, + reject=args.manual_reject, out_dir=args.out_dir, config=args.config, prefix=args.prefix, @@ -176,7 +175,58 @@ def _main(): ) -def post_tedana( +def _parse_manual_list(manual_list): + """ + Parse the list of components to accept or reject into a list of integers + + Parameters + ---------- + manual_list: :obj:`str` :obj:`list[str]` or [] or None + String of integers separated by spaces, commas, or tabs + A file name for a file that contains integers + + Returns + ------- + manual_nums: :obj:`list[int]` + A list of integers or an empty list. 
+ + Note + ---- + We do not need to check whether integers are less than 0 or greater than the total + number of components here, because it is checked later in selectcomps2use, + where a descriptive error message will appear + """ + if not manual_list: + manual_nums = [] + elif len(manual_list) > 1: + # Assume that this is a list of integers, but raise an error if not + manual_nums = [] + for x in manual_list: + if float(x) == int(x): + manual_nums.append(int(x)) + else: + raise ValueError( + "_parse_manual_list expected a list of integers, " + f"but the input is {manual_list}" + ) + elif op.exists(str(manual_list[0])): + # filename was given + manual_nums = fname_to_component_list(manual_list[0]) + elif type(manual_list[0]) == str: + # arbitrary string was given, length of list is 1 + manual_nums = str_to_component_list(manual_list[0]) + elif type(manual_list[0]) == int: + # Is a single integer and should remain a list with a single integer + manual_nums = manual_list + else: + raise ValueError( + f"_parse_manual_list expected integers or a filename, but the input is {manual_list}" + ) + + return manual_nums + + +def ica_reclassify_workflow( registry, accept=[], reject=[], @@ -245,6 +295,12 @@ def post_tedana( if not op.isdir(out_dir): os.mkdir(out_dir) + # If accept and reject are a list of integers, they stay the same + # If they are a filename, load the numbers from that file + # If they are a string of values, convert to a list of ints + accept = _parse_manual_list(accept) + reject = _parse_manual_list(reject) + # Check that there is no overlap in accepted/rejected components if accept: acc = set(accept) diff --git a/tedana/workflows/t2smap.py b/tedana/workflows/t2smap.py index 012a0e270..67cf39e57 100644 --- a/tedana/workflows/t2smap.py +++ b/tedana/workflows/t2smap.py @@ -26,7 +26,7 @@ def _get_parser(): parser.parse_args() : argparse dict """ parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) - # Argument parser follow templtate provided by RalphyZ + # Argument parser follow template provided by RalphyZ # https://stackoverflow.com/a/43456577 optional = parser._action_groups.pop() required = parser.add_argument_group("Required Arguments") diff --git a/tedana/workflows/tedana.py b/tedana/workflows/tedana.py index cfc6dfcf1..26ccc8c66 100644 --- a/tedana/workflows/tedana.py +++ b/tedana/workflows/tedana.py @@ -48,7 +48,7 @@ def _get_parser(): verstr = "tedana v{}".format(__version__) parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) - # Argument parser follow templtate provided by RalphyZ + # Argument parser follow template provided by RalphyZ # https://stackoverflow.com/a/43456577 optional = parser._action_groups.pop() required = parser.add_argument_group("Required Arguments") From b06f65a009addeedc0bef766070bab6d1b5a7be5 Mon Sep 17 00:00:00 2001 From: Dan Handwerker <7406227+handwerkerd@users.noreply.github.com> Date: Wed, 26 Apr 2023 11:17:59 -0400 Subject: [PATCH 170/177] Ica reclassify registry fixes (#42) * add pandas version check >= 1.5.2 and mod behavior (#938) * add version check and mod behavior if pandas >= 1.5.2 to prevent error in writing csv * formatting * adding P. Molfese --------- Co-authored-by: Molfese * re-added InputHarvester and expanduser * fixed handler base_dir path * mixing matrix file always in registry --------- Co-authored-by: Peter J.
Molfese Co-authored-by: Molfese --- .zenodo.json | 5 +++++ tedana/io.py | 19 +++++++++++-------- tedana/workflows/ica_reclassify.py | 6 +++--- tedana/workflows/tedana.py | 4 ++++ 4 files changed, 23 insertions(+), 11 deletions(-) diff --git a/.zenodo.json b/.zenodo.json index 5c3aacd77..693dde392 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -78,6 +78,11 @@ "affiliation": "Basque Center on Cognition, Brain and Language", "orcid": "0000-0002-2553-3327" }, + { + "name": "Molfese, Peter", + "affiliation": "National Institutes of Mental Health, CMN", + "orcid": "0000-0002-3045-9408" + }, { "name": "Salo, Taylor", "affiliation": "Florida International University", diff --git a/tedana/io.py b/tedana/io.py index 1516df774..1d9f8763a 100644 --- a/tedana/io.py +++ b/tedana/io.py @@ -339,10 +339,10 @@ def save_tsv(self, data, name): data_type = type(data) if not isinstance(data, pd.DataFrame): raise TypeError(f"data must be pd.Data, not type {data_type}.") - - # Replace blanks with numpy NaN - deblanked = data.replace("", np.nan) - deblanked.to_csv(name, sep="\t", line_terminator="\n", na_rep="n/a", index=False) + if versiontuple(pd.__version__) >= versiontuple("1.5.2"): + data.to_csv(name, sep="\t", lineterminator="\n", na_rep="n/a", index=False) + else: + data.to_csv(name, sep="\t", line_terminator="\n", na_rep="n/a", index=False) def save_self(self): fname = self.save_file(self.registry, "registry json") @@ -359,9 +359,9 @@ class InputHarvester: } def __init__(self, path): - self._full_path = path - self._base_dir = op.dirname(path) - self._registry = load_json(path) + self._full_path = op.abspath(path) + self._base_dir = op.dirname(self._full_path) + self._registry = load_json(self._full_path) def get_file_path(self, description): if description in self._registry.keys(): @@ -371,7 +371,6 @@ def get_file_path(self, description): def get_file_contents(self, description): """Get file contents. - Notes ----- Since we restrict to just these three types, this function should always return. @@ -390,6 +389,10 @@ def registry(self): return d +def versiontuple(v): + return tuple(map(int, (v.split(".")))) + + def get_fields(name): """Identify all fields in an unformatted string. 
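The io.py change above amounts to picking ``to_csv``'s newline keyword at run time, because pandas 1.5 renamed ``line_terminator`` to ``lineterminator``. Below is a minimal standalone sketch of that version gate; it assumes a plain X.Y.Z version string, and ``save_tsv_compat`` is an illustrative name rather than a function in this patch.

import pandas as pd

def versiontuple(v):
    # "1.5.2" -> (1, 5, 2); handles plain release strings only, so suffixes
    # such as "2.1.0rc0" would raise ValueError and need extra handling
    return tuple(int(part) for part in v.split("."))

def save_tsv_compat(data, name):
    # pandas >= 1.5 understands "lineterminator"; older releases only accept
    # the original "line_terminator" spelling
    if versiontuple(pd.__version__) >= versiontuple("1.5.2"):
        kwargs = {"lineterminator": "\n"}
    else:
        kwargs = {"line_terminator": "\n"}
    data.to_csv(name, sep="\t", na_rep="n/a", index=False, **kwargs)

For example, ``save_tsv_compat(pd.DataFrame({"classification": ["accepted", "rejected"]}), "metrics.tsv")`` writes the same tab-separated file under either pandas release line.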
diff --git a/tedana/workflows/ica_reclassify.py b/tedana/workflows/ica_reclassify.py index 332c40339..209764778 100644 --- a/tedana/workflows/ica_reclassify.py +++ b/tedana/workflows/ica_reclassify.py @@ -199,7 +199,7 @@ def _parse_manual_list(manual_list): if not manual_list: manual_nums = [] elif len(manual_list) > 1: - # We should assume that this is a list of integers + # Assume that this is a list of integers, but raise error if not manual_nums = [] for x in manual_list: if float(x) == int(x): @@ -209,9 +209,9 @@ def _parse_manual_list(manual_list): "_parse_manual_list expected a list of integers, " f"but the input is {manual_list}" ) - elif op.exists(str(manual_list[0])): + elif op.exists(op.expanduser(str(manual_list[0]).strip(" "))): # filename was given - manual_nums = fname_to_component_list(manual_list[0]) + manual_nums = fname_to_component_list(op.expanduser(str(manual_list[0]).strip(" "))) elif type(manual_list[0]) == str: # arbitrary string was given, length of list is 1 manual_nums = str_to_component_list(manual_list[0]) diff --git a/tedana/workflows/tedana.py b/tedana/workflows/tedana.py index 26ccc8c66..a60e3235f 100644 --- a/tedana/workflows/tedana.py +++ b/tedana/workflows/tedana.py @@ -732,6 +732,10 @@ def tedana_workflow( mixing_df = pd.DataFrame(data=mmix, columns=comp_names) if not op.exists(io_generator.get_name("ICA mixing tsv")): io_generator.save_file(mixing_df, "ICA mixing tsv") + else: # Make sure the relative path to the supplied mixing matrix is saved in the registry + io_generator.registry["ICA mixing tsv"] = op.basename( + io_generator.get_name("ICA mixing tsv") + ) betas_oc = utils.unmask(computefeats2(data_oc, mmix, mask_denoise), mask_denoise) io_generator.save_file(betas_oc, "z-scored ICA components img") From 29eee669df9ca5afb4ad6bfe0a1df64592feff96 Mon Sep 17 00:00:00 2001 From: Taylor Salo Date: Wed, 26 Apr 2023 12:39:02 -0400 Subject: [PATCH 171/177] Drop Python 3.6 and 3.7 support (#40) * Drop Python 3.6 and 3.7 support. * line_terminator --> lineterminator --- .circleci/config.yml | 103 +++++++++------------------ .github/workflows/python-publish.yml | 2 +- Dockerfile_dev | 8 +-- README.md | 2 +- docs/installation.rst | 2 +- pyproject.toml | 5 +- tedana/io.py | 8 +-- 7 files changed, 46 insertions(+), 84 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index b5468324d..896d7c212 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -7,48 +7,26 @@ orbs: codecov: codecov/codecov@1.0.5 jobs: - makeenv_37: + makeenv_38: docker: - image: continuumio/miniconda3 working_directory: /tmp/src/tedana steps: - checkout - restore_cache: - key: conda-py37-v2-{{ checksum "pyproject.toml" }} + key: conda-py38-v2-{{ checksum "pyproject.toml" }} - run: name: Generate environment command: | - if [ ! -d /opt/conda/envs/tedana_py37 ]; then - conda create -yq -n tedana_py37 python=3.7 - source activate tedana_py37 + if [ ! 
-d /opt/conda/envs/tedana_py38 ]; then + conda create -yq -n tedana_py38 python=3.8 + source activate tedana_py38 pip install -e .[tests] fi - save_cache: - key: conda-py37-v2-{{ checksum "pyproject.toml" }} - paths: - - /opt/conda/envs/tedana_py37 - - unittest_37: - docker: - - image: continuumio/miniconda3 - working_directory: /tmp/src/tedana - steps: - - checkout - - restore_cache: - key: conda-py37-v2-{{ checksum "pyproject.toml" }} - - run: - name: Running unit tests - command: | - apt-get update - apt-get install -y make - source activate tedana_py37 # depends on makeenv_37 - make unittest - mkdir /tmp/src/coverage - mv /tmp/src/tedana/.coverage /tmp/src/coverage/.coverage.py37 - - persist_to_workspace: - root: /tmp + key: conda-py38-v2-{{ checksum "pyproject.toml" }} paths: - - src/coverage/.coverage.py37 + - /opt/conda/envs/tedana_py38 unittest_38: docker: @@ -58,27 +36,15 @@ jobs: - checkout - restore_cache: key: conda-py38-v2-{{ checksum "pyproject.toml" }} - - run: - name: Generate environment - command: | - apt-get update - apt-get install -yqq make - if [ ! -d /opt/conda/envs/tedana_py38 ]; then - conda create -yq -n tedana_py38 python=3.8 - source activate tedana_py38 - pip install .[tests] - fi - run: name: Running unit tests command: | - source activate tedana_py38 + apt-get update + apt-get install -y make + source activate tedana_py38 # depends on makeenv_38 make unittest mkdir /tmp/src/coverage mv /tmp/src/tedana/.coverage /tmp/src/coverage/.coverage.py38 - - save_cache: - key: conda-py38-v2-{{ checksum "pyproject.toml" }} - paths: - - /opt/conda/envs/tedana_py38 - persist_to_workspace: root: /tmp paths: @@ -152,7 +118,6 @@ jobs: paths: - src/coverage/.coverage.py310 - style_check: docker: - image: continuumio/miniconda3 @@ -160,13 +125,13 @@ jobs: steps: - checkout - restore_cache: - key: conda-py37-v2-{{ checksum "pyproject.toml" }} + key: conda-py38-v2-{{ checksum "pyproject.toml" }} - run: name: Style check command: | apt-get update apt-get install -yqq make - source activate tedana_py37 # depends on makeenv37 + source activate tedana_py38 # depends on makeenv38 make lint three-echo: @@ -176,14 +141,14 @@ jobs: steps: - checkout - restore_cache: - key: conda-py37-v2-{{ checksum "pyproject.toml" }} + key: conda-py38-v2-{{ checksum "pyproject.toml" }} - run: name: Run integration tests no_output_timeout: 40m command: | apt-get update apt-get install -yqq make - source activate tedana_py37 # depends on makeenv_37 + source activate tedana_py38 # depends on makeenv_38 make three-echo mkdir /tmp/src/coverage mv /tmp/src/tedana/.coverage /tmp/src/coverage/.coverage.three-echo @@ -201,14 +166,14 @@ jobs: steps: - checkout - restore_cache: - key: conda-py37-v2-{{ checksum "pyproject.toml" }} + key: conda-py38-v2-{{ checksum "pyproject.toml" }} - run: name: Run integration tests no_output_timeout: 40m command: | apt-get update apt-get install -yqq make - source activate tedana_py37 # depends on makeenv_37 + source activate tedana_py38 # depends on makeenv_38 make four-echo mkdir /tmp/src/coverage mv /tmp/src/tedana/.coverage /tmp/src/coverage/.coverage.four-echo @@ -226,14 +191,14 @@ jobs: steps: - checkout - restore_cache: - key: conda-py37-v2-{{ checksum "pyproject.toml" }} + key: conda-py38-v2-{{ checksum "pyproject.toml" }} - run: name: Run integration tests no_output_timeout: 40m command: | apt-get update apt-get install -yqq make - source activate tedana_py37 # depends on makeenv_37 + source activate tedana_py38 # depends on makeenv_38 make five-echo mkdir 
/tmp/src/coverage mv /tmp/src/tedana/.coverage /tmp/src/coverage/.coverage.five-echo @@ -251,14 +216,14 @@ jobs: steps: - checkout - restore_cache: - key: conda-py37-v2-{{ checksum "pyproject.toml" }} + key: conda-py38-v2-{{ checksum "pyproject.toml" }} - run: name: Run integration tests no_output_timeout: 40m command: | apt-get update apt-get install -yqq make - source activate tedana_py37 # depends on makeenv_37 + source activate tedana_py38 # depends on makeenv_38 make reclassify mkdir /tmp/src/coverage mv /tmp/src/tedana/.coverage /tmp/src/coverage/.coverage.reclassify @@ -277,14 +242,14 @@ jobs: steps: - checkout - restore_cache: - key: conda-py37-v2-{{ checksum "pyproject.toml" }} + key: conda-py38-v2-{{ checksum "pyproject.toml" }} - run: name: Run integration tests no_output_timeout: 40m command: | apt-get update apt-get install -yqq make - source activate tedana_py37 # depends on makeenv_37 + source activate tedana_py38 # depends on makeenv_38 make t2smap mkdir /tmp/src/coverage mv /tmp/src/tedana/.coverage /tmp/src/coverage/.coverage.t2smap @@ -304,13 +269,13 @@ jobs: at: /tmp - checkout - restore_cache: - key: conda-py37-v2-{{ checksum "pyproject.toml" }} + key: conda-py38-v2-{{ checksum "pyproject.toml" }} - run: name: Merge coverage files command: | apt-get update apt-get install -yqq curl - source activate tedana_py37 # depends on makeenv37 + source activate tedana_py38 # depends on makeenv38 cd /tmp/src/coverage/ coverage combine coverage xml @@ -323,34 +288,32 @@ workflows: version: 2.1 build_test: jobs: - - makeenv_37 - - unittest_37: + - makeenv_38 + - unittest_38: requires: - - makeenv_37 + - makeenv_38 - style_check: requires: - - makeenv_37 + - makeenv_38 - three-echo: requires: - - makeenv_37 + - makeenv_38 - four-echo: requires: - - makeenv_37 + - makeenv_38 - five-echo: requires: - - makeenv_37 + - makeenv_38 - reclassify: requires: - - makeenv_37 + - makeenv_38 - t2smap: requires: - - makeenv_37 - - unittest_38 + - makeenv_38 - unittest_39 - unittest_310 - merge_coverage: requires: - - unittest_37 - unittest_38 - unittest_39 - unittest_310 diff --git a/.github/workflows/python-publish.yml b/.github/workflows/python-publish.yml index dc678799e..a1ab31fca 100644 --- a/.github/workflows/python-publish.yml +++ b/.github/workflows/python-publish.yml @@ -17,7 +17,7 @@ jobs: - name: Set up Python uses: actions/setup-python@v2 with: - python-version: '3.7' + python-version: '3.8' - name: Install dependencies run: | python -m pip install --upgrade pip diff --git a/Dockerfile_dev b/Dockerfile_dev index 7e908ab98..1b48d6cc7 100644 --- a/Dockerfile_dev +++ b/Dockerfile_dev @@ -41,14 +41,14 @@ ENV LANG="C.UTF-8" \ RUN git clone https://github.com/me-ica/tedana.git /tedana -RUN bash -c "conda create -yq --name tedana_py36 python=3.6 pip \ - && source activate tedana_py36 \ +RUN bash -c "conda create -yq --name tedana_env python=3.8 pip \ + && source activate tedana_env \ && pip install /tedana[all] \ && pip install ipython \ && rm -rf ~/.cache/pip/* \ && conda clean --all" -RUN /opt/conda/envs/tedana_py36/bin/ipython profile create \ +RUN /opt/conda/envs/tedana_env/bin/ipython profile create \ && sed -i 's/#c.InteractiveShellApp.extensions = \[\]/ \ c.InteractiveShellApp.extensions = \['\''autoreload'\''\]/g' \ /root/.ipython/profile_default/ipython_config.py @@ -57,7 +57,7 @@ RUN mkdir -p /tedana/dev_tools COPY ["./dev_tools", "/tedana/dev_tools"] -RUN sed -i '$isource activate tedana_py36' $ND_ENTRYPOINT +RUN sed -i '$isource activate tedana_env' $ND_ENTRYPOINT RUN sed -i 
'$isource /tedana/dev_tools/run_tests.sh' $ND_ENTRYPOINT diff --git a/README.md b/README.md index 78e170762..cca32a220 100644 --- a/README.md +++ b/README.md @@ -49,7 +49,7 @@ If you use `tedana`, please cite the following papers, as well as our [most rece ### Use `tedana` with your local Python environment You'll need to set up a working development environment to use `tedana`. -To set up a local environment, you will need Python >=3.6 and the following packages will need to be installed: +To set up a local environment, you will need Python >=3.8 and the following packages will need to be installed: * [numpy](http://www.numpy.org/) * [scipy](https://www.scipy.org/) diff --git a/docs/installation.rst b/docs/installation.rst index 4cba104c9..9ff6bd4ab 100644 --- a/docs/installation.rst +++ b/docs/installation.rst @@ -3,7 +3,7 @@ Installation ############ You'll need to set up a working development environment to use ``tedana``. -To set up a local environment, you will need Python >=3.6 and the following +To set up a local environment, you will need Python >=3.8 and the following packages will need to be installed: - nilearn diff --git a/pyproject.toml b/pyproject.toml index 035c3cf2b..2e695b542 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -12,13 +12,12 @@ classifiers = [ "Intended Audience :: Science/Research", "Topic :: Scientific/Engineering :: Information Analysis", "License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)", - "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", ] license = {file = "LICENSE"} -requires-python = ">=3.7" +requires-python = ">=3.8" dependencies = [ "bokeh<2.3.0", "mapca>=0.0.3", @@ -26,7 +25,7 @@ dependencies = [ "nibabel>=2.5.1", "nilearn>=0.7", "numpy>=1.16", - "pandas>=0.24", + "pandas>=2.0", "scikit-learn>=0.21", "scipy>=1.2.0", "threadpoolctl", diff --git a/tedana/io.py b/tedana/io.py index 1d9f8763a..0389ffc67 100644 --- a/tedana/io.py +++ b/tedana/io.py @@ -339,10 +339,10 @@ def save_tsv(self, data, name): data_type = type(data) if not isinstance(data, pd.DataFrame): raise TypeError(f"data must be pd.Data, not type {data_type}.") - if versiontuple(pd.__version__) >= versiontuple("1.5.2"): - data.to_csv(name, sep="\t", lineterminator="\n", na_rep="n/a", index=False) - else: - data.to_csv(name, sep="\t", line_terminator="\n", na_rep="n/a", index=False) + + # Replace blanks with numpy NaN + deblanked = data.replace("", np.nan) + deblanked.to_csv(name, sep="\t", lineterminator="\n", na_rep="n/a", index=False) def save_self(self): fname = self.save_file(self.registry, "registry json") From a3dc1c28ad589cdd5d81a4f91e1fb2bd1f1141ac Mon Sep 17 00:00:00 2001 From: Dan Handwerker <7406227+handwerkerd@users.noreply.github.com> Date: Thu, 27 Apr 2023 17:09:11 -0400 Subject: [PATCH 172/177] added mixm to 4echo test (#43) --- tedana/tests/data/fiu_four_echo_outputs.txt | 22 +-------------------- tedana/tests/test_integration.py | 1 + 2 files changed, 2 insertions(+), 21 deletions(-) diff --git a/tedana/tests/data/fiu_four_echo_outputs.txt b/tedana/tests/data/fiu_four_echo_outputs.txt index 3d0a1b346..e7fd98823 100644 --- a/tedana/tests/data/fiu_four_echo_outputs.txt +++ b/tedana/tests/data/fiu_four_echo_outputs.txt @@ -17,15 +17,8 @@ desc-ICA_decision_tree.json desc-ICAS0_stat-F_statmap.nii.gz desc-ICAT2_stat-F_statmap.nii.gz desc-ICA_mixing.tsv +desc-ICA_mixing_static.tsv desc-ICA_stat-z_components.nii.gz 
-desc-PCAAveragingWeights_components.nii.gz -desc-PCAS0_stat-F_statmap.nii.gz -desc-PCAT2_stat-F_statmap.nii.gz -desc-PCA_decomposition.json -desc-PCA_metrics.json -desc-PCA_metrics.tsv -desc-PCA_mixing.tsv -desc-PCA_stat-z_components.nii.gz desc-T1likeEffect_min.nii.gz desc-adaptiveGoodSignal_mask.nii.gz desc-globalSignal_map.nii.gz @@ -37,7 +30,6 @@ desc-optcomAccepted_bold.nii.gz desc-optcomDenoised_bold.nii.gz desc-optcomMIRDenoised_bold.nii.gz desc-optcomNoGlobalSignal_bold.nii.gz -desc-optcom_whitened_bold.nii.gz desc-optcomRejected_bold.nii.gz desc-optcomWithGlobalSignal_bold.nii.gz desc-optcom_bold.nii.gz @@ -46,36 +38,24 @@ echo-1_desc-Denoised_bold.nii.gz echo-1_desc-ICAT2ModelPredictions_components.nii.gz echo-1_desc-ICAS0ModelPredictions_components.nii.gz echo-1_desc-ICA_components.nii.gz -echo-1_desc-PCAT2ModelPredictions_components.nii.gz -echo-1_desc-PCAS0ModelPredictions_components.nii.gz -echo-1_desc-PCA_components.nii.gz echo-1_desc-Rejected_bold.nii.gz echo-2_desc-Accepted_bold.nii.gz echo-2_desc-Denoised_bold.nii.gz echo-2_desc-ICAT2ModelPredictions_components.nii.gz echo-2_desc-ICAS0ModelPredictions_components.nii.gz echo-2_desc-ICA_components.nii.gz -echo-2_desc-PCAT2ModelPredictions_components.nii.gz -echo-2_desc-PCAS0ModelPredictions_components.nii.gz -echo-2_desc-PCA_components.nii.gz echo-2_desc-Rejected_bold.nii.gz echo-3_desc-Accepted_bold.nii.gz echo-3_desc-Denoised_bold.nii.gz echo-3_desc-ICAT2ModelPredictions_components.nii.gz echo-3_desc-ICAS0ModelPredictions_components.nii.gz echo-3_desc-ICA_components.nii.gz -echo-3_desc-PCAT2ModelPredictions_components.nii.gz -echo-3_desc-PCAS0ModelPredictions_components.nii.gz -echo-3_desc-PCA_components.nii.gz echo-3_desc-Rejected_bold.nii.gz echo-4_desc-Accepted_bold.nii.gz echo-4_desc-Denoised_bold.nii.gz echo-4_desc-ICAT2ModelPredictions_components.nii.gz echo-4_desc-ICAS0ModelPredictions_components.nii.gz echo-4_desc-ICA_components.nii.gz -echo-4_desc-PCAT2ModelPredictions_components.nii.gz -echo-4_desc-PCAS0ModelPredictions_components.nii.gz -echo-4_desc-PCA_components.nii.gz echo-4_desc-Rejected_bold.nii.gz references.bib report.txt diff --git a/tedana/tests/test_integration.py b/tedana/tests/test_integration.py index 4f1ad9d4e..a8de177f4 100644 --- a/tedana/tests/test_integration.py +++ b/tedana/tests/test_integration.py @@ -287,6 +287,7 @@ def test_integration_four_echo(skip_integration): datalist = [prepend + str(i + 1) + suffix for i in range(4)] tedana_cli.tedana_workflow( data=datalist, + mixm=op.join(op.dirname(datalist[0]), "desc-ICA_mixing_static.tsv"), tes=[11.8, 28.04, 44.28, 60.52], out_dir=out_dir, tedpca="kundu-stabilize", From 3e48d9fa712dff2c9083e3bae74ab93d60e44442 Mon Sep 17 00:00:00 2001 From: Dan Handwerker <7406227+handwerkerd@users.noreply.github.com> Date: Mon, 1 May 2023 16:18:29 -0400 Subject: [PATCH 173/177] Updating Contributor Information (#41) * Some contributor updates * Added doc to Marco --- .all-contributorsrc | 34 +++++++++++++++++++++++++++++++++- contributions.md | 14 +++++++++----- 2 files changed, 42 insertions(+), 6 deletions(-) diff --git a/.all-contributorsrc b/.all-contributorsrc index 180ba8db8..07a6f0f9b 100644 --- a/.all-contributorsrc +++ b/.all-contributorsrc @@ -34,6 +34,16 @@ "question" ] }, + { + "login": "marco7877", + "name": "Marco Flores-Coronado", + "avatar_url": "https://avatars.githubusercontent.com/u/56403434?v=4", + "profile": "https://github.com/marco7877", + "contributions": [ + "ideas", + "doc" + ] + }, { "login": "javiergcas", "name": "Javier 
Gonzalez-Castillo", @@ -78,6 +88,28 @@ "question" ] }, + { + "login": "pmolfese", + "name": "Pete Molfese", + "avatar_url": "https://avatars.githubusercontent.com/u/3665743?v=4", + "profile": "https://github.com/pmolfese", + "contributions": [ + "code" + ] + }, + { + "login": "n-reddy", + "name": "Neha Reddy", + "avatar_url": "https://avatars.githubusercontent.com/u/58482773?v=4", + "profile": "https://github.com/n-reddy", + "contributions": [ + "bug", + "doc", + "ideas", + "question", + "review" + ] + }, { "login": "tsalo", "name": "Taylor Salo", @@ -252,4 +284,4 @@ "repoHost": "https://github.com", "skipCi": false, "commitConvention": "angular" -} +} \ No newline at end of file diff --git a/contributions.md b/contributions.md index e7247aa51..eaa2b9b0c 100644 --- a/contributions.md +++ b/contributions.md @@ -1,20 +1,24 @@ # Contributions ## Contributors + We appreciate all of our contributors! Each contributor below has made a statement of how they feel they've contributed to `tedana`. + - [**Logan Dowdle**][logan-dowdle] helps folks get multi-echo data collection going on their scanners, tries to keep up with the increasing number of multi-echo papers, likes making figures that explain what tedana has done to the data, and occasionally adds a new feature (with lots of help!). -- [**Elizabeth DuPre**][elizabeth-dupre] initiated the tedana project in 2017, building on the ME-ICA codebase. She continued to develop the code and began actively creating the community structure as part of the fifth Mozilla Open Leaders cohort (mentored by Kirstie Whitaker). Since her time as interim BDFL, Elizabeth has been involved in most aspects of the project -- although she currently focuses primarily on improving tedana's integration with the broader neuroimaging ecosystem. +- [**Elizabeth DuPre**][elizabeth-dupre] initiated the tedana project in 2017, building on the ME-ICA codebase. She continued to develop the code and began actively creating the community structure as part of the fifth Mozilla Open Leaders cohort (mentored by Kirstie Whitaker). Since her time as BDFL, Elizabeth has been involved in most aspects of the project -- although she currently focuses primarily on improving tedana's integration with the broader neuroimaging ecosystem. - [**Javier Gonzalez-Castillo**][javier-gonzalez-castillo] contributed to the development of dimensionality reduction and decomposition algorithms in tedana, as well as to the development of the interactive reports. -- [**Dan Handwerker**][dan-handwerker] helps with project management (people wrangling & documentation), led the organization for the 2019 tedana hackathon, provides conceptual feedback on many aspects of the code, contributes to documentation, and, once in a while, even contributes to the code. -- [**Taylor Salo**][taylor-salo] helps with code-related issues and with engaging new contributors to tedana. -- [**Joshua Teves**][joshua-teves] helps manage issues and pull requests for a variety of both administrative and code-specific tasks. -- [**Eneko Uruñuela**][eneko-urunuela] helps with the development of dimensionality reduction and decomposition algorithms in tedana, with Principal Component Analysis to be more specific, and contributed to the development of the interactive reports. 
+- [**Dan Handwerker**][dan-handwerker] helps with project management (people wrangling & documentation), led the organization for the 2019 tedana hackathon, provides conceptual feedback on many aspects of the code, contributes to documentation, and contributes to the code, particularly modularization and improvements to the component selection process.
+- [**Taylor Salo**][taylor-salo] helps and has contributed to many parts of the code, including modularizing the metric calculation process, and helps with engaging new contributors to tedana.
+- [**Joshua Teves**][joshua-teves] made many contributions to the code, including improving stability and modularization, and helped manage issues and pull requests for a variety of both administrative and code-specific tasks.
+- [**Eneko Uruñuela**][eneko-urunuela] helps with the development of dimensionality reduction and decomposition algorithms in tedana, with Principal Component Analysis to be more specific, and contributed to the development of the interactive reports and RICA.
- [**Maryam Vaziri-Pashkam**][maryam-vaziri-pashkam] helps with the tedana documentation to make it easier to understand for beginners.

## Funding and Operational Support
+
Special thanks to the following sources of funding and operational support for `tedana`:
+
- National Institutes of Mental Health, [Section on Functional Imaging Methods][sfim], for supporting the 2019 `tedana` hackathon.
- National Institutes of Health for supporting the 2019 AFNI Code Convergence, where work in the 2019 `tedana` hackathon was continued.
- The Mozilla Open Leaders program for support in developing the tedana community structure as part of [the ME-BIDS project](https://medium.com/read-write-participate/brain-imaging-in-the-open-aac7c17bcf69).

From 1e7ee6e47fe27f6e63b83dcd2b3e0eaefc5d624f Mon Sep 17 00:00:00 2001
From: Dan Handwerker <7406227+handwerkerd@users.noreply.github.com>
Date: Thu, 4 May 2023 10:04:43 -0400
Subject: [PATCH 174/177] Added flow charts and some text (#44)

* Added flow charts and some text

* Finished flow charts and text.

Co-authored-by: marco7877

---------

Co-authored-by: marco7877

---
 docs/_static/decision_tree_kundu.png        | Bin 0 -> 179850 bytes
 docs/_static/decision_tree_kundu.tex        | 157 ++++++++++++++++++
 docs/_static/decision_tree_legend.png       | Bin 0 -> 15078 bytes
 docs/_static/decision_tree_legend.tex       |  57 +++++++
 docs/_static/decision_tree_minimal.png      | Bin 0 -> 87621 bytes
 docs/_static/decision_tree_minimal.tex      | 106 ++++++++++++
 docs/approach.rst                           |   5 +-
 ... trees.rst => building_decision_trees.rst} |  13 ++
 docs/classification_output_descriptions.rst |   2 +-
 docs/faq.rst                                |   5 +-
 docs/included_decision_trees.rst            | 100 +++++++++++
 docs/index.rst                              |   3 +-
 12 files changed, 443 insertions(+), 5 deletions(-)
 create mode 100644 docs/_static/decision_tree_kundu.png
 create mode 100644 docs/_static/decision_tree_kundu.tex
 create mode 100644 docs/_static/decision_tree_legend.png
 create mode 100644 docs/_static/decision_tree_legend.tex
 create mode 100644 docs/_static/decision_tree_minimal.png
 create mode 100644 docs/_static/decision_tree_minimal.tex
 rename docs/{building decision trees.rst => building_decision_trees.rst} (98%)
 create mode 100644 docs/included_decision_trees.rst

diff --git a/docs/_static/decision_tree_kundu.png b/docs/_static/decision_tree_kundu.png
new file mode 100644
index 0000000000000000000000000000000000000000..76b646374c496790f921bb50e5d9ce15ae18eff7
GIT binary patch
literal 179850
[base85-encoded binary data omitted: new PNG image docs/_static/decision_tree_kundu.png (flow chart of the kundu decision tree), 179850 bytes]
zk*xubi}E6p5*B1A52%i}7;*)apop!?HscaYmynAz7|gbaBh1#6iPVk0 zy0fs=t3j@i2KqryZALEU;Gz%`KF8DRa7M#B5$#fXjAf?5JR%S75ez)?#y7tpCqlY@ zn9(Ef8ud6Bc!)2%;)Ry|060fVk$FE!YhtPu@m2(Yr$mJ=*kBHGAu1i5Esyc?Nx7iy zS6pA(te(f7h)V)-Y&RaJ4Y?r_5byS-I4#Y=xD|&Dh&Y<0#j?yJUuly&SRY| zGRdG^NOs*kvwrw|@=e4cl=lG69VKdwG-@HG*+9w~b-Ln9AEY?M5zgv49|v;;3UP)P zUBK3CT5(epf}%(F@|I;^AGl z0+pR}sKy=35t^mpW^XJA0UyeR=oJSSD5}_ltb|+dR-}Ml17qSJLo!k;;QWi-#1ehH z2Wd9qK>vFI`xe1U6d@Wn_u#&E`hy$h+IxAa^p55O_bX~0eggOc<%I%c8lqe4DI1tM};wt&|c*RF{ z0(>=bT5h^;5Scq5AvdKi2t&HtTjS4PvKE!>_Y8In^x%7J4voo#Eiy^}34^qm<*Nmj2ZQ9_1NH;e}qX6#K#>0C(ib6o1ii1Xi&a#CpE9s_yMR>$lfEwOt)$^68GeF~@U9;Ay(;|NZ^xZ+9{c_r{M-v^ zb+JE4@N);Gg`bE7@adB;V1KB=Pk-=Qtk)P5q$t1hq|QGrl}wO+Lz#2$SIT(39(Fje z;9oeywDnV3<}6N&qZ;OZH_c3_COLh8&w9$iQ{WTq*I`WvJ7}C3<^_0l-ovi`FKo>Q zR%s|t5K zU`f7DOS4l&Q!i`p$$3~RUWr~?Ii{)8dF)CXU4OU1>zdq#+a|gp_&ELQ(QehO2eZ6S zx8hGe7dEZzkSq&r8&LxHRCc~Yx~Kuu{k6DJ?jH`YO{&ss z*HAwWxy`3~(fYF*uE}p!Jp_hkjHZpl@GRA5`L=^aMz8s^ zRs60``mjnhexWr$BX&vN5j5(zUC?H>YVNAOfMtJO{KvI!f^jS5uJ?snWj zt67UvygBn?te2nbxQ-7I$~fJYZsHVJGi!Nl0Fv^Cq%fFWkt)RkG$0hM)zmEn?j3{i>h6h%{U| zu9z|vR{l!*<%sCs?l*Ubb}c48YH7jI=f#)XzA_s|eo;?7j>#W%;L8oacSS%jUh7)r zoJlK=%B$CZb9+*o-K{349qDybn_X`4-?S2;M4CGvnSo^?uP*kDD$|zPEW7614&fiB z&M)?Di?Fr+mhw@;EYd^HuuaZhwy5Wp$9N#64IG=GtAN5Zmq%etq03E4(N(9(=}t)$ z^WC+)!6lx0`6dVAC6486kd=-I4eukEuGD4FAWZJ)84zOdYF{n0vpnoW+;vm5HFw!u zZfmjAWngf9L#C6!G7@IwBoyWl=3pUkcRB3Bk*VRA0v+Fm0;=gM)nlK&yDOP_-i;-0 z#lfUh3UsRLUlX6)wYq=coa;FqW~*I5Pg_(%TTij`B>P2Bd(bRP@bM9fvvbkx(g@fW zUubm-Qk5_J-z?M%s=8F26cg|XlY3F8Rs7mat&I7QFwyhWxJ|fQ)V2UAS--U0!S3}= zPNyO;XY$zkD#tgSPS2>9mGh&K0Fj*1R8Y8VP5Wa?D5oM(VumsZO zbfrNSE%#41SNpdd{n@3*7zEViTnndKPtC09?eRkZSxNGWgTO4E+0sIo@%;JdwcQ%0Q zV4C&G1+5gm>fr;=IbuS+z~ZrtC8OMeU1b;48MQbH_Y^3xCy1*P@ksx;H9-5SlyW}J z+@@jbOU7u|)z+O)R6didzxXBDfYla!d^FW%m)$l2-yZ23 zqvKQhc<@L~iRTc;B4ov@H!?Of&s3>oIgbjZ?y;DRKkE5$=i$1CN}g0XC5zynYUiU> zU}ZtEr(Ms`3Hr{IlcEku3S*ypPBl5xYgD#GbqG_rkH4NfVHD<-njQpxO&|PVvnBm~ zu7a7Jr$C|G>>5Sbfz}czb??jP3y&Zwxl7R@RZ4lN>q(+;&bW=v&UlqV$|1MjWQ5)p^dYQ<)W=KL$uE3bcyb}o`RD;9_H7#8RqvfS`Bc`81{+Tq zm|FCAl&(o~x)Y|c#530+dve-DhgO}?{C#p5Et#4jZ61ab_JA-yVpu zG8%K2=|~)s7?YpnZ7f?m4hTNdIb*Mn7deb2Jxb(-?+>^h2 zRif@y*awRy#q)ks>UVb_iubp&c^iN0(mfcBs_vnc6O9FiOW7swbM^Z?Zgu3jH**Nh z6T}jKbWY51Je4{$ZCs_#y4=6;S5qR-%dK-EopnmaI>b}0+<(*gi69HZB#qbP?qdS# z21lL!UmDzX&K&P4XprvxI!W|voF15IKXu%${^YJuLfm1N*-^odo=ee(aN8(TM)tgc zx!gD3t*@Jz(Fk*O*^%Gh*h|{l;EH5)0l%I*eLg%Ea8v+u=`E`*T=5<&hpsB@y9%Em?Cx zm&4>wAC{lDbc(DWx?r}W_6ZiQF%4nw(#+4zJwgoGYFSF3am8-b zr6%0`Ym@z1vIY)U_xC(e4He1|%kxr5AfLVJp}d{0FCTqm*&|0&tUunc%&RpmV4WE4 zWlHtJ?sfYN3~%4tSZ|-%h90?8GpHmdY2}n(C4DM??rolzfYD6J9y8(m#GpMg89ts9 zwUgzxAMzWjnq@{=9cV+F(Qrb*5wESc^{J(>;eMn|H*;Hxy{Gtm3H;?_HGj;iGOBq$ zIOD`9#;}Kcw%y8oG8xm zHj)|q&jF3(QmI*esP93;O?DglzW6&Yg(R}u8>gSS@4pSP&&{n5@K^Wf(Q>c~qezV> zHWa$21_{?Eb@WPDMa>n-?+fcy=wYHmd#Di8BYkYqt@<<8;Fi;2qm3i`h+NElN}Ey!S>%%A#WBx4hxYC( z#`cFjqm^fkrFY27oj>D$JI7#RV2D}ibGz?x+gmsJtW$C|Umad1)=Qhacb^iUIIO@q zuMt!tbj7EB-y3K0=96~cG~O@=kLqI4gGmlO`QZj+^NHY>ts=y;kg=w#!p9aNz%Z+KF0 z@#eDqR)1PyU6z96+qZ+i%%bxa;%ArQ8YTImDbgYRBlz_>{$+`&mC+|ESR!xBF>32pqTSNb5UIW30F~BirSpvl1tB=qstI(OLVe=G{(tX)C@wyVdrzB9|CO z(?TkQ`<;?$_4 z?&F`nIm#;PG6->|V1YlN%Y3~=wR2TUulYPBT6KGK4C-{gNDS-&Itw@%LCkg=tEp6G z(fnFNd-8R}qAm-D<-=1mmFeZIpsO2;d{76IoX!IhIaGgg&g6R;`r077VQk+)#Giw5 zPBvdCS2>H<-ltA@=Z0P)yxqip#bV#|81kEZyY^J^k|N zwiJoQSoWadDZOZgQ`42N`@MW54&q=s*a_7&+S2cQDCW*bd0D~7TrUY}5+M)oey$_Y z7dX`l-TuY&F0BrMlT5Nuzq9Q0ng!LY3ndp>PF@QfPir}1{t>Vk@%II)CB-G4)Z903eKxzFlo1*{vjwJIaRmC&CHNro!ma8w=7T8(zED52 zlwn+y-_4mbR*O!bpIdD#2^Gj6BX2at9h#aM>YDWX zi%r8?Fx-pst=95L%$%=ZubELt-pR7IP2&+|+`o^j 
zGu7X|Ybo5MO&;sNT9VWhpD|K{38=W-#h@HAkd5msW-)F#u@?O6k)=*3hVKM@ZRK#S$3jh7lX_~#SA(1PM2d~e7JgSWG?=FU zv_D-X`g7wOa{JQP<}t>Ob`t|PeT?Sc)P<*QZq(GodrBts)QIP=xuhNiB~^D|<_aV+ zrdvnO6rS9^pk(uY^25~uL7cP8u7_xONBex#O>r9FHWter?JoT9Sv1eNfQ$v-?APQqi6yX92d zdEI}9r``9L(+%Ihyxq5_`SSg$9Q9vonmOksd4JIknH^dr#++TGqs`7i^mzub3#G>8lWY zd)c(j?uH7lwpYuPep~cp`hhL3KfdQ?>UZ}fP_Dgh{Dj{{40wD=Xxn9-d|CUtpI5W? zyixEBakv;Eqj@yymSW_xVE%D=FIL0(z}x4HFL(878T8W~9w|lr9WfYI$bQstbB6IY zpyBpJ(+LM>lP>YYXR8#o1UWsQWa;UoZnqh|fWjBEDN;f}p>s2KZu;jTopgnkb@FA0 z{ayQh@QxOkZcx8kBQLDIuQ;RHVJb}J%d0sFhSH=;s@=$anuB_ez`TrkoO;=B%_|k# z&R!BeUgAm0cb_dbZooW{ud#M{rvHuZ)nYViUsD#c#?HRZUpUVthqRPyXmq5klbq^z zQa$JaU$hHNW^!S7A6fp?9@9V{(OV(;a+=g#!i13aE9H$tg*dCU;*glRMk8H=n#&VI zg#}sHuV!=3nJd!g#v5K$#yIR2ldgHEH^_xDk0#9Udzx~)W;c> zNrrsph_po^d4v$6BN*Smtg%#1zbOA;Kg=d75)4z?f)i2hSA9UmzA5;)r^!o}=28|- zc>}qR*^(?XOV?5LlS!db95bt!dV#A-<3~Cqn)(^01C6aL)14*mN<-|;UySf@9z@F3 z#iEdYmG90>CHhmwH7UxyP>=e;>95xA^A&0ByN@jYD%9Lg!OJcdW$Bh!PO~@y-nc?=k5&o`+X8%SN<}b+Z*-#7d18FQW^VeJlmgH z5n)9 zp4yGLMpyaCg~R3<`wGIE^Hr_y3{Ty*Z?ttfq!chS#QHQoXSFK;Pj6lvU%JGw7CL)q z-M+w>TUoNH?4c^@Z;4miqa^J9p6UBNjW4lM7(a3@eDAxHv|gUVy`FCWBo&Hz>DQ-5 zjnu`5DqT=60KU8os)1fHQ6LQ~eCG1y80tUA_~B zCjGNuLA*P&XQtOM!fq{PziQ z3LKvBg?)DafICh9RceYyLc1g^Z*GAOCYdYCPxstz@?7r0+ESi zZ-8Tv&UE{>3<913j$b{$zq-ojZ!OtW0j}Yfu&E7N9%9f{#~SVRM)L$`H!bYxJ}Xz7`!Jhl4xU~hFy^)u75 zK|^u}TF!xT>uMV(ePKsT`yaXfUl@0+!!Hb(*vzSG+?XWbJW^QDi#yphNb}r$6PtS1 zx%=F44Z7kh+A|uoK0`PTYkxz2ae{Q>)VDuoMj698IfX5S6NXsxe0s;pV!|*(<=j`V zDuW0o$AI)2GY-?R{goR)ockCZ4MA{jse%t+5Wik8MSIE{2NOOLkAG`4GrC0+qNa4F ze+|<(B9|n#?VQ)}QEJX>2Y0+VwkrB)RmcFve!ZRYCbmZZ<|s#1`dB*S=7>b)xV-&q zp+-!15ND0@>0#*fWZ?o+zhnxP&>dQGp=&3=!0>qxx^ z!m51^7TCg1-RYMZefM2jx;V4M-6sIz(shh1Y-H#p=;cN+_;M_zuDJrxxm;Y)x8*f?Oip$ny9v^L*Va>U>v*9r2pKLdmA`XMbKJH&BmGOJGpTVt} z&8aCTp28uRZ(t}gk^}>?mT6N%T0yw9jnF+@jD?*=B-^a8DZ)IkuBYUIpFv+&zGVOV z)DW)lao=3XqYp*6`aJ?L(Y0bMNovlwj;8-sh`cr8I)aT{L=xDq0~ zWl~K}g?{lxF5y+Uk>XSF-F%hV?$t)IcP`t1Lo<37(r>x!I|+z9{coX_IG6BNmV?f~ z{eEWlhm$|SC-!8|E>nvJ?J_KU2}*4A4>+D^bYn}D2ev6)RO*1+^>-=#O(^PjoUSBA zZ?6F3hlt%vCtSL5^HqOrNBO*(&H^^}B!@>lK?^yDBsufRe^d8h;Lz{56X0m^A;hV3 zcmPO-+T{Gd;^QZZb3^A|2o#Eu(| z*Rb@kr2(7Qd_M61MK;D=ukN{C+kopDF><1oqeTZWdCK6R3pGNkfVHwro+X+dZqxA2 z;}T4A2kU6sC2k`+@3KnXF_+p0)xO;d$$A6}EW3w(!mZEppU$0AeBlR$*Bt0wr?&7<-G$e$!(CjK;P4V2KCd=#Sg?Nco)L!iM;-Cx!iT zIdT)2fJ>)ZYcdkQHk=&}UvGWr+aTfVayw)Hs{>VT&U{U35NL$TEe~gESc*T4qF=X^ zmD_ziwEza)lrE^{e+;{&1B@k+5>I3rIIMc65u>RJ7G;f)gch^a${rC$gPCX!P1B z96jay=R83gtNE_V5>Svg*=Rr-5X@ZD9GZVO#m67`WOy^rlPYK>E<2EI%#!FZn#|nO z8Hpi$ox&Y`snzMw^TzZ2^RBj=jhY7Wr#@@IUflK}(Z>f)Vi$fHQ048O9Bp&d>0^*P zl>ZDwYJDMZEXZ6VQ<7WZ?l>fD1IS1k8x7~zv)pQTkKFD0l>4Iklr2R@ro`DQb133d zM*HW2TQw^h!}cFyoH@&Px<_fj47+UMH1tep?XpKq)($^|gZ*}Wg+muw2jwJ|-7&N~ zmq*GZ?y9>+;Q|BO%);0czA3tIq6_#Yw_g)S?~Z#NCNV(`G>KVT}?gg)N7n~{D% z_+-)$OmJe=Q#hE9*rl>KW9JcGu9>vYLsoSF%z8VB*%Pk?$LYK!$)Ek}3t|=foxg8` z?-RgRt;$RU$W%)E530)5dA*y*CV`Bg=f+&8@6&<^K81sr1A5pZJFs|w4|o--iUQ1x z4{iX#udcWORoG|%8lv7;jqY~|?U&J$q|{|)NP8)8HJ(~X{g83MC6X8iixlrY;Ax8K zTadB@=u0x76$Mscs^E_$_&5W$72qVvj|o0a1DJNu8_57Wl^et$xaZ_xf=|RC2JnR_ z0Mz)*z)wKBAV-;2ajB?eI>G1400u!oBr^y;kKrd$1UM@T;3vpMPPwh3QCEDoD@fFp z192BMnF#V3Ir}(OhXdsE%GtRFHX07*qYiAt>7Jy1gtPhxYKAA446B`$!-|_ZzJhSG z7C@wNWMol;oYLDy{Rn7Oirj0iX6qR#Zv3w4U-HZ={Y9A_)rL#H++x=xJQ%_IO7S_ZGZ@k zC3o|{#B>A64}bsxw#Ig-N(_Jv{vp0%!gB{eD7+v8_!tGig{y3hXvhu_700to#7YGx zwn;&3jZ<(FuqmsEQB-Lt!N+ru=6o1~fKqB@sT0sS2t0QcD2loUI39K-;M7pofKus? 
zC&$2x76Gi-DnJw!4Ch_F257LFMZqT6+v_{d8G0fZ5Tq->P;LZj@Sh27Lyl0df3Y zqf`j8>)%E2BAwX(fSxE@a9f3bM}lpj@}T{XZxnkg75^K9ssC5whSS%Y5+au zDv3ZlR*}_G0f_Mt^g9X(j6ph@dQcPqnMg;hwx^r^ zl(y>x7ZMO=wV4}k3rZfd2IdbybD~HPKRV`K0#k@L`$LAL_rpa%sCtJi&6F$`0hsLR z(tO8qanVLpYsGC7Br(7WQMzjZg^9WdU-3$b|9d-k+e7>4C=)JUg|iEUvo zsGc}6dbm&a`)77bLvx!n?^gXW37a$z%i=AJl5sy)RI7r)HCj4 z5Y`-N)OTg~S)R^<3?R|TZQ^3V`*72!C}D{b@`n6#LG8&gu}DOE^<+7A{)mIJLO@ibV+>7^7R+*N20k!M7F-s4*@JJ zlzHUJ%Pf1N>jVBo)VF^8tams2<<|bYI;S>UXP7s{y9-a><_?A^Xn!NU6s1N>bxyYY zO8w!t$ciy@xVNC9nW>6eAVfHw4$wcCfBxa%=-x^tL2D`5JB* z@u)%wME2co(m=n(+Vkdvr!S|gU%P&9@y}s}(X6XYnbj?795W=0GK6PpQT=SL-YTak zS8``z_n!XLua$-FSvZ1)gBnNa4W|0(aqDLgAbAA1Kg|fP-v?e+?ta>56A%RATtkECid!?sEF_)6cI9?c^iz{EF7-{>_!FaFES;~CTp&kgGr5IIJ0IFuYFMNX{ zYY|%OvmY?sKI*%*`=>V6?Cb>E&pd`Lw<0UWasOi!Nn8q{{?y|~zViP&&kFIuzqhpx zZAL{on#?SI>7UHc+2&p7(8q&1@k|3X-Tf6^n%R}r;}>MZFsgpwWFkjbJG0#zLSBnR zdzLfJ^On=hcB%fU;__bi5xrc0^~B-?Mx->f%PzAxm})Y$C2TBeJT}BYHp(`4Y}bf< z7m!2LeD{0)DPwG;gIDrl_5|Z-SEGRt`Hxkg=@sK|T~+~=RF{u(KN3!zy#SZzX)}Twjf;siJ)3Yo(^U#~WKG_)8@hc_Y*#Q;rw^ z8UAp3-f~>9`rUzE{cgd+y;xe&9zmg}O~Pdo9RiCJ`Hcl1Qv$`Q-}Gg#B(+~W;}N7? z{8o=#l<_H|W!k*?Vo}O$PVV2A*S@{k)FqmcK2)qSZ8K)l&~epNUTU*pss}aQ()PzK z>Y|X7(pc)(p4#_#Qzf?H*x3>o?mO)RpmJtG$cah@tS}K>atKl+0%n8?KI8q-Rue{H%DJ=vLx4-oAhQ$&FPk zzE69SxNnCdXYVB~?1z%@%;wo;EKPQO$EMT`I1Q&yPTaj)vlOMYJcV?G&8to?w|316 zbbrv_>D5sD$9&YKB=yAgs=-GpGy1!8^_uleC$vvi*nLU0A5)MIfBM93s3>$<-nCTP zFh(XK6uXzcS3_Mvd_E=UdG5){bCwN>X)XOza-R)Nr<%4cTZrHFYs=qHEPUtJruli` zYD%{J+zzJ(uCr2Z?|pMsL`!%r&$$|QMN~vZMY>9ff`EYZ5*4MFpCX{rRDviS0)#+lf}kQb6oCLLAkvW@TBy=NKza!! zq1VtMz&GeQ&v%}3-u0aKUF%!lyVmpHUbAOr&z?Pd@44>#y6-DgkmCs3P;VM}W8rG9 z1Dsi$xy;_>G>mGz^kWye3vu=%5*|wu*umjWqcG#qbnNA)+2Q2!Yxv-2)~NYna(o*5 ztowmG0ZmTR;wa1(txSq2D|?VLI#z&6^knaNC4X+%28uvdHwwnd*$HQ-%BLtd5>bBK z>vAcYVT7FT`lwfu%a;)f{n&`2sWV45@!j;&^i4uE|*(?`U-` zxSfvUoA%r{O)^<6;7HVZb_t5ZMpH_4fyjM7Y+{SJO`_up=N22}E3#m6#+6*_>|l%; zgyEgrcx+ZaMG?Pkx%3Au_x9*$*-S2lBwWD`wG*R5O6n#Ch(p(-`u6tgh~tbIQq@9x z@rCqsPB%A~BfGxdv2kgJqayZJefT_IWVNK7GjxgAqP(wJXTHbUpPV?sBb@!qa5>WU zN8n{jN=10%XwlKvMLN;MmDTo>UNzyO$m(Z~7A~f-0m7LBSe4kSiV`=4%lqkA2jpV+ zm<*!XXLvo>Ex~O!Z98r@_OC4VSLWZ-$~ScPuKAOKj!Ve$TTK+0N04VN6;z4zE7{vL zM#M{xO+3Eo*3s%`*{URm)?Ii_5Dpb@4xW6gCE%Q4{(Y|Q>lLmhV_xx-XU7eWTl-?l zuGGKY*;IySz~oq!1~Sg|l-;Y*pORC+Vy|xtjs_FSX-2g*s_=`Fo3px#S&gL$6Gi8a zM$LCN1aXfpe74_(x;V-QEU}(nHhOe$$As--q32tSWlzyL){mKU9tu9!UMiVyE9;B4 za0n~-MBmzR;`7YBHkff!)_iTFsYqBZ0`8EjEn;a$suX8E)1$9&yKKvqR6~lehZ9Rp z?U^IoBWziOgiWk%_#cdXDW83}wCY-A3G1&jIyd9EvOr28A>XY+=hdYeEmzVVyA7^b z{$S2J`>N`8pufG%tb=t;7mwa~#81iUwC(|mN9)OXkBGMkdg-(Mn?44m0ummB67ypZ z4|VYv4R6elbVQ%8)QM9nA@; zbZ)~!0{8d#;b$RFmH%EJ|J>#L1z4RjW2Mob7Jokht9m03YX;?kf6C(#)aC-7jY3c& zN2ZwJvo*)irWn%B9v+$kF}C6_*18LahJ*QE+?^fTiE!>&?Bs5Wb?dc!CGdJscYC_I zRZ{ULM1KbSG`qzs6-u(2Vc&L6(Kg?POG5&$KQns;Ca;~;MuGObmC1p@K%iv4z za|VR4^>sJhJO+|rmc-`FiL($npc=xwySvL~6rDFbkkGzqol|88y;~ydc{HtYSMrKL zk&}JQCTxdSlo7)0peUDw=)Sa0(YhA5ef#9nzHO=dF~|ll_LfZ4hf@ry7^!WatL+0M zJe1Rm4)K_oa%D=@qi1OSmFGe6m#dI1Ahn{^d4su!DC5KcbF&q;B2-)%j+6Ei+N``y zvOZWtUIshA%b=3DbS)O$bC(z>``*!YdT$5OF8!`-^DJEilB_q?4`}8<0vZZSme3`t=!R`Hrh3IleZb)PMw0x1O6+DXiaI-S>!G3jd{sBa#+UyWN z{j;_E_m%i>OWHA^eQK8VDVI^NkrB0>l!*a^5#$O*cGusp7lF7OjXKni|J%C%Pi_DG z;{LZT|NmEP$7a;)WTbC0mj4{$HB1k!Y}McksT-n(5TPJkb!8h}ad44z`a8&%3HH4< zuS7%EW{yCLxq%K7(+UlQQv*a+9japiK;ad8<_=^u6(otJ=eYoJfq*Jp_kp1W@NMcXL$-+*{0O~?4p7zY0pDa?wwiR=x0U;>CD6z+XA$Z!qZw$CHH=!&Z$(W$8d3O zS}*vTx5xF|&!e2Dh8O_hlwbwl50PxzG!R;_VRgO{rQwQqW(d1+6mk=^Os|7eLv8^P zo7h_IGb+Lm2#{}{zqNePL6aHs6C}5GRvLpqWJL{_u_;ByPw21@NH&mMc(&(ph--ACL5}G!sB)5@!ktegg#svR)Y5MGimr0&1YIu 
zzK*e-F^G?Ca9NzRb#Ltgq$w7HXB=4lp-1c}w#JhBVq#~H(4Du8CYmm-+Kfe7S7;WQId#EC)mR^0c1 znnm@dEd7G-NxHVSTb6WQ1pqJJUTtqAh3mbl_mquvUC>^_6!|eKk026W#ETtubH9Kp zJO-&S>^Tyf5%<{A;YAX~v{o?rYoI}Pv^MccjX)`%X{WBIgu4jK##x9qFSb(8n!B)J zVLo6JaB+|k3&|U&(xj#Im!@G8!zA30Wp?{>>k|PO9hyp^!)wE9oqxvFV&cA*G|Kj^ z-!96nr7=FZ+_q5S(-faR>ubTt@ z)7TLGK`A*@#iWK8gAmbOtp9l3DYsNbzKT&O*LI>c(sZ%F9D89&zn@IU0`u4LxRCyy z-sV1Oa#!pC?}jH-6dt~GW71*ZB?5!1%)=e(0;>EYDE$X~`@bySYVSS*sVsN;r|=bs zAs_56E!o;jGxj(Q!&%SVhZKVM+;QiOXZ+ZHQP{I{sls#+R`5|f-AypgEe>&KK$4@O zsadl{6Z`-F%?+^Snb^XZP%7*A-+s*(B4c$arqE7igU-Jo}E1N%_it>9hPhWl33ci~l;3#aKMFVZFvE$}pw4gN#6q;EzUy zY=_1Dq8K55M*Vu8N8j#eR3M=B&6{f~p6N%HOcQ$FUUf-7nq9LeVNR0O(gt;GG}6Pr znOjqr)x>FI^pksm%c-d+p=5sh#ol}lRlQb_8G=ucS7?1?&tN%_gp2F=*I;Zx(|uX; zkA^1e`iia~wP8jIznJvR><_(+d*|gkmk0y`{LWj+?(5wZ??0-aDP`_)re5fM7z8!y zbwA3-Xz$KClQyOJIz3{EH4ekFZ!CsvSSWh=2!$tKO>z?RPP0lHqFE1`+=%sbhN*d#Kz{WJQO+kG-Y^_q(o43!7oC>ALd=n&D@niO|KbzG4nkyd~o9a{$*QHYrIoJ zeohMeM0(Y5G1~cv2NjZ}O_2=2U=IC};Fq#t^9uRH-{d}UC7rzSidQ)M+;tZh>yc?L zWydE6I{0C&$wgmG66+~NHbyQ6=6IVKp>eU|B*E^}-Y;rGUE{m>8g2Xf(#!JMT=MA- ze%KFu6WLzAi9#j2R@LOOn5lW!BiaEprWg(G4Mtj~_PTD2x!C8p?7wtaklM=El*&qM z;Ioy}>_`xM)nHmhK9zYu6~B|dY(AnXYk?^%>wI2iwwG>W>~Fwy2$NkhYa=?;SDxx< zwc3w;->;%0HMA5qW)AooW!b!UT=SXSXm5`fL$wleBq{YijKav!1@7X)_>RD+Xq^k} zw+~_Q#Kaqa{C<9m?h!-Y{mS>;^PNioVE?6zV6>IC<|q0UX+-Mx!(aj~_g?OY5{T#c z0_KF@-^^84u2(fSP6dkbp2-1Anb{!+o~Jo~BvVp#`B3cf^xG7K`|MCOrXzK1!FsV+ zx+aRxB+$fuaCnt|#e@`IipU0r6$8%sEYG3cp+WuDrH2Gx2a<|xfbK#;_N=z_b>zdG z{`=3^vgOmm^-We)uV}wLNYfz7$ndmrIJX^mVCN(qh5#rbM5=KI2!=Pu_Fjj*;g-ql z`yOAH#T@${ebp`E+)zU4Jp9;Mtp^5jN*^j<&eDt53vpYWX|4U_2e`>qzvpzS9w)ZA z_qIs7ZuEYbI<_EXOQj!Z=tqj+i|(=S&kzHw{r0qF{9H$Ojdm|{8nus?Djh*H_vow* zROaj{Vh!^ul7CiO8KK0{x0O83P$+H>Ry(xC1FQ?r7fy?b)s>oXrDWhy$5GT-cRbP zD~j@rB=Abk^-oIZz#sYA^^8xE4L&WZ9 ziIH9*ff_+6Yih`CU{94ELY(bDGs;_Pj^wl{t+0(RHIPIgZP-d%k-roIjTj)7^Z@aG z-zjf^;DKa@RjQ@Nq|Flr(Q^ztYD1L?tVl~x5|8sh2Nt=%f;tWjBzk$NC zfS>-e6&>^%WC#cy{6j~}Oon}{d57gdl@pYYN<-}m$x-tTcSbs7YR)TZGpA-e&LSMR zIUFCY`}%tB?p^0QxEJnofSH_W^2&TcNd8M14p|;qc%|4VBDi7*Eq0g2E@E+Y+t7$+ zK5?#u;*+*?D~J7VB+bu;u)6Lm!kG(>-#^?7jXQn)X+FBI!H$F@nQ>QfZVK93=drP| zm$MFy@Qf@K0bkT1e#DyF{8drhhV8H*Y0nrO56I}c@jh_g*Aq_U1S(^iaQC@dw8ecVY>zt6x)l1UiSKO{X0g*&mKqnNLja0ydx*-v2ansyD;EI2s`qlAOn*Hg-S$lf zk0&RIo7PS4TUlaJ31u2uiXhW^r2}NzZXW=x#lPrOaWB|Wtmn=Q9I%>O6W2u#Ue902 z<=*{lWwAi+oir*@M+LOn*xLOXIHR2>9e(J{hy~7!Azti!b+}q-p1LjA*ttmjXKVzm zt!WKs>_EiUDmh<+@Bqoo){Bx$Z9+%9c)(8jAC}yI4+sAQI!fu5Uha-YhnQ^a5TyY* znR}4YP*7z9ejZpNKowqZP(@jaYVlBax_%m+y?T+8=@h=Q_d=!5-qbAh zQ*GcZ(2s(!Pw#n{7*Rv$9R8Ex zHipIK2ha&MpJ%d?Ao?+Q=o?d?Ef4q#R(b|fSJKxMtSb8xj5prPsQ!{nGqIt_lN(@1 z7lFQuSMS{VY>ADO%R-4wK74c0_<&O=N*&@6$Tm*(6suvJLir}w&s~~l8pk%K1W=C0 z@X*hNV)TPoT~g#@3GK>vy$yi3Zgjom%K3)N5N@!%Doq4Hrl9*R2pZs zAX`p@iPb`wZca&q-n46Ba{|GW3PDANkMpqF(xNpEIdhF)2V959KU8%6H_lR+tos<# z)m{VbA&7=7!iJEDX*9feENe)ZB`9J7BitLy5s3E>Y$;$Nd~1Bn!0LKkmSf+X(wW+D zuG5+n;kR&iq_dDq&xtBv2mwqD40QeJjt4K@+>-WkNjs{3E-Y1zQTpPttf#dLaTbZr zGxoV%9Y0}GI|=D!WZFg*)S&nhT^|sbf-p%0{hC;stuJs_>+emgZi>(k^qbmwFIhT^ ztrP;@7;R_$)UkYWAtlwVQw}U7$kTaBf@ow|aTs;5t!0j@(tHG{Qf@VnHY&zQuTrE2iuzyFecN1}UUq{Uq%-@bj*)0E&T z34c2NfCEuz;1u`BoirLBjqS8>%Uh8N?ur_qFy}r(?c>XbVVYOzq1dFcQl4(z%o?ik zkTWo7h*H~qR9ek|`Q5u%iQ8Ri)%=X-`^)_VTSt)W=%1VlT{IHjEiFBDzV6w^kJf9y zVJE{>mr{NNAvK;OGq%&qJ}iHN749y6DR(@O)6L1BT?mb6N58&Hy6jq2&98f;*KlzD zi!NJ1;v_71WU=&l=7LFmlr42I_}>GcXq9`j&i9SXr2glkREvugnzkP)=!^x+U-$8F zGJnSXXX|2eX3Ypoy%@n1PN80|2v)FUl)xi*D7Y6YKlIl~8mFXl{%kO9575Wr+ebL8 zT0<*Ig^Zcbax`|nxIC7X^`h*@k5LO`oc=_+q}B9yhqljS)ZT@AcoO4_?7bXyk-x@r zg+!a2Sw430mCGK@k?lc?mL8j(C8(uVscr03hEyL|zNP>XtOH<)}1-&B{K 
zD5n#>ZcC1=+KCA`Vih`--{?$Qz1kDh&DXmW8y8zw{LFbmCADho1+ID{b1~|J-!Fui zY<3#z`)J2L`(ipSC8(tUr>-l5_|0R%Xju=FK8aef<6uL{*hOL2kL^qd&@zea6 zTD>-Ldppa)&LrC5I~L8IJ82&eG~M|;M@|LlO#KMo)s^fp%3jzO9Q3?K=)AMq|4z3m zRg^tDAGbR#w*@9x|6%_Z+*jH#y4`bGSEeyYGZm#tHc+mdgYN?#Tq!L$a5~u}HtZrnuTYZZ+OJiiK;(O~bFU-1= zGH55gcYokB(Ky-W;LIK|VQh(w29;ST|#LrPtaK#FI&elTLLmQ3s28{BR>pj?75&G zH!t*{eFNn&wYZ%%bi&yANjC0*9na}V+Yjg~PV01;`R>N+Ba$8&>#sM-9(b+Bu!zzxLO4|m-F0)U8zx$nP)uY3s#O*XY(e11;#s^i+ z?|S{fE_BWo_BdW`6n#ig`O>7bw$jCiSb7p`&3bXV=FqKHnQbRb#{=y55M}}46>1L* z__NVAsY|d_8&$wS}Z0vl2#(wUVM5|F!>y5-6h{0z)~@w z=e%chxKBX`bCYWpnLp@G_GfF)aS@yXY~KkJ$o?f;;jiy=YazY^!S>3;4@#zqELO_g zDL4ZTz9>3_tj#_)lK)O|*-18F>3oyPI^8=cED2NC89E!eqb706Irw9AQLYgo&O|79 zo>*7G8fm+&Xq16k6GAe4j{{5{myD<9`oEK0f-zk)T3I*?tKl2r32e{WyD^6P$J&rX zpZ2$d()gL~MYS^#T)o+Hp?>?_RUjw;(<>2ye{f2JJJE*%}4;{@=hE={fLu7PW`Tkm*Ckpd-7 zK=zTr=8}J6w)<|9Lt@i;<2fXG0N<&`&EWYhKwMekxZW8ph)einN?WVEZ5EDHM`EZU z=r;GJ6?OHf8?>g|l)aK#y%F_V;{DA6dxHxHY65}tTlgO@akr|2elN_6N{SAP(!j0u zzh+&5x_>8d{iA`qq=g;vz)O=Rc9Z{;};p>GFSy z|M2Ag_om8gaJs~1;yC0MFreE(BepIBw)Srqqoa}j`z(mlgOW<1gC}fHpub^Y;Sr+GUA@9+n(>CjXQe%(h1! zwhA7*{6Ws&+_y$}x!}n1Qy`cC>FWYUXW&t!bHx(?4yyG2KLxLcoo-zQ;?r9R09w^V z`5pBCd|ETe5J_bQ90o$m%Q;|%^sG?Ua%um$Dht7&A~rY#mW z*BZULqC?wx0#d0Cw1??A6|-ij-y$@bVJcGIiWlG1*gBY3W3!m)Z!H zR9o}K#}^pdi{FOJ7@9jgC+TK(6LhtF3N2`uPrU3)4;Ww1o=vuHt&3Rtf@Rw*ruT$; zjtM1n+`sm8(VNq(IkA;RC>wSjzBaqO@v&jzuAVsOtGo=)T!KV8YxX=*aIVEYC{f$u zdnz_huI4sSY5w+KI6nPb^$NcjUM?O>%=Om=Ld^THsmSaB?EQ1mu{<(1zuJ~z%3OER z2N?qxgaEA4E{vdm>2iKFn{3dJX~K55`A}N=g8If{igLTa5jqOALzO`QY2f`dY z0!4V~1 z`}cpmi6Cj`PT005Tw9@TT89$iXY0uY=fCn-TnW26o|tskWontT1>6zg@) zj(KBiK3vetg-OcZo_l-a%`#*}-JV#PqhRq1IBA#P6PMt&ekY)&XYAVBM)hje9abZn zLen?t>^oH z`ioGnCkB*An%%VHw&pf-7i>|CS$WEV%ZdOSL!~ZHUcjS1aw?dcu5}U%ZXt^5vX9@0 zs5kkgZ_wQl`Cv*yj@%IBlQDp8ax!F?YEnax#9^Iq8|@KyRzKaaq|nce{Vy1IILhaF zJ;-pv@hQ&j+-AOPi?UoNw@jUl!9uas+E-HHFvlN3`wuemxuAtH^b&tR%Ly5N; za;!FOdnzWLv*U(Hk_Fg$NwzgS;0RB^(heR-~`*Ua>wtCs@0w zJs6XniuZ1EYhUxg58C8k6r${#F6bOn5AA?cj7Z$fbAs&*RnSr(vp#3G7`Sp=z^J{z zkc%K(Q7+W!;!aZWS?`|Q=(Ho}yILkn1om$osirsG-IoxPOA`7KuM1?6ZQI3sUw>+P z_m!h-F6N;2q`Y;xY;92D7>L4rJ5<{aU^B_tv1brk(`S1)<8YY z$29Gi-dE;r%lKRZKw_*VnDkITLb{--{*LZs*9W<4D6(X{51*lM0P+?cWb4d}IW-H1 z55@^md*T>H{?!6TlQivqk0*EDw!|?)D!J*Q{h6&tLqX#KYzvH;k10hH0`c&-3wv1e z2|dB$w>3DG#UvYq0VFtlr98CAkxU=hW%Gs(G8#n>l{`&P6HzRRn%Mmz5j;AAgm`gH zLR#y>K`v;eBZC4~Dm&ux#n#e6X_cZr98Z*8p9 z>BZZVVgBw6CxD)G9?7tZM(bmxyX}a8xu4vkz-WaVJtz1+qI6F+Mg01nys}lTt5X6GD1yO>pwT__a> z<>@o+w?Z4&7a$q6E*!-4kJN4W>{_q9;$OE;7}Q_H`UbXKYC{0SH6c_F_S79uNE~y3 zos2V70M6R5XWZ*wuke};mZ+c-)m{s>2!2@dcg5Gc3o(>l#cl4!Em&GM|C%M)IXif< z8D{39b#?Z_#=h!4iMRZ0teO<(u|0k&6@Sy>SycJWC;JO`6ucL}ZsOgCqp5q9?l#-g z<=K^=5M=+=rcBn*EFZ1Dl0vl$`a-26W{eM6<<<~5ThT_-#RplsQ7+c(4H9Mcy1@vs zX61n)cSi-(B+sLkfw@I~gNNHgN6rHK3H7^{Kvti`Mu&ohV0wc6rC9U1ov(maIa|KO z_I`ZtN9+?bUH-%`o>z77N&AkeYO)yhS<@9k#j&Ts7|S1nWwk-(f<0Op1ia&P-s4f_ z;wjqr(SBGo6zgM2kVmymU5TEzl=^C`a>CNHjqAtf!m*_@l8XsqER{dTJZh57*3Z{6 zN`>1|w|}=donTjMf-HSK$o@UrJ(jF0k$!Y@Pmm#H{o~K!Il2o##hpFW-dq~fZFF?6 zol}=<2WN#Z3GNg^-bpxMqiA}r3oqqFJs3>|c2HOn-e^%kxO7H0qEQ)+M19t5q&$vr<-%@eKAwc2&q9|Tn4ML)>`^)<kl#PQX1zS_Mec^9_-&Rua|5pmhBMI9C}0kK$_TSImPjO(8lZU4{=jWM*DY( z&)-XmekBP0hDN|q4O%Vxy2K~$&)bc%{2$ROzn=ADyQrBUgB97{^T>|7h3XsVzz8Sw zGehifJRyh3bHH*j%8NzX#G+km6&7DknqtC08Jp2V`H=qB0NVR z9+7TZqa%NS32g*bsx5pG$IH+z_U@OoVTuB3aU;{Cb!e6ee~>anA8}M5S;)o-ZtGTk z5`yG86h3W-8@k!<4Y}-;!fh^06y&kd&1IZinOm~{c>ifuXmN+umd(WO9;b~JyoR}` zq@oDf&jzuh2b}LNjr|*3?B>cIGlb?ynP%q=4%FtmFTFqS5V1cjmr{DJzmS7800Qj( zTm?RRX&2k15IO&#bK`4Fsj=ZPNIx+AaFX7NyquIk!r11!bd76}RJUCe 
z<1t7ZAatpiFR<%iRz-R7uIB1-Q71`A{4@6jzY{kg6zVZrQH#<9SqEje|XboPr-oBgU4Puxel_-S-ZSuDbO+GTc;T>SNqhXnN zku$5a^&*8xe>{E`rVkk9jFQqV3;xXnedPmb96aldu8|cNLQ>z@gp;uBQ80Eq0x53T zf4*ORh1yFBY;A%>CHWTX3--?6t@*r-ydnQYw4eO)7K~LOD%)e}E+%=WzEE5WoKzVmKxh zYLB)H=VCWla=G)gz5`7PD=c)*;)h&33&2mEn*dc`FL8JeH1bvN@eK=P<7zDZTWq)#y}8Yec)C;qHd3bt&59 zX3@ln6A})G;fMQW`4*E+OEFUi4Rm(c$%(!pvpaiEclSgPj!7I9_Q1KR+5d2Rd1TYC zC`y3Vz_|3Fw32CD|2(8t8c~g(_jp=PWES?4o79S*ck@>&FK}nfG~g!`=UDI6={HBr z5tQj#eUFx5r7ttWq^LrClc@AT)Z<-`?V@ix6QWJ}v0T|At`jd$4c*s4>}hRWM4fGz z?rt1mmRxx2u1jEK;AfCPJFk%W-C|D&?w@V9pGOrlAJmhD{W)vr#HwtUO6E#rgKC<_ z_6;G^QFy{q$Ck%}pSFSuPoHhls9)gw>#&z5488r|2Xy82tghSYnrs7;#;%7o3vWWZ zg8!iXp+D|!Nmaj7sb@gO_Hm8*i&lA6FDIjYTvcB>+293(8-$XT;kQ=mY_n%q_0+5$yc}jiSCCvL*!ycZbb`K;gC0cZ zp)+kL(Kq%tFig$OPBrY=zr*uHWlZu{YC9H5dzb1lCKF8lZfCS?W{55~ngpR-Bu`=+ zhD(G!bxPVgjN%tq-lSgYZ7!;{ce}kp?qYmB!;0i-=_I}Jrs-stoS9zs$2zj&0X}uB z${5S~sub&Ldyab&sEJqa_pz#qoF^iYFZY6<`3~%c&YX7hAiichqxC>``>E?#uXFNT zy=UUx7loQO-F)*7PC7}6TsMn6v0^AyKAI_$nAqEL33B`DbYVQcO0PYSYUNfHP`Ezoo8|1s(4Wn|;@>j-=0QceNwL(7-D)pooNXfZ#^%NpL*1z` z%7tZR2UNFEOikm^_cd`T?5P6~E^K%LF{k1Deb2H8N8TZC`L$>2Z8v?iv|9_*n<*k& zpkYgA(d27Syj->kl`cU&QEA6Bcqh(He>)*` zuWV^QJ9}Hk;rCUV>7f9K3jM!8bTp(%OiI#;oAlJi-lcb$Y5fdck#2QZc*{zCH3Olly(#*N-)Ywo@E=0c+>_!!qa4Dfu$H9wtQ-N&lfM$?|D~|u)7n|GMiZ1n*_!7Cm6|$y!#-oE+Qde zLXlFSu6I7?<;s^_7hObM#_%_fIGm2;%2EA=1U31p*UY`|z9r z&i3=Z!TM0xge!qO@|3i4!iSq?uOqBbFbyv-nB!K@;5lqPI7SQN;~-s4vRB9F7z>P` zg>`V!O6%j&vDShGf!u^_jzzBgg{cGYDtq9;h|lm4(Qt2EPOgY+c=LduifNKSMysac z$hbF_1kT5ZZW0{9*UNPzWq5OSpvt|!mwwP=n(LxrGYFTGGa3A}ASNa`m+~>h{U(H? z9DIc7`2-F$T2FC#?heW9d1__dKsz~ntZ*^v${B5Byi%MnF;yV}rgh62PhmV6#Epe4 z+a)~z+S@yVeEG{@5ZM6Kdjn7OX60E37p&)6z?Ajg68rpek%Wzj{mRiNhfd8cVH)@1 zNZ`p^)r^-LLl(2pEP_AvD6|0zQy2ASvu~&W`EpB6xqWT)!fWKYWgZ3lVrFwKFC}1z z7o{&U#qL<5$L)l{e&BbE;{UfXhb!X6JPc_ZO#ef*6vNQpy$)!T)4yYZA$PkK-_o!` zhGNA2HXXn*iQv|X{q33jGeLG@7;_aY$kqd>mVa`8jCv?dsUDVq7{Pe9_UqM1fLcHm^!^8KX)_mZsALQcq2X7E_WYT#tv z1W2VA@NLG0HxfY>l;7OW!H?>ECV+%IbjTT1l#fO?0+e708qkFPY2Z!|z2&^}=U5XQ zTUcTmqtL)r3K*8C0$z+Tu)^8^Drne1m_`K=W?8Y{!wUff z6T=zGpT+<`+C``Txk5jdZj{mYS zUp~Hump_31vna$4i7q`)fe1Zp4aCiuQvf^Q?Y~aa*XuL`QdQX5uz#{Z|71rns7E=b8d?p@DIq2au z@Ti-HH6E@E5IExH8LAO&w41UN(kYq0?j1`9^v0uFU}FfHKlwAQG;3g0&9mjYn5$7{I5AsrtNY_U@q zjYV*cQFBn)F07H`unHg81!;)lz_@j3sgRQ-8U~qpFWJe2XFh37yJ$u zbPQhhkT`ht6Hr+a1o&Ho(OZK{F9NrH84puo2nea?0Op)9jXo{VGgwCrDx%MX4=Y~* zHnlMNUGUBqp8$5WPz|ZqpgTceUV96#wg-KXnY1pQX!K14*9KIvTgSubr$BeWp5;BK z%7+C5y#%s92=PVRDS#=!f-#4|t93!UvmiJ^G>pC%^xObKpDk7R;9&iPfSG0v(P&Tv z?HEC11YdMWAo_RoJyuQhG8A-i(w9jp7=Gsk7+e&XP@XXQ!{=-W_y}Sud_!-*;NE~5 zSa|i}A_ashZ}2>a4pU3KuFQ|-M?vnoxY@XE4nff3`)6{ zK2e8n8C)L-l2-o=r#}=#%;D9#7$xeB1eemQrS}11WHjJ>nA9_%bUq7)^6?#$U!uk* z32^;)QqQh~!X)tCy=C(IqVeekC`?%DtzcFBi|-_9Guv~6Jh($V*e5YFRTfiDJw4Xs zdvW!YyJSM-t9LO5?C#VzuBSM4z|5$vfgV?7PAz?$4qxKq_%_FgU^V$Q`pS<$TI}Sl z+fBad=%=$H>+e=eA%5cx^urJMe#+EqLzK9+rZJ){U2%vKpe#mp@x9b%#U@(UFWQhF zV=+vwU53$C3Ci|szVAOm^)pU!SkcLHjCNj+bG4d@G#M!8UhcrQ)cBe|b<^N;kjH8v zkrYy)dW$bVafiw4_SR!<{TKmEe_74WZg0B<5;ejw?Cy@_QO>@ue3nzU<$8Caqexy+ z45mSZz5F7cv${*L$mp!z{Z_dH+D~pEF+A5oWTRC9MrNs)tGvQ>3Hm$*vB@~)tuFqY zk7LF=Sv?lhP%vH?0IcUk-AHBnGwS)=7s8>@5aHj?7wAtlB^N=JZm-xS6JF6-^)C;?X3#K z8G+Rl!E~5DcYa`aX8witT3xbiOg`f?hBf+I+O_l`t!k+j7o)@4T|5wOs;<#?5Pb5q z^RZl+(T=PO87x%}d-2PGGuax2rSFtWgU5Q2$}VdS%Zc_Q*tMP-^&-|6yYRKjh8oO> zk*$bU3fsklxP`OQcG?8ZcrM1DRIsL3s-9-laIg}eOyvv3fK=Z%rjS(mK==!!Zmk8c zwJZm7^gjFz0%2nSlJL*ICdPQ!PD^AH5|F`tlg}eF8+%Z@N?eg#+)v z;w2zE`f&R29eD1p;6~n!BDg~$9EiS~*O+G4rn0lOUqrFPs6YbVF|#IGmw1Iko$E18 za2Nm%L!`#U!6A?k04aROIDDcS$!eCP_rPWNLBju|7du9Qo*5dfh`3$&vMyCNP>tOy 
zg;ELdh`QWocXi94h59~6J$G)&8uXRJ0w<%wE**>@V*LGcBT?OxNcl#1Ses9#_wjASqzLc-Z`w|t9Ep{=c{na@byroi`AL;U51t>0lK$LvCJnowtu6luyJ>ZzO{EHmYUMOy9wke;y z3O?^Zr}bQt#-huf8fl?m^d8c4%2YV3JN~ZFR!!`yt2?ycOw57ELoGe&Dh4h-9}aD) z`E`EqM1}D7#7emSujTtQu?mNh>YtC9g?);TBY!j+(e-XQki=<6njSmp>`LyVY;0XN zJ?inm<%J$|x8Z;W&rhh2l>Lf zzFhog#fXb?E!lU4RI=8&q^Cq;uUXGptvgcSPu{0;>8Y>nEnc5=Zs-Rd3K`FXcporULy~=Y;etkIr#btJJ3v z6}?42%tG!QhZ`OskM>#Pd_(g(brNa^tv_}{O5f<9+m5kX5Rj$WTnHsSy11VNpt__r zcZJ!^K~)!O-+c)!8s>CQ-c<%~iK4&@4NR1?9rO67Te=mmWV7F48T+ZnYJUgF09;ho zc6Z=+P%Y~HvbNc47=xI(vjHt|`!va-H8(C~R_(r8z@)sHLS{+iPmCj+*9s@88m_|l zA4_NXartHrG1*rOHu)MlFKMrb^8QK-R){WnZXA-xD`V6`idbp(%&+jcQYWWqy8bXE z(UOOnCaG1sMTL6Rb7tjI)Q?DBsB`-VHCdzC2m$SwilJn7`#UmbcbpaO-%JWtz}e!? z7DjZIFTCk3&8FShGCf+%vp~;N%6N7gEBM4^HvO`#Qt!-s;Me9u?Ezmn5OnuZh-QpO zpAted!j+!()8U@!6BQxI&5j8=ms=XqWtPY{zr@_&#MdqlfV4{EvEF(vc_$IM%1hLY zTW+S#LMPapit}EUDvn57iON^mttZg2I;&#sqD845%_eG7*WXfbL$Vu}^8)*A+&h9` z{KjV*eM9W2zZ&if=I7SqcRy|B>rNFKt=m0e2752B%&+ZBGV(_(2cd&0LVk2E_dWxu z;c^8yDyWANOuDE=clDWcUE(Y8(5trc)9&N%H_q15GlO>AnwzR0mOn>SLflK&ZUpN3 zwqz%q3NdP-et~>MK_+vNm}yLR*U-BniYaAAvxM42eptl4NPF@6z+wYI zX78F@x@3Bbw9%WlOl{Yg`qsfGm16R);{`iZ+v1D%i&L>f=T_M(yW&P=f{ozl+GoLwkoEZvxxtZBx+Bol^|>Zu*n-Vs2Ll2Y zTi)IkJNu$+Zc*3U2PzV90rqab+FK;yQqk_OW)cL+=RPCY#N(Hs=6jWi9Fv!n(%zl_ z*W<{S{FuZ8b%e(OS$#uX-?rOn{b6$DV6afL0_&usmwH~g+h$s#``SlD33Xn%Y~gUU zMjuD;(3MvL{7Wdy72C#-yc{No@u^9pf?;V9ZnNr8S=m+=BuOCD@?GJ?87Rk3sO>dJ zu4iY9B@%suCTcT%mj>Krbd9}h7D+)30!+tOaL=y}6HG2vhpWm|HkL-1gj7OF#MnZ1#*)asFJ<3%=Q-2&bFTBp`Tefn@4C))&OgUr z_cfmPe!rggeLtU%=l$4|sm&EvU_+GNsGLPbJSFAqAS$9ac3%~dFygO#dAfHgD(P66 zc_(|$#-F94o0`s?+=9xBCEh!GzA24QRuQZP3owJD=&cvNTWbx=H+qVMJ%;u6`#AM3 zulJhk`QvD_ikC|Hh!?H&7BnqZW~x%!2xtq_RFirB$vLU1+U~9-IP-A23(qWdD+KXc zn;999%-otgXIY~MTr3kBJmxm3_}=#3!iJqgTPDlt4eae11Wo*yVJ~bk#?BG7ViZox z(we(m-aS${PoKpeLQ!E)nzitbQ=)CjZlPAJA_u-SGf#c2p1JCsmIF)&zb_fs7O8Nl z@$*$wF9rVe_VK7YYQFOpJ4(T#9;X^|yrGpRXrAH2G0|T6&3guA8;YTK|A^dq7|p--JzE zYu3^7F}xjqtTnnLEC|D%*Yt9aQ(XP4tpwV1&_Ycek~<&=br|9 z-%b1U&P@K8(ox;`_NDi(VH;&lv`;rLPU?!tnfvy__k7gPPwF1hV2=j`NwX$r2zH*K5hy*au({qhO&)deSC zWtaD5lnBk`naz!Lt*WI9Woi8eXSVWj4Z+Vz%7#M+Czs_cmYx0t(9hRv@8)Q~7l%7X z-)iC&l^2u9d5UwbSB8wp$UPBuR6SYCIX?l~6WZ;XC*AT2TtDI$<1B^DkMzfq*nRa9 zo|OOLH%{FzTa9UGkuoug8~Q$gzB|n(cmfEc93wOY@%Z8+xS-~nfBdzT?E|pmli3*o zJ`1hL%T%sR`!7OqJXD(2$xYUZ;JDr$Vd0oW+!;d6AE~4K(&}Vy#UeqXrDFe|v6Iha z4B8~@<5l;6@#1fDjq`8f|1F~5;T{18KEXXe$(5OV0_`t0ROGAVxw-Hszx(!`3i6Dd ztM~=8Iia*z^$Wo9WdQ_UTCa88 zYnN#hr?&qn{NaZCKE=M6F)-iR9v?VtR-2hGVfefpISTht;hF%+_?}cdrIJ5Us{+(; z=e4CI55tP~4dv*WkEgA}7~7%N8mnLgrvKp$_Jn}_dc6B6%1tkG1){V&4!$SZ;hzq} zHA=rG;DL}bKk?P!+Cgp^{k;W+#gC3naH5Uj{>~hUTztu%V4?hh!&&#l@AzEWQJ*~s(`B}*t6)fz|GXVnp6xf`AMgj?~dxl92Sl75bm19DEfKMwf5b|%y z*XrMp^dn;u)%(x!E^FSplRYeVrc7+G>%-p@c&)bt?#5|T+`R9Z$}qPIcbEpFMP6Bj z`;XR_ne0?>aMN!dM_rg7&9n#5vp%WLe5SC4%~Cx+LHU*4a$=w+OL^#@u^&md<2Zlq zgVV?w>pG})Q}#Y5ft`-44}GrrHp3|1DXNbCB!*goGgkXO4nt8Aq>H?w4%Y$)&D zlw#v1@l}I-HKq3ELzQWpRNRKv%CJH1tb;AJyuil`INr65J&Sfv$$AYftqaKhq_bcK1TSb4;DSVJJs)*X z-{8!TPth0kMZ!b|zX*=M+A~mHy*R6KqsO4e>TxK>UpHVk6R{zpx2lqmqcl7Jp}kN@ zaYS)^Si08jKAY;1n?uGsa*R4IN0v03#HMu1DViAgpM0Qs^vlG2nC4&H$Ykfq3R5uw zLt9Oh;*pzXzXx&lNtZ=ho#eg6BBUY(x@=M(u3pwTaALW7RGdHBD}}oL*uQ(qmgerZ zjcpT4tv#gRJ(N9QJq{;&iD{?d%8yH}V*Lt1JhTnZA?rSy$sfJ}Av<^3$0X4@Z6dYn z%aJ}uCGbGdec%zL_t5t8*2}V!Y5gab6Ehp`4~4b(rlLK&yJ9Z654P4Av3q29xK)yU z*i=SjImQA5Y$qj~rrNu>Zo>sOH~2c8@=D>+vi7n0gn^~Y!=3R}Nu-z&g9tYjb53C^ zBBtTW#zD8hV1d#3IsUKhe6*%27D}!ooRrXcH@SAa@ZQew=vDc&lJ~1FWtZ`VAN4%g ze8K*^kv)_YPM!^2qu$@F2EAVS=R-*MPNMt*gHSa`@(9;Uy@<0>M-^uBiV ze9w}(SH6r?!+^P&$M>~Xh@zs=Qf}19{|Im_I^LE})cKf_0E6NN#u&}KEsGp4_E_ 
zNXj~@Yb+r3A~8(B7nBJ0NKxQy0a_A0J|AWapvj391mlS+69&${=^SVv3F=x{Zwo!N zOTd!3@t~u6;R4cugkiFIj!nF~#;OIpII%$C7_S1`cAFJ078<~5H%8LZ zfAR5G>w@p7p8o-8|Ap1rOR_{FapJaShYFGT5wO}!!adRoOJt6N>MqTni>tsAIZso{ z>F4M&?4G_NaIG?j zhgoS@hDE?fex}9+b8{n0Q*zup#JEnG(x97R@De_N2f)mDU7oh&AKzW68GYEf%+L6%vu)4_kG_azBvQ?nZaytB>78y{MDuy6 zs+P$(_!0*yKNUJ7WFfb(qx${@?aEWXKX&S@lD;HH2;YsxFpUA%;8?&sRVzpNi{0|4 zYVSkqWhS3JBSe5-rcs8@kf(huqsPWhX(lC*5{ddE7%uBozf4eIWPKC%i^(B#itDf-@_Gn>? zII+P|u@a~-6*4n+e7)qya*|I0xJ2DwO!7Z;iw$X_7J-@jw!2bIj*jD3`}jHo*4D9o>@8{dk`G z8d<|?W-$=pKZc^ynRCa10ep@11I!r*daou$?QtX$A>dg0ashc2zBuUv=)D&yYQpfN ztYhi71!Ms*gMJdia|6>K7Z53dc|8f3{@`OPiN-SdFoWq10f&H@)iIQN7>23uDb|0x z8BPDz)PNAqcMK&84doC4OaB7$Ah7XZh4E&WC~B_2GsQi?>N*B4BKW^HcnW-&Zvd~& zKoAO>Oz0DV@`VRj)fbRLP=T>9zAr%4A3iNV)&bUranv{XYQuo`?w}8IIJC6799iCs zc7U23fKqJF0+JOTL)3u>dI1-VnH>ff-E6ks166JArUa6!^S$t_8qej@W$w9Tup1n*&6h%8?dHgs}vm z$r5#BRv1Sqf;ter6sKmj79a^D1(Z&LI3IOuXdh5I=>@1J9)(=s-4WHGG&238Uptsg zuz}@Er(dgSM$_kXM%j`1$2ck$b`^JME0C=nfl;ILt_6;vVu63kJsdoQW2jT0tlVR; z{+i<``c%5dWBpUdQNZVjc<0X`So;9k-UXOy&BOwM4bFFU=a6MhvN{g8y*c>!(z*DN z(6Q7Q!g!g^&)(g2V>o*nBKYt6<{wl~>mBk!;h?HSceYJ!H8T7Se1}-s-(=u!G+?;YyYmDpkt2`xKXXmy+w~5>OlG5P* z0!yWlm*H#`t`=)crjquh%B($yZuJB#&J2lQ<0)@+ayOc%R4mFUoh>|Co!?|yy7_$b zCL@+49JbMAa0rM`E4k{Fu1ioQua@oosYMAhXfhY+j|Z~Y-&0a6CDY@!);>i^Q+ILp zN;yNbrbb~a)jO>Es>W|}R;&gY_GB-HIilK%!N0<_)y4nf*c{7YhqcYQ_S_2{);lmq2S9( z=Wj#H>Z!QVHOa9@SpSRTs4SZ`MUqE@n{Sde?{>HjRjKoP#*{_e)X!V6N2~_$*8ex7IAM%(#*d7!UV&iaXb4q%EbEXu(3Csfu9tuO!J z^skOeAfbJ~TdyiD*Ay(>xjs@>vsqoR^Uzk1;&iOH6;;Uhjx*oiZ#d3V$MCQ3t_4_3 z^UETq-$(Y4+${;i4d+Ag`ZwZLwx@cwQ+C{YVEy@jJUu9*5th={=Wa* zZ65s{47xfmLwfoAQQ8sTp;Y58)XyO9CFph21M8`{aNjk zkm46}L5EHSp=_S0_Wz_>UZEkiJSL}p-{YFM`J|-({_dq7{N;mOk#?z)Xgy`4nIis-yvQY^!#GGciv%% zITlyv4YA>z+SW&C9_#6bWwTp2uY@@NUteeSSqz;vkvA73waqH;EFSgzDtn+k9ce5= z&8J)|T`9|Ct0=Hj7)p`gOzV{*EDXEu==9T9pW|1*CY=HkpeTUAbNm z#J@hbmMjaRXP=_RNUGycFid8=Aeur@1T6_}RsV!>(p$PHC>Ed&q?0)^L8%l~+Yk`d z9bkR(nn2gB+(0LJd5k0i4Y(kJHxpb-b^_7wK`8F$s_$VVKsYj&HK7l|5pCuGT3W>C z$|2xlLpd@(YDP!F8BQ97mX`FHQ-mLZTTU5dG!u?LoCC7vg#(Q0_UWx6O3yoKF}VNe(Xu%RmR3xmRNuOsuprc3l(?!j25i{r29 z?0?F@FStXV!Rf~j)R0+67@UDL6k5%(owp&Wbf;rL^SRL zfs4@yHER&~Fn(;78v2Y4VYEQWwMGd30QN;nanXD+p9Yg3JJyLg+xmtGJWoY4H3g8z zYZm$(e9#qHV?`*!w-`MUU40&4ZNM$s7_3I7gKy-3{PR9mf)oT$HCI?OOyQOoaM?G) z?Eaj(!Vt*@md$HA0E++-`ck-6{Z9lqYp?#X`d1398+jnWM)^Poq5Tgr|0)6>zh2A) zLtGCB;>mgpaXJ_0S5YH|xIB!35{a3F}@r*lNDHN0f>~>>I1CqEn@$Q zIqJwm}qCai{0d><@ocXbq_c(8XcA8h;A$^!K`gr&RzqZs;!e)h$`tU!FKcd_N4xDZ`e zoMp5?8F)!oAL{=ThcU2fG9wx z`idi~;Q++gVCFB2?5CFneXJR>g+&P~jyoRX@feDdaAZEGFCqp1{sau_#x+9^;r~GB zzV{;esyZ}9H?HJqr1mu^Nf&<9Bwy8nAAt!Rta@MJ$EPUnriHR;Mgnxa%#=;Op0SILC62k*+eJx8rt>~^mt!q%Lc1-{oemn|n$Vs|} z@D(n=jn6z1NrnFL0h=(?(mW zGXn|^Z0}buk;7mr8+n0Y`ie*wbO6jo{ufW8vH;ox`d;ltB$5XTFoNT;VoksV6mSXd ziCmWIz_ISP7KAGJ{3+;vumJC#ViHB6T-Z{6s*`76UC}k+)wARoZb*JDM-~vsGt!WR zueU&#JhK~;k0Gf|o;eQ5`;fdqo>7G4HAtQ#>mebj0!cNp-XTZ|mm}efq6a`KwSADh zNY;}803E0+Wlgf40wm$HRX$H{WQ8R3P5CUjkpq%9ftNg0*Cqr2iih9|dqqU= zEd%W6LI4CKut@{h@evdd3Aq65s0X>g-%JNP3W6w-0e0N01DH>^0t49bI}9m&r~q~> z(S#piApoZ}9peZ0edD*fW`Gu?8v*PhkrS}wyU+}@BNP1p6&Mk&h&Vt8=tcxUJJM%2 z0?ZMqx_z8b(-8*R5#THcAr3Uh8(l$IGJU81p&emzzny2G9dAJYz=qMDs4I8^GJ%@1 zt6|LoFk?9fdPb^-bvLXoI>ZqOeKnx`(sJYm02kRH7jrq%TeXIjAIicWr=uNPprw>L461Vbl}c@Yn4LV zG+nEC_P^i-O6wl>IODRs*d5MWn8mB@epTkn)z0Q0bv<|S$15Ec?reDOBII_tY8z_E z&u^goroeTUA!j7=r4|3DV34=~a2?9i_=av=9Ghh8Q?g4kTeReTJf5uwuULCzSMaAX zSKDPT`Ve%Z&|*C(w3c0qZu;POFS_sV4xr}s5< zZ0XuaEJ_jfr>>@DN}X5%n?aBKYMtx&Yh95H9Mc%!r`P}uK`1)h5nyevZ3hBCkqQC^*(L^?$OLFcI48+ni~~i=O#jv-IMJX;5!FzF zb2$s?UE~Qg=u)+0oUf11m#(1|BweKf7P6!a 
z>IIc51xYbTf=acACOn5EsMO5MBFhq)FLuJ-?e}40bdtwRuSV!OlrgIM8ScbC(Nn!U z^jL5q9{!kW{F`8_j)Bu|7pA1@u7%`<*AZSJH1Y1Sn3K1SQi^>yhq|r1PZK&;azjV3 zo`#j<=ajEoXWb9{o$x0ocD1A`w4Aox`u;q-cmB_=(G_IO2K=oSZ~J&j+B8pZulm~4 z)LYAup;tsSqO>Ac>pl--Gedm8p4$db0)~5J+iPL%-f~oHT%7AwqZ^YUuC04JPwu>a zb)!V{Ttqwo`r`Ktc!Xd)Di*?uPM(^Y_>pm?pY-1Ky8q|BAG+Ie?M=6OlfH*^iIZp| zw9+)uf`c+FY)_I|+bUUrRepxUa;`u;D2umqGn}=8v^jVHYph>2wE}Lz6dApzPUs8U1?)NT%$A2#UO}Q+s(m5o<;NRal3CTT}?$C_k* zq!P7tN`O%9sOY8YmE448{dR=E#__DO=$Il(%C+cvp;~x!n#F)jX{fvVvQ@$@b^BEp zx>kmD=gnWWO$a0lg`IwAvJFYF?Dv-5g+d=p)V(!ZV!CX=<$5MNYFYrN@KtFpDgUX! zxAes>VITGX6x82dY!cEOdG_VmOYl%_L{`s7@4qFUKWEyNr5TD*_Eeck(P@d#EHK@2 zlk<`xoXnGRh}P&UlF1C+Ud>IC(kQZAe!-G)?ZU`)6Pb;ZPwT+b|BPP(x&07FeAXF1 zo|KAuV%kfzmETxWepm8vK73w4)W5|E8-ISNr1NpU+##pcO}lc_Sbf|*E^jY6Tcx2t zQLQ&aDva~ea{o9D4gJKXX~mUu&yI=%q%&~Qt=j88bL!O_tL8^O5snX9xP2OaPED=& z>RuFwJx0a`^=C^$oB>VDhqJA(l*`L--u%eSMuvh!wA9pvA5CIO=`wJNRp$pJV7VV1 z3HbP4rpBy+h1rkp{3K(A?%0>ft=@<2kU1IK*HQ%~QF? zWIraZS^RnC;SqXx!{xqsX0Yj&d04A^>zwwbayon&oy>2hZaS&wB!% zkB$jM+@w-}*ycIOBWuXkgS72U`=A?&e4{Bt?d(g20>|-MV6(A0w^-fuIV4Gm-FJz< z_S;4=dxH3n@(X=BH(b}umrD2Ss4ySgud72WXxK3e2NlHi3*S_U9@O+CCe`hR(AM1w zd}g}8_difpE^YRj&-|&r6qMg5lz{C&V#)l;E(TDcQhF3E5OO zZ_meTe|pc*#%nMUyZO>PTh{jo6l%E+1UT2Q#?ryo8@-+Lm^Z3xLL2u`Q+t&0LL*Fk z_6nYycQSs9o!KhMf5H1P;!@)o+yl4`aaR@y^wwJwjov&soT@OWj3aWyNInhS7F%z^ z#+YMO+Qu66`=wrZxGkx^;#;c^q+$vCgC7A_DGNe+OZ6s&sGAo;?0#=*#l1 z9;Mb~U2amS%5RS?q?)oOKC62DxwYy}PKAz^)R|lH`6hCH#&F<)(ZGn#ijA)4zI;E; z9?h#$6X2RBd#JS;-8zTmg4=RG#JQ*)TT8a$e5`J~&o{~r?-};@o|Wa?vtfbt)Wu?- zVVkO%c7TlkA7eenfBm1wdWcpoq7SVRw}UE@#7=jA*uy`lX|XfKwt{MFq%wmP@2-r%^T?1(-(>QakNzdQoffUYo;{+^C=WK?N#<7_gpxP=I@NLOSg`^XJb>4NljcoxTz#GP5(LabyGc{TT&zbL7c4x*srg0a_l;p1nPJjbeOw|=61p8o!i5u-x-xjg!Xaefo<$P+A5 z?=+3iIZ*H>*ACCjuNpi1+jzIYs@{(@QhSK3L%nXv@?YLZTlEHd3x!EIyWta?b*|x} zU}HXHw3#nHyPmI6ygj;IsIn*cT_-kZ<1?pd^l*&i<9g+piwgy#;nEsD%^5G#^35|Q zGv--l29$BqPRxz)-6%fkkaJ}}O~mR!$ozs%O!Syb01{47m1hnchnjfW`(p8MVc6bQUfR$1sN)P_9@!>`h<&(c&o5m2O8IoZ2?Q+bP)l zYkaGL^ZuEWsD#S{bt%7ISqx;6M_d-;NQ9Yh=R0EXY)YQfo4preD)h!;D5Dev-92rv zc$ltY5MX&vkc)?xat$k8m1WF(_+Pgd30u9KG^@QGZEBR@o}-jAY(Q1exX%h&5kfk^ ztRRrX^vUjsNSvHZ*#vuzx8+*z>gmd~@7FWM*@#8j#tsqpBJJ?ssu$6S@JQ`59r;I8 zF+y2NA41Z~c=x$l$hTuOz!Y&TT9pZ#qHEL9#AB?fv?-(Dh*nTF_>hzsA*2@P;IGQG zOHZW0PSHJLdF*Tl|2AcNy|?8|#*=FKPWFWC26?pcAi(=fY68TwCT^yh*vOdHIxe)b zv0u{KQ;v)oNBJGHRF_<4>BDOnE2gFesN(CL z>zv;S*+mo@9P0=HOLfbLbI04tbaToo_o8s^_0^YgfY|q8-is&eg<5AI6=D~ro~O1^ z1yn|btTwj0~@8Lo9LaGtIDHgd^K4BYcShpfnPQ=4oTioDW#yRCBr1Cje(*9umv zlH@|te$w_|$2jhzh_{2=1Y|Fg7)P!!6x)?6Qh&?R!hu(X9tosqD*hT(S6_~c6-jj zx8Eu5?A6HTzEkkOoSGs1*#t$vBUD|WG#K0 zct;jLM#lKJNo4wEZq%T_kTBPR9>(5_(2aHo#P}c43P11^^{f%i1Nuw}XJ}5nm>W|| z$yh46dEGSD{+74m79Gi#zLP!plv4($>ec<$)lFSfl6|d*_ARlEw3hc>Ry zFp*k^@NKR2vw?wJLd)RFqhsTm(aZ5umcMe2D$?pc?8}km!9KSo%G@7+t4&KSQ!#f_ zZE>=6dw;^9?`|J@PdfPP`aA{#1{SLM=J9tcioeqU{A$^@^yeN;*7XaOUnlZq`!4U3 z{g`8bdqiY9oK+^ktAc&1b=K;V#}lvtruFxmK*)+h z!*ssX`qa2njEeWM^)j!DT0||j!yrQg+0M`AbdhtxLZ&(* zX9*LJna#u;EJR*+O;{2l19o&J{61vZoP9`3$g;G&645i9EqSYNBZJ#tf)bf4AIK59~3VTwwzQ$f^EHnM}tGp^7QXSxuI%8O?vH9iwjH2I{kGtn7fF`B_b{zMDw!vxafG3)U1P zZ1Em-NJ%wW z9X=)qSaw7##Fz%D9wUqM_(OH^*hHDNp@+r3@;O_U)q;?P4T3+L(B~tB2f@J$ZXr2s z$R1TDl4SvAK`;{sTyJeeYc9KYgSo0Fvy|p7ajPr(h#`^(r$xCNk_aQwfOS&uyo)7b1Pt37F1Y(j z&Qwg6)VAfC7s=(?+V8Dv-jtyY5{}ZIqaC}+F44bl6>g55`>K>agZ@N>F5vh}wdOs2 zR0ro013P_cy8X-0{X)}^5{hJMdnV-~vS)Se<7g^gb@3hv=O5I(8r)k@wBDo`@3~I} z^cz^z$G-pd;rycG<}Bsv^iXD$hiWRa>=tYO)~T{A$jLcDhc@K2A@ljbO7ma7ELdKi zu(A~|m#v!U^qb_rQhFQHQ&jkIwK>35>Y<@YN?1Wh%;*te)11$oMd2kr9;11QkBEy` 
zp8>273}pge7ps32pgmpmGqq^q=6y?-=81PJ_{xG1eAmNjjL=l*0ezsUyY$Hy((I#IjmA{xsNUi|a0WmmIT~@rr%i510@k02I}DrXyT(dKnpF&?;*uPQ29=xx zErE#ujx#ePGD(Ahe-Xai6V66lED+~?6@Y+y@p00JeVAJTO#s33Nrczx0F?;E-qr;` zA$;#IpQ3iA5MFb_k7Vp^<2BY>prZ^q(R(jZN}fj|5n=-YP2(tc04JL1#QCZEr=CR7 zJy9#(3rJH=x*4^Om1;hELw*)~j4XOcFKxqMK z0UtA5zRW19{-90}v*k!$zySh6w~y128QdrlP(cRZ0M#jKuv63DCIIN9F|?HaV8Jt0 ze=TUK#Q^J_aTJ`N5Z}(lmF!q!ghM}JR&K9vRdIJ&dp5F~%XxbIm&UW05`p#&J>DAB7TIGkz^nz*&J6pXk! z^gmd^ExHM^*7T#t3v-djb=9A#o~fC5)7Z7-;&hH%EQAnNo98FcGa{7)_se-0^3q8@ z)UHpt`#wRnwF}5hhdQAvMt64nn!Fp(JDpp*FWfRQ-&nSMCiO))9b!1TA<6$%m)~sP zUz8w(!0GLPVsytbH*THnr%~vb7Y0^P*|jb?6zBe2%spY63W%L|PgN5|t(}?l6pG)y zRs_6YfOU<@@U>CW%bW#YwhCS!-VJEwXd(9b36{f-GCu5dJue;WE^ zudb&;#LHuYshs@U8_E{E+Gh8{?Uj#c_q{NP5xafUcukrswlK*pzTv)EoLkZK*b}V( zk#W?V=funEKjD0u)TXrDdw~!Hu{0~#`90*80-rc>D|Co&bR)y|{a}pCa@75E4b)(2 zrc|XvenXk5VtgRmg)F0?BsYzQ>eh6Qyj?QSUB}Df+?F^m9K|&#^}4x~*qZ2_fGzlP zy|wcW*GrQ7oaNfXTg^I)M}CcuIlhRA_K}b9_#JwyH~!bSneDvZ9ih&sFEtTLmQISg z?oAIKJRde7ur*-4Qj_*oWsQ%&`UeVh&(bZWHfUy|c?}e@ppk8<9mJcl)=pF%zTyc@ zowcDORhV=%u05R8$tN;Xq24*rY=g3zcKKAYY=Y{wfyJW+SokxA#5$q-e`I{?ZtWxe zmCBUZ+Ob^i5u)56{dBkk+r69Sn;oq$F6SjdEY`#Aza8+wM}-o8b$?8C_A5>gqE?d! zcMEmBD7X&o54c-8)~_$hTqLGp-1FYfowG9v_}Y`#i|iFQcfBZZNnrgY1^q zJvQ%UqOnc^-%9bxS&cZrtgko@`JfH1aWhMeIw=|6D&> z$G5UH^RqRe!PsJ7s^Syz<$La1ytR{;J~Ky1?N^H52Tzu$$u1yMKPXyp+PdwxTAn@7 zf1CMHcS7aP;DQO-xB0UY*qa6iV+(6yVgZpsKScDUV-&jgC=?ouPmxfP`%wJ%lvt3o zyY3^gTXHw})ea~`Ma3G_PFT}YWV$w9A2!hlr;kWVOn%UQZjawXjr?tuUlxD;Hcm6R z!)_VJe%Sf8kJ;^*bNrUoz-{%>^vLa@BO!ZJs~m56wl!r{(wfIxE|+=@`R7`b0|R(^ zHvdeSr5?x0%D@A|?h&~^QuaG9{eI3fS)Xo2cnHXX*kpp?-PwB?x!GR#k+#gfp7xt% zjcXfak^nv>0V-FQToOK`eYVJv4)ypS{2qK=x7REH4qZA@`RZP=9BzJSHIJFu`=7qe#0kB%5iX(Gj6WRoHPOw^>THt~<9Vq<>ob70zX1cD!ZZtef@YvG+ z8>y{OTiJf?YlKw2VNc3C*LdT(nJW{jMB;Gsy^j~T-KU$9%Sg}3{Vm~Y3dG5?qYK*9 z$m-?-%_z5c-K2B}6?<#(Pdn!%HNO5GiWrRh>tY z7kUAFlw}()c(E3<^YhL>Sz5&A5wsN|yqvq-A5;ns`|Sm7=g8{lL(MHmxbMfDaq8sT zxAf~Rg*q1XURE4X6pd(5x0-K@M<7PkK5M5@E;UKOnLP?m$0c4!BmjEbOw_<@FiQ1+ z26)3u)F281-tqaANl(#DvtGbJ@EuE*@m;NZpMno*yu zIIRRn7F#~6Y!41Z1z`a5ccK{$NbBPN}&F3ThPVZ7&CIAN)3@#B8lUVojqUppQ-5eaAUc38^>lRAijm z{8*tL!Z}7d=$^I?Otk31*wx=v@>bjdSbxWHQrk(A;tu|R<%$X8!`Mhi>n7JgamO6m z)Gp^sDdn*@3hioJ_8x;FNE|0|0pumxmy*zRBW>0z47#UeK3ke9ac#;)#989URIAi* zT|8+x>u~Pm8~Y3e|E9dHp+n$&8*fqCTwh@R>z?YBmf9jAXMVf(WQDP1!Iw@PXD+S<#WlAryeR!P58EI zr>a*1<1L$uuiS3;bhD-=)npaA{*Y4N*lVi7Gu=!*ZV*YiXEC}g4Di&UH@ah?wcHNr z%8#+>d)ncwOtA6xusoNis!hYxe1E z^w%st724LrbnmP08hv{bm|lDIPP=6T@Y3K?nq6LTsqEa;+qPo9RF5=YRjM^AtPUEs z^IUvy3cMsWpF6tXMsl;>3Hu1bV{bFW$5}0%&waA?EoX(v#xWrVuB4}g@P7C5r%{&b z&x?99X%daQ0W15{5#X9OwY1Rb8#bX>Cd^-ecWQ6O-BNcp+HTBI-Tnz6*wu%>07V-+ z_z5XjlOEuIJU_4a6XvAQ40k}1)QB**`~z@@b@ z9Rn`uf(Y5En#y}`hg}RZ?*#@^KHCO_izz$RX&#lc+8h3Ndn@qtefe8!{Ko060jtYH zsN<_Cdk+5eq==FVT$k~s)QDx%(73dg0}tGKS@h$IFkzdt`w)m&3=W~3TIAy;3v8UbOlhl_51!>pZvR$sFKTA*h;4V@<{ zDFsKQ>TmU#^S+;>NqjxUwV+I+Oa!wYsL2>9rhJISXMW$JwU<)9;*<=H|;LN!;68KiqIeLeqWx%0%{$JG|MOsE9i)+eyV)HVrI0 z%?-rdd|_Zc7&YF^166(KfT}XnJT@zN4WD+&Qsp8lGm^gL%@KOHYSPn64JHo8uC-os z^ff{n$aM42ZImS~rzTL~EMF;1dqe#~NZ59Eq5UekiJ5AQp^B{k$#$KVewDLj{$S_k z%YbW+m94ESGZWv10kL`Z8o-PbvI`5=$6lAanko}><-N=%LL@v+QOTc;_WLWjL_x%y zow!xvZiOS;PJC9TJuRzw`KS1)=aUwCH7@Mt(elvcZ)Q8V((sOPxj8cey`32>ss-aeU{sNap8yn<@?zkvuL87o z+UpyE{wsW2*W5qSlUD&x{necTJO2q_`9mi==(u&j&-mpZWT2?Qw5|3qi-FA+0YL5r z9!4EvfJ=QZ&cI&7+k5ZY>KQO?I!ad}k6x@?rIh_^CL9juyOlpv2C&M3(NW5$Y>kK; zFJwl(y4W5{DNG9($#GIi3u#!{+?Rc<2)_RNZ{uG&9kmRLPi{{8Y*H(aDvoaMxn`u| z%&9l}A?YaoFLMWn2NAGiOesEX>o>ydJ|x9$7pSlxJ{R@cudD>Y6LSmS-Mu8;EAO^w zZn0{4aS5b_%<(EP8F6BXZ*(s#Z4&SM__0GTWT=d$(;>^W5A^mfe3u6#baV_4TyG(Y zh^g{wn4=%KtTH 
z%7xcZHk_q$@8MhdRYYp`uG!*rnj*1h6taStg|YHaMcglVm^lou`UQzeLVr=F2h;8i`J_hX6$awPw# zy1sHGf}qrW&V)~yT$g#3D(38aMy#K^)_j0=UraG7FlMdUMd#z2y9QM$9`A08*gm$y zBi^;SIo`}!6=GTNy1#<|tsoTQ&(|&r(adezYkE~d_`}JhS2}Dnh&FilJiA3@a0Vgl z`f+sk+Lpxay{YrAvDcQRB6{L~ZKgb@7>~=(T)*C3tZiV#bDgR#S$}SytTwD|#56Yl zyG@;$57qa(OJ0<#b=nVFYuTPdQdSigy+5Uy9Mic}Cj?&xqQGo<9^-r+5;fbAlsZ@Nl82i8?D~}bFqgaxN$xw!!SA|)R}rx@ zKHb56f+)Yi8`Q1Md+Cfa&Od%}h1PyoC~drlhc^CfH*Ipfd6L~ht}K;Al{4(ny$EV^8Uf!w^S;Km-WVUx4PI zcj5mP{=aWDVrPlP1W5$qJVYc#3(cK_V4uGb+VOIEU1b{ZNC-Z6UsVdV5)mfGcWmn{ zi{D!Xr+?1W%HA(Gc{CSo@#3ZA_kSVu%1h z{eNEr8;;8eM2LHFukYq2&sL}`74dq1NE~8vHM;um;@aP7+WKaZn=(S}0KJNDx;p{X z4+Z)~*VV$iB>w_bDvZc`7a)oVfZP1F%uRV=}#|l_G)v*zW?w zFk}~6VgQgykEoZ{JbvK!x*?L06QJN6O{@QMKE*PXp1dXkZ?H0gQp7F))d^Vb zUC9?1b(X`PD*7jikrUzub|tzpex{G-x`{p`=WPgKuri2dh(NU7x0Xdl{TyLfrU%Xj z|HPqwudbU6@k9v2*mtvoMyxGelS+JVHl9jM0l-7N7Wk{h&> zjP_oH_SUp8y7YRdBQx)E9Yb6c=8H`wk0D+Ix50m%el6(8Jn>ey0$Lae5oA7*)ez_4 zcN{_nA7O4JLGTHn@`tX7(1Snl(8!v`!Q8g#z}wug%s{V2R;NHw9}l2sTxy0qq1M9?LoR3O1g=Ab z^8jmt4!H|J{Uu;H2)rV40M;Wn_yNw>2(iQeO$S){HOc3pS7w6vz8jI+;D11X^_^dX zd>*0>z=o|aN?L*9_XE8DK%}-9BncvnKoofJ$FGtT-Kb#M41p0PvJgE&#@ND}rG5Tj zo6;d?1A2G{3}S0St-}nA=e6SP<5KNfbq&%WklUaa)$sV^;Qqh3<{6A@myJ`uwd#QO)pwh-!o9 zV%1?(=-)0p7aK-^nj{^Wn@*yZU|Il5t!7gbU=E@HrRIpi^zQ{xfKs!j=wj)ja7;aL z5_J(eNRLUu)&ykHON9uB=a|HuAc_J}c&7+rDL5z)77>a33XA1G6q@5dO#cyRbrrmr zKp;B^gQy|+Ki=wogJ)p{!NTMQK`$`Z@V)wXilV0I4?bUrD^ZC=(kD~>0NR1YQk@qH zz(QQcB{B!RXDhhNkvZmyNI4A9Q<%etk;rhU>jBK+@tOeobS{G{XNRRaeLCSo)r-d@ z(x;R96!lJr+(@5J35bbF*0rHer{@6c-OD2M>2!dr*fmiUeLBCvxX(o*>CFr9kU>+>@2 z{~L@%hA#dk6rYV1688|?Hk2O5C};cGxJ-+&H}Xzft#|QB6hPzGy%cu?s2| znxX`x2}p@lDTSLRpNrv%uIsN@DL{HqLEkzLnCAdQWHkJsK$*4J0< zQO(Mla~sDipjz2hbFdzJ+$)|oO?)xyvBOPK7UhA*?&IUVwp!_>+;L1-2=@IT+FJ}& zpr4{VRUgsEF!CyUh*Uzcf+agcW&Mj2YWIshDEXkpeAQZWprgd<+>`cd%AjSqMv6CB zk&3*T=?mY-y^wProS3EUMBJ=F` za}O5+UGKISkJ9HqYTzZ`+g@vNS8kQ+>PuFSn>mC(UTweU(Q3Q6$tBA_!3su<7k`Ca zSPA=9Q68m~6?$HwW$2xICQ4p&q&~P2w#4Cao{<1yWU2;ULu^t&-L0;^=XO&EAU?EYk9a(_nw(LV_O6VbR~eI-yvcNFEtEl)O1AOtXn3;_oEn~J>*zS3 z36)G%5Fl!=eK6GoBMYtGK9IFK1%) zeeYDbTIOH(hg|7rD<)_foUT!ahN90RXf zk+}F=m%iGT>CMANZF@Ws{+f|k8TSJDh`S=#G+fkR8c-!Z>j-^#=ruc>s^x%MY+G9v zP(hsvR9$shG!}kR-;vmv-{UojLIN}cwgC;m_WG*i73!VEWhK$`Jh4ac5A{g3io$hI zKWnrXYCXXv2{kk3EQvFbnE6{jXNHpYg_^bWdeh>X#bHUDDOReXts=$J0q-vDoS)m5 zm(iqmq+^rKqjAPeQk%zJavG=mg!_Ym9_3Za53BqvTEq{Vhm+goG$T<4XuGGgCjvv5 zZSxaVWgirI>kaxTUwL+Wsi&66ISf2r`pS?me#W@ET$AicHV@a{`T39Rp(7$?0s3jV z#pQ#!_r?R*YW)|-8%laNEl&BmPhFMvx^DE=TGur;FIjRg@+wCBeliYIU-2dIO0)qG zmWGyK>)3JseY}ALZslcjQ0AKRa0nW`$h+m9G?|e4qp!AaEu_w=KBP@T&5Kcth|v>4 z^H8^5S3I!TIZfn^Hztgu&-A5oH6GB^jXbG_RBA^>g;?p{5aI8fuU}SBns_hl2nvTP zIHjYn5A*&4&w&$v8v@m|R+udnQpaLv_D{z=E|CIK5MShHnAx)3o_txF8%GxZ`GYYR#4m_`K8IjRYqUdE@r%^Y;k!Z;Q1z0;ksGg zEnc99a+6g~9T+zg*pXszQG$8NG~bRkAkDO9?$fvE*lRT)yDo?Iv-nWEr`5v%jHUw62`ID(*^n$2!hihZN|Vp)!MX>b+*1gtn3WtG4+ZE zwy1*OzxmLu`cng6{>oEQo~E_Go_I?acdK8Kr5^gs+Qkfe;Vh$vY|ku**^Z_d59exGz&Yw4xI>;S~- zcuTJlc_$voTSzprQbi%7Y+yMi>Zsg5neQC+GmQ~15|6Wy(z{Yi&R{9iK;X!ej?TQtqQmwCVw2rXq+tRJ}(>WqOMjK z#ZQwd?6_^ehQG60D@v$kWp%Dt*nEh|d339zqb{?;P{NVqWET3N=fVR#Uz=?`yN6|A z)3eSu*t4qxD<0+tPU8GJX}4!JVDY3oPIH{3`z=wt#E&`Pk_X0B@+s!K))&@1&ANM# zUfErIqo+ibFvcFoFMq%Kq)rE2=2-&QEINfp8c6Wm>8sfCrjsmphIN(5poPQ(zV{)EG;3$ z;jL?2)Z*S?y4S5Ml%5>WKUr{0OjcVWt7p_%cTtITMMw?Vyw|0K&f@jD?2olF&U4VihkiJF|6vS73`r;$ zJjgFzx-rPpa6n(4`9v}P+n+-YCFvtPuYeW&+6^IbSH}61@;Hbag}}4*=jqvBRNwG z`#^U{ztR)GIyN#fy%pYlSD;q!6~!IyO0YIla=ku{C`s#HA=B1tbl%a-;wmOUU&Nt~ zs{vF{b0TDqn!X#I_qCQRBs>1_T6nfA?0^aoJpRmabzQ`#YQIsojc%U{|J|o_Fa$uA zG4%{%dsWMlaEfBmnUwf@BYjmthEDSrSm-BhF`i_XA1Ah{)mjvK)|=rI&*Tbh1+30^ 
z9J=ph^%n*GQnV4qplZMwvjvyl)*%~TxpgmqrKG*sGl6;Q{8dN6YCS++x@XK~@}RbR zTA6Ir?$8F6)lUz&%4u9nyyVLBC#nHFKnUWGafu%tez+yV6qV3hNj~`CM&1+s0i3S@N4Wfn^ zpprZXINk$l3XL}c0)@u7%29#aZr&<{k2-zqgW`RH-ospgn|%8T-3J1UxP}0oL^ui?TQy$@0;RLZ*g-er9;X+(br=_d z3OiU$Vlk-Kv3A3GqdW(Jk15%ekO+!9!HvjM@Y{Vu5!?8vj35<)Y{1~!0+lv$TuCtUF2H1gCglvAVG4Ba z&Mf;?!5Ol7koIC;=XBrRC-8oPD_L2jU4E@g~UXNkh}mtk9JBmemwOQ=3L4>v4ku3JYxW2nN^+`dI;L_u&#* z$|;9Gqa&f_%MxMmnBy{s5OSnWRO6|q9&+oL?tJ-m#Kr@Uq@;4r*R52MuFKs1mg z=5iWMRAKd`&ys^c$3SupBp}(N8Wvw*P^-B6ag%M#r?T*QdQ&=yxxiq;~tsTTPB-e#%j6q~}@ z0W;*A>;XmH!j}t%aUT^jIDMW$2WaBy_&ymNZHJxBi>U4U_%79pBp*VH&n_AkBq6t) z66{$qb~SOJ3FN$_xg7+k3Vs*%B^)CJTG%*_l1 z+{cSKdY_2edu!gi4wWJ!qukI{y`)ZdE*a>IDAL%A@Bgsmt(@PzDpG6O;G$1+|E$QX ztnji-HsuUW#0atypGR?dU3WwRk-~8iU8{XPSBZN)tHa%vL@^|_TGK}Zh?_HS>J3k0 zR7S5EGZs{-hMP?w79;dJ6?lZ;^JR6Xs!_>iB{z*IQr+|V>!gSu{D}sJeP3$Nb3{b@ zJe|So@~#K{^;YaZJiyV2jQCZcQ*rvzICuJ_!?XJMB-D1JQ&omtvN0V;3MRV!AVo3l zK8i~$6T{U#M_!FG-O}BTze#VmS`Pa{%5Y*v%x7+X{VJ<7g%3?pzEvhq6Zs2CIKZc5 zmW$!i4&R6q;J8)kmZphRu=G9xmW~I?;g=yQSolxsuA6#B?XMe9{rV@4j&B9%g zLY~Y9^(lcrMPDj$7*u1wywPg?gJAkk#gW>W-ti&jTNg5Do>eS0nm%8o~>?@R=pRH?FFa28lxwNY=P1%)e8d8@8HL>vuS(KcCfyet2ap6bD!rDAY zJ>j)3=f3Mlv=yLCmEIj}p){XT;8hZ%9}%0;3H`YXYcAN?cwv$l+yM`y*hMn6b{=`L z>y}7fOxxR--$D9LeG<`(%dIhDX|8X)&HBqKdDZ1jMVeO~_;6m1_gt;5O;&C$r{n6p z)-h^9q$}Lp_jyWF#A|iFGm=eK6EdY6dT%d#xrMK&5sGfaWo0s)&qEC?^$L-&Z{bPu zl#PJabRnW_C`0OAI6V($JQpp&t+QV@Dgpbv9D`)pd^W8vb7Y~4tif9)FB6kZXx_Ys z&>O39z$;V>UzX+;pn&+ZbYk036EX^BF#?!=2}O*fn^=DXG=zVS1cYAJig!=YOBUuE z)`~ar68V-R05K##M8zrvxy z>qGq~ZU!0Z3Z7+_G#O;HM8DElb!xpRkXjqdWYvA)^+vpyC$nnaum|^FCYM?f}s zzt=_w;-FLYzLiz;E-r_@u5A>KS(&zPiRG-m>{)4Y@@>GfAaVkg*peHsZ2b97iA{>T zh;;^5s+}Cr#X19|^?bC;0fE4d03uZ1<3AhY0=1uK{}50E!%@sW@yFB0FV#X}BqYyXFh4)9RF__iS&2kA-;q4|vyz`o^G z+$Gkq9zqhjL~Ns1kl^uob!9`z?IkGReWPy_#3iBWxXwn?U?sM5$}IQ|;x0U+k%bLa zI4Fw5vN1NA8bgugyzWL*P@4MiZi;nTEWt7D){V0Qp` zBtP|A;@pY&K&kq#GUeS+C53_1f0d_=YmAi%)V$0Gj<$eD_*dG@uyQ1n-T!yLHcFrx zxI#^BRuk9@u$rJ>#S4yF`ET-?w%#6Vj_p!Kh>kAOdR=z2eH#I-xT&KEy?4#O{?Ce| z4JhA0otTDG-p6Y|*3B205u|@-o=rN!W-G!~$$`Da3%Q6BI;7^f^wF`jL&{a8+wurI zLO_?GVcfeF@7Wb3h`3iGkJS5f7w@S$DIq4a1#uj9g_h|$2v*fX?;`Uk`#d}16l@yb zO&`SQ^0I0nu`22I2%k+|N!zCs)r&)2MX!8y6RNYbAgs@ zPf<{9F!9`1OF1P{DiAV8rCQA#iWsev+@-YkyVeDX`9?J@8Th8q8(n*Klg0_%@n^3b znU{&oj&g%XJFQ-xG0=LaHEkhVpcEsdc-`>G1R?=hCW{{Xp8mC~bkZ#lF>LsTU4Gcv!3+)NO*yi`;)mzHbDHCvyJuakC07AKA8HentIcxEEu#0 z%-lNxRyS;8*yPtcj_G%N@pqp+L10|!nCJJv6N!RFSu8)q5zT2ZC+8(5m5O)!{tgGSgUqSKQoWaQv?c7MI&ar0umu!IQbrPe~vnc!k; zVTM#X%7<|uPuPA_Av-T;wvO$K%L9t53CVblPRC(4>lVyVf4I?vkaoYezd_`Hv~{2k zC3cW8#TuwOSxrrWs2Qvt8o7wPB-?X)`OsnOi)pBo?yji;_XCO%xpEkZsh&)|_M3LA z)^^MHOY4b03_c<3r6KCuDU#km%eS8##HDzJN2yJHs7$e0E0#c|b(arr3b;Bnks%*t z`7=l7?wc9pHmZ--_Gp9sh%gAnwpe3+*P;eI(@Gn*uP2Ye>=I=tt@N%tv%B`zbO+S ziPP)tw%pvAdI5;Zqj@gJ$uR4%1anV}irG!Fe=nFPY}yxRGDwPYmrOX#bSI}Bmr{BZ z_ke9A2>O1)B(1+sa>4u%qU7f~x9#|7INVWh^xBJ6tWal|z2CjQ|i%7f6q*%589`}iMA zYFIx(3eg7&ZSacsp^)}PBpLs?XbhE8^gu*|fkaw2srh1)FduedWgT-VbK#21RK zR%WSpmhP`uH2q*wqqT$Gzp~F$l0}jgdMj>4B}ZsXFH~hPPF9fHrF#VKVb~C^kUCh5 zYS5F8tTa_WQs@|X9-))WE-m}gIKKAkF$-^kg$*=<8nEKH)9u6ktDk0MUe|1q^X?DK z%*{n0RNikN=-0)&V5^P3P9*3iTm7i&b75>LreO)DRQy^N%E#PAS2xU0VSKMHd2tt_W0vWu<`Uv73cHuZ_3WiL zfa*CCO^B;Gs`}m7gT#5juqJdS{K#nLiI>s2zRJPRSp?(ry>z01?Bgy>R9c zIo|17_sSYGDE~o{tcl~#>FIaYmD=>C_{$`2#8z!WYHXjNh<{sTif0U8pyrb%nN0`{ zv}nOplDu z@<-+oJr7=HMi9S)GZv!wutU?A!1yIjsTb%Upg4_4y-YYQWTiliT$DCh@^*~&7votz z<(+Rn9sm92;m7AaSc1Q}MqS8l>bE%F$un=8{#|$EP~r!AVLD0f2I-jiXmJP6i1d4J z<(Cb?>fqgaW9-fLpPY!1pzQ;lIPCVmlsf#H#fkFIqV&Sjz|Ml4#@O9~mX5vl2%$iw zN1aPmr5)_$&N|{wW~+x 
z0>^cJINQ=3qVCtBq}dRV&BEny(`PR@5e(dzoDEOnn)KN~>UaZlcoc$!ygK4^s`J{+ z5c9Hiw(f`CaQOlY=4}Q2Q1zhe;wZ)~56$b3{ZOCpu9TGNUwtW4#LAYGN1S5zbxh{I z_T){H+=Kv5X`78~9zP`zP{}y_a7TfWe~3i#(8_wzdhWu^%o*PJblmP_)$n7OgPZk# zDpk#3DP1!PnB$u@mDWq-@w6&W5+k*FyF_7|aVFv5eWoTNR*02gwN7s_UUW3J_Ni5< z!l%a^x-KMg<68BLW1pS3B3vLH9p*ATD)lueg4ET*pj9odSP*jZpb|y( zXc;x!0CY%_(W5%YK5OL*mpWa+m5WfL;&^i&5D@k!9N06BoOR6AFgw-AgkJU7bh+=c zz3&)S$d&sT-|N5H=`0wWMrTKKYe>kB3?(M(8guGeZ8-=g2*mQ>GgndR^+Wu~;a)4T zFj~gS;zlNf&`w15zN<5Kn32IbH}kS!a^EZ+b{#&o|ICfYyHg6hHd~EFoiCWkgDY*x zmfF}%USu}Q)!SKqn&9{qN<87AFWXCsGw~RhtYMS{lzmeW^!}M!?1t)14De?_nE&64 zuUFj{6)y*jS^r#fW{B#%C-XaLw(@q~(u}*gi4mkXh8q?czI&yT-6n59+61fF)WA(L zxTp5gJmqO&3E{9oS4-yF>grPF<_$j_sX!jx4wcY8FN-%y8+>^m?Kref=n2F$(4k?? z{EdO=+)MiAojl6#-=N116TXq;I=2m??;6C~yT#ZMIysd81|$9_YWv>-Ql|+#uHRERF> z9l0j0d&gxmyF8`3ed<(w=KYDa%L9Y3f*Rdt-fl9Dp$88Y{h?~+WtAWJ&s_yZrBYr@ zjw0S6Su$VgzN*|UW20LGX+G9+gJs1{{=%)7LUZR=$uX#sOq;lfS|dP|0k|z|82!_F zy+x&fNwdPPGozciJ~Atcto^b>JY^i00V&F|S1*$#<`G?n1qWShtTFwy&yo1f!#cO` z*!$)Eb^Q#w#8R*Pa|r#o_t?|-Jd;+_#BCC0FKsFYTBQ=bJ)Jb}MvunlAl44g_lz8^ zD4e0dw&yNHXs?Eq%@v{uM~Lm2)+MT_DDjf1dH5MY$ydXX`5xOBGEUaP?&f>dW|T8l zg}H6=$S=HSB+af01%razzskinOsS)7`ij$%N}QZDmb{hCrxZ#8KNpU_rDGYoD&|9x zjOfSYtWdes>(<)J&Mw`Dg?}twva;QdFq`#rUH((aGhk}iKsnOu`q&0fs^qcm@v!bD zKQt*+sxPB78Me!Z&I$8VqmGy!F3bt9Gma4GPjj)LzcDd9U;nTuYGlxVpoV9FZJ8Ay zDLQa>O)BOdBPy}QNZQj zQuO$GmkN0ZtyWlj$+{7z!cHFAUc}Y^yGTpLZ|^tjRU@~MtsDq$;8=_;udQT@upu1( z^I@#kEJ;WRlpF3Yp8Q|1+QsWiY1Fj*dBlZ(U|$s~M|I?CKgx^V#W4L&tr5HfMbV+r zV^O6?q^H-`15=Js!>2lqaG{j%7g{X0>?9f+s-?vg%yR`?bXdlVvk_LFti#qC2G2g6wzRXcCh07N+#JD4Ou>)~E!s$YxLElz#I>j zykQoU=%{da+I_8jH^u0HAT<&l*KfR&^N{rwdXNJ62}MqsDhFT%!_Ii%Lixk5%d@k%H(~oFgAV zN@cM^OUL&6O@gsIYZIynM2Pf+oD}(c;VsE2GTHWSYs5K)JmUL|@ z4IG88wSG+s5TkiA2=Y1y{^1dZYm4j)%Lx(0{{(s1D9U>7lH;!Y*X72l1|$aWRsi54 z^x9k>xKVsa1KUj3EzsVMwdAbaw&eQ_$`%f;k04qxdg47oDW+7Es=0|ipf}!jUya&6 z6m(zO-*PLsB4`+sbyn(irg-ZjNGZXaGT#=J_cf2mtt2(U#{%Qqrrq#mv7R4YFTa~_ zP1K3|8uO_NCh&KI?HPOf!8~m1E{h~dL;-6U7vJFL!qe&F+Cr`#79F1Z2TQL6E^cWv zV&$62N~=@$s#pvDjMq+TKc1t0J!ti)FMmY0zMFGP{yr@N>?S zKvj{sLv;(cxcu2eu=cFDM6 zT;gYm{z9VCbS7$hP(-c44%VW8$T_!ck|v)O+GYe{+V)2cv3O#=+E-+L;ZFgPf=fxnbw_WQrkMjKM)-<+yw5LrOh3n*3@#^GnwTm zuCShm%QxsR@qFo5q**Ek8i^3$CwB+V^Kc&P@VeH>#`=I7e!AH#hd*bl_&2W`a&1C1 z%oZ3Oci3OIvlKMl5%$GHt+!HIMdkvtTii1jPR6Rky=hAd(ee~bVRs++@ZVe>fXg5h z_&in@{ODHq-U!Mp!Gn5o(AMVZy|Eh>)tkHN!dpyXZ4;{Tebh)5tSsacDEDbd*$dz0 z1{NUXAgE{{P`dTr1bg;J-erJ|M^rg_&t@<(nRD7aC%rzmGWXKyPLakgCm@*EQ<8?6 z5x7cCVb*FsE7 zCZ(;~;-1$tr;gnB*Y8LS_byvrVSHGpZY~{}po+pqv3Qq!eUP0yWgpJq@8(&n#Jry1 z0d+I)Lco{SZ$=19A8vm5$lVdVLYCR6Do9J>zZsid3?I8>1 zzS}eEgw9Fx-;1TBezz3r#G4MIJ-dr-d&RWh8Q9h5nppD|1+P~SP?5Fr(%$#YNSV^; zAe+f6y=A=KoJ4QZAZpliXOwx2w5NBrT{4DNve((QfbO}C`Rau8&GYJJ0V&2u4Brr? 
[... binary image data omitted ...]

diff --git a/docs/_static/decision_tree_kundu.tex b/docs/_static/decision_tree_kundu.tex
new file mode 100644
index 000000000..e2192171c
--- /dev/null
+++ b/docs/_static/decision_tree_kundu.tex
@@ -0,0 +1,157 @@
+\documentclass[border=2pt]{standalone}
+\usepackage[utf8]{inputenc} % Required for UTF-8 input encoding
+\usepackage{tikz}
+\usepackage{helvet}
+\usetikzlibrary{shapes.geometric, arrows}
+\pagecolor{white}
+
+%-------------------------defining colorblind friendly colors
+% Using pale color scheme in Figure 6
+% by Paul Tol https://personal.sron.nl/~pault/
+\definecolor{cbblue}{HTML}{BBCCEE}
+\definecolor{cbcyan}{HTML}{CCEEFF}
+\definecolor{cbgreen}{HTML}{CCDDAA}
+\definecolor{cbyellow}{HTML}{EEEEBB}
+\definecolor{cbred}{HTML}{FFCCCC}
+\definecolor{cbgrey}{HTML}{DDDDDD}
+
+% -------------------------defining nodes
+\tikzstyle{input} = [trapezium, trapezium left angle =80, trapezium right angle = 100,
+minimum width= 3cm, minimum height=0.5cm, text centered, draw=black, fill=cbblue]
+\tikzstyle{process} = [rectangle, minimum width = 3cm, minimum height = 1cm,
+text centered, text width=4cm, draw=black, fill=cbgrey]
+\tikzstyle{decision} = [diamond, minimum width = 3cm, minimum height = 1cm,
+text centered, text width=3.5cm, draw=black, fill=cbcyan]
+\tikzstyle{changeclass} = [rectangle, rounded corners, minimum width=3cm, minimum height=1cm,
+text centered, draw = black, fill=cbyellow]
+\tikzstyle{reject} = [trapezium, trapezium left angle =80, trapezium right angle = 100,
+minimum width= 1cm, minimum height=0.5cm, text centered, draw=black, fill=cbred]
+\tikzstyle{accept} = [trapezium, trapezium left angle =80, trapezium right angle = 100,
+minimum width= 1cm, minimum height=0.5cm, text centered, draw=black, fill=cbgreen]
+
+% -------------------------defining connectors
+\tikzstyle{arrow} = [thick,->, >=stealth]
+\tikzstyle{line} = [thick,-,>=stealth]
+\begin{document}
+
+% ------------------------- tikz image (flow chart)
+\begin{tikzpicture}[node distance = 2cm]
+
+% ------------------------- nodes -------------------------
+
+% ----- node: 0
+\node(0)[input, label={90:\textbf{Kundu Decision Tree (Tedana implementation)}}, label={180:$node\ 0$}] {Set all components to unclassified};
+% ----- node: 1
+\node(1)[decision, below of=0, label={180:$node\ 1$}, yshift=-1.5cm]{$\rho$ $>$ $\kappa$};
+\node(rej0)[reject, right of=1, xshift=3cm, align=center]{Unlikely BOLD\\$\rightarrow$ Reject};
+% ----- node: 2
+\node(2)[decision, below of=1, label={180:$node\ 2$}, label={[align=center] 315: voxel counts for significant fit\\of multi-echo data\\to $T_2$ or $S_0$ decay models}, yshift=-3.5cm]{$n \, FS_0 \, > \, n \, FT_2$ \& $n \,FT_2$ $>$ 0};
+\node(rej1)[reject, right of=2, xshift=3cm, align=center]{Unlikely BOLD\\$\rightarrow$ Reject};
+% ----- node: 3
+\node(3)[process, below of=2, label={180:$node\ 3$}, label={[align=center] 315: varex: variance explained\\by each component}, yshift=-1.5cm]{Calculate median(varex) across all components};
+% ----- node: 4
+\node(4)[decision, below of=3, label={180:$node\ 4$}, label={[align=center] 315: DICE overlap between $T_2$ or $S_0$\\decay models and ICA component\\peak clusters}, yshift=-1.5cm]{dice $FS_0$ $>$ dice $FT_2$ \& varex $>$ median(varex)
+};
+\node(rej2)[reject, right of=4, xshift=3cm, align=center]{Unlikely BOLD\\$\rightarrow$ Reject};
+% ----- node: 5
+\node(5)[decision, below of=4, label={180:$node\ 5$}, label={[align=center] 315: $t$-statistic of $FT_2$ values\\in component peak clusters vs\\peak voxels outside of clusters}, yshift=-3.5cm]{$0 \, >$ signal-noise \& varex $>$ median(varex)};
+\node(rej3)[reject, right of=5, xshift=3cm, align=center]{Unlikely BOLD\\$\rightarrow$ Reject};
+% ----- node: 6
+\node(6)[process, below of=5, label={180:$node\ 6$}, label={0: Uses all components}, yshift=-1.5cm]{Calculate $\kappa$ elbow};
+% ----- node: 7
+\node(7)[process, below of=6, label={180:$node\ 7$}, yshift=-0.2cm]{Identify and exclude $\leq$3 highest variance unclassified components from some $\rho$ elbow calculations};
+% ----- node: 8
+\node(8)[process, below of=7, label={180:$node\ 8$}, label={[align=center] 0: Uses all components and subset\\of unclassified components}]{Calculate $\rho$ elbow\\(kundu method)};
+% ----- node: 9
+\node(9)[decision, below of=8, label={180:$node\ 9$}, yshift=-1.5cm]{$\kappa \geq \kappa$ elbow};
+\node(rej4)[changeclass, right of=9, xshift=3cm]{Provisional accept};
+% ----- node: 10
+\node(10)[decision, below of=9, label={180:$node\ 10$}, yshift=-3.5cm]{$\rho > \rho$ elbow};
+\node(rej5)[changeclass, right of=10, xshift=3cm]{Unclassified};
+% ----- node: 11
+\node(11)[decision, below of=10, label={180:$node\ 11$}, yshift=-3.5cm]{\textit{n} classified as $Provisional\ accept < 2$};
+\node(rej6)[input, right of=11, xshift=4cm, align=center]{Rerun ICA, metric calcs,\\\& component selection.\\If max restarts reached,\\accept everything\\not already rejected};
+% ----- node: 12
+\node(12)[process, below of=11, label={180:$node\ 12$}, label={0: $90^{th}$ percentile threshold}, yshift=-1.7cm]{Calculate upper varex on provisionally accepted components};
+% ----- node: 13
+\node(13)[process, below of=12, label={180:$node\ 13$}, label={0: $25^{th}$ percentile threshold}]{Calculate lower varex on provisionally accepted components};
+% ----- node: 14
+\node(14)[process, below of=13, label={180:$node\ 14$}, label={[align=center] 0: $\lceil 2:3 \rceil$ depending on the\\number of fMRI volumes}]{Calculate extend factor};
+% ----- node: 15
+\node(15)[process, below of=14, label={180:$node\ 15$}, label={[align=center] 0: \textit{n} Provisional accept\\$*$ extend factor}]{Calculate max good mean metric rank};
+% ----- node: 16
+\node(16)[process, below of=15, label={180:$node\ 16$}, label={[align=center] 0: $\frac{(max-min \, \kappa) \, \div \kappa}{(max-min \, varex) \div varex}$}]{Calculate $\kappa$ ratio on provisionally accepted components};
+% ----- node: 17
+\node(17)[decision, below of=16, label={180:$node\ 17$}, label={315: variance \& mean metric rank are high}, yshift=-2.5cm]{mean metric rank $>$ max good mean metric rank \& varex $>$ extend factor * upper varex};
+\node(rej7)[reject, right of=17, xshift=4cm, align=center]{Less likely BOLD\\$\rightarrow$ Reject};
+% ----- node: 18
+\node(18)[decision, below of=17, label={180:$node\ 18$}, label={[align=center] 315: Accept if remaining component\\is less likely to be BOLD,\\but varex is low \& not worth\\losing a degree of freedom for}, yshift=-4.5cm]{mean metric rank $>$ \textit{n} max good mean metric rank \& varex $\leq$ lower varex \& $\kappa$ $\leq \, \kappa$ elbow};
+\node(rej8)[accept, right of=18, xshift=4cm, align=center]{Low variance\\$\rightarrow$ Accept};
+% ----- node: 19
+\node(19)[decision, below of=18, label={180:$node\ 19$}, label={315: Nothing unclassified remains}, yshift=-4.5cm]{\textit{n} Unclassified $==0$};
+\node(rej9)[accept, right of=19, xshift=3cm, align=center]{Provisional accept\\$\rightarrow$ Accept};
+% ----- node: 20
+\node(20)[process, below of=19, label={180:$node\ 20$}, yshift=-2.0cm, label={[align=center] 315: \textit{n} accepted guess =\\$\frac{\sum(\kappa > \kappa\, elbow\, \&\, \rho > \rho\, elbow)+ \sum(\kappa > \kappa\, elbow)}{2}$}]{Calculate new mean metric ranks and \textit{n} accepted guess on remaining unclassified and provisionally accepted components};
+% ----- node: 21
+\node(21)[decision, below of=20, label={180:$node\ 21$}, yshift=-3.5cm]{new mean metric rank $>$ (\textit{n} accepted guess)/2 \& varex $\kappa$ ratio $>$ 2 extend factor \& varex $>$ 2 upper varex};
+\node(rej10)[reject, right of=21, xshift=4cm, align=center]{Less likely BOLD\\$\rightarrow$ Reject};
+% ----- node: 22
+\node(22)[decision, below of=21, label={180:$node\ 22$}, yshift=-5cm]{new mean metric rank $>$ 0.9*\textit{n} accepted guess \& varex $>$ (lower varex * extend factor)};
+\node(rej11)[reject, right of=22, xshift=4cm, align=center]{Less likely BOLD\\$\rightarrow$ Reject};
+% ----- node: 23
+\node(23)[process, below of=22, label={180:$node\ 23$}, label={[align=center] 315: $25^{th}$ percentile variance explained\\from remaining non-rejected components}, yshift=-2cm]{Calculate new lower varex};
+% ----- node: 24
+\node(24)[decision, below of=23, label={180:$node\ 24$}, yshift=-2.5cm]{new mean metric rank $>$ \textit{n} accepted guess \& varex $>$ new lower varex};
+\node(rej12)[accept, right of=24, xshift=4cm, align=center]{Accept borderline\\$\rightarrow$ Accept};
+% ----- node: 25
+\node(25)[decision, below of=24, label={180:$node\ 25$}, yshift=-4cm]{$\kappa$ $>$ $\kappa$ elbow \& varex $>$ new lower varex};
+\node(rej13)[accept, right of=25, xshift=3cm, align=center]{Accept borderline\\$\rightarrow$ Accept};
+% ----- node: 26
+\node(26)[accept, below of=25, label={180:$node\ 26$}, yshift=-2cm, align=center]{Remaining Unclassified \& Provisional accept\\$\rightarrow$ Likely BOLD $\rightarrow$ Accept};
+
+% ------------------------- connections -------------------------
+% draw[x](origin)--node[anchor=position]{text}(destination);
+\draw[arrow](0)--(1);
+\draw[arrow](1)--(2);
+\draw[arrow](2)--(3);
+\draw[arrow](3)--(4);
+\draw[arrow](4)--(5);
+\draw[arrow](5)--(6);
+\draw[arrow](6)--(7);
+\draw[arrow](7)--(8);
+\draw[arrow](8)--(9);
+\draw[arrow](9)--(10);
+\draw[arrow](10)--(11);
+\draw[arrow](11)--(12);
+\draw[arrow](12)--(13);
+\draw[arrow](13)--(14);
+\draw[arrow](14)--(15);
+\draw[arrow](15)--(16);
+\draw[arrow](16)--(17);
+\draw[arrow](17)--(18);
+\draw[arrow](18)--(19);
+\draw[arrow](19)--(20);
+\draw[arrow](20)--(21);
+\draw[arrow](21)--(22);
+\draw[arrow](22)--(23);
+\draw[arrow](23)--(24);
+\draw[arrow](24)--(25);
+\draw[arrow](25)--(26);
+\draw[arrow](1)--node[anchor=south] {yes} (rej0);
+\draw[arrow](2)--node[anchor=south] {yes} (rej1);
+\draw[arrow](4)--node[anchor=south] {yes} (rej2);
+\draw[arrow](5)--node[anchor=south] {yes} (rej3);
+\draw[arrow](9)--node[anchor=south] {yes} (rej4);
+\draw[arrow](rej4)--(10);
+\draw[arrow](10)--node[anchor=south] {yes} (rej5);
+\draw[arrow](rej5)--(11);
+\draw[arrow](11)--node[anchor=south] {yes} (rej6);
+\draw[arrow](17)--node[anchor=south] {yes} (rej7);
+\draw[arrow](18)--node[anchor=south] {yes} (rej8);
+\draw[arrow](19)--node[anchor=south] {yes} (rej9);
+\draw[arrow](21)--node[anchor=south] {yes} (rej10);
+\draw[arrow](22)--node[anchor=south] {yes} (rej11);
+\draw[arrow](24)--node[anchor=south] {yes} (rej12);
+\draw[arrow](25)--node[anchor=south] {yes} (rej13);
+\end{tikzpicture}
+\end{document}
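The node 20 label above compresses the guess for the number of acceptable components into a single formula. Purely as an illustration of that label (the function and array names below are hypothetical and are not tedana's actual selection code), the calculation corresponds to:

import numpy as np

def n_accepted_guess(kappa, rho, kappa_elbow, rho_elbow):
    # Node 20 label: average the count of components passing both the kappa
    # and rho elbow thresholds with the count passing the kappa elbow alone.
    n_both = np.sum((kappa > kappa_elbow) & (rho > rho_elbow))
    n_kappa = np.sum(kappa > kappa_elbow)
    return (n_both + n_kappa) / 2

Nodes 21, 22, and 24 then compare each component's new mean metric rank against fractions of this guess (one half, 0.9, and 1.0, respectively).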
zHw`vLNMxSp;|y!s<$ZUy4gUU=GBPq<>AO;f(x?7+lusG5=|-^rY#a$(yuEP05ZB?+ zQP*kNWE&S6=PHJeuN8U~?vE>t&5X;BEfhT;^BWr-t1I#-S{#+KOSTJvtk`dlh?MNR zkvhf<(C?b>KAdPH!+ef{K` z;iCCM3s(&H5iSmH8EzfPei(P>gHDsM>oEAI=uhU+LJ!?(Bm{JylRg^~b^^6Pq=nuI z8(9A|b45O|=oe6ZKJVx~O)OL`tju4+PbUzLWEjL*Yea_g3{$C5Nk`#E(W%@YjZf)x zu!=GK$rH>&#M8)~X+WdjW#m}BS$ARU=$!5}Z|m0+W=>(NWjktdSW=e4Gg;exGGlyT zyoe}Se~6GUTp1XV!GX6+jr`mnmS(@rW`ep`fBJ(55P>AmdDJZfjUVnwMF`Y1d3@3$8I@p{joji5^F^g*a;WHfmn_^XK2fC<- z5}vLb_vlA0IEOAi4fr1L&Yuv_8HhaWg3;xg&MDVP7`!Qe51%zTgyy) znc%*;v9U60(X8yfmlh!bE#qr){_8M2eYoz6O5Zr$mh@UUzN#b|qK+}OG}&%gaVdhf z`DZQTT{4N`GT@4b(}y?kiJR=wWC;8Ij=W45)oX@lG*Y?j{OrAo{k8RG(s@C#{!w#Z zY zQ^AF;Q@1~Nt@hlej;{4+;MS%|O>Y9AOFUbSw~fgj6weWRm3v&0$^L)NA0F-R7DTIO z`eU7j9A0iaVnEPhlG#F0QxyQb*#ICg1OP73KL!2)0QYAAuxSPWV#xqN?UL5;<^}o! zfs3L66aW}I?_L;9Nl7#4_P}e|SK6LwJM;53EBUoyZ#Rh*hM&3-a^Vy#F?$fIWox+* zrgw-@iz!S{JxjS#U-Ap%n=APe@+A`ff2}0EFJAn4sShU)f^3_&`s6og z?$+6+)tcY)<1fN;P(ns#6wUt z)_+%6-*{kIbtQ*Bf2tpD*~L9}wQ-y{K_6PW{Bx65EYTkERl)JXWCAmRav0m@z#&Bq z%CSnZLFsM)8RF3}V1%kFtE%dGCdDqI3^E6T7%HvzLwR4i64ZqS3HGo;LZ!PnCEbK2 z?NM<93|nj|HpNkU9=Inj>$a$SxDVl=@cm2~!KSaT^jLiCZ)cRDAbI^Ga>4!2X*t~Z5W0ONQ4jM6yk#;WoOr&AS8t~)g;iF+#P4?va*Ff9I<(xx z>`6S7L^pG9n)=1;OE|=_T5pXyK{nGHQuKK~_H2tK()WZUOJKxXg((YlUrLt+4ww@w z-;W(FC+>XD#zADJRy%c5!4W_Gn|l_-@~sbWKmPqVBFT~&3eLH$Rs-K+k7rg%hk|hu zb$yNFLJK&=?fV+5jvG&W!Pq^nb&fK>EFkfIh{*~2bUqXt;X;j4#s|1hHQx;tUfOJj z(nrnes?UC}x>w4D5Lp`fBA1epe5nbGFAOLXA>ZaArJ&4$)Pe`^jTn-0S9VuU_^cF= zeDsYKyZaxcM4pG5qdH*u_)e?i0_c(66JLNOjWqx3>CsX$8RsEP-7u8%URj!w602!a05Q7sDM_P2@n{2~ zI`V6)g?BZVG*llSx~jRLyK?y!hHmvlgzP#poPv}mF!}jqbHQ4y*X&Jd@X&Q$MQgxy zh6xTpadm!d@t%@xi#lcZ7A_)8sk&og=f0kW1WO#jEQeKAE*Z%%Am(^|^7ALW;vXszg9H9|4zvxip*yL*Gt10q6kSov zB|{vnMU^^IGmJH|&daUs2$VfU*A_Yy`K+wEs*%+=m>`i*1jWf?57WCTbiEW-x(*;B`WiQ7~OR++P*dX0~^O*m(5~+icIIz0apv{Bv(I0V&Jc= z(KqyVVe|-ppZJ^6GI;61!y;UN= zsp@NOngLG-!u6o)jDC(5kS~*^+}pR$>-OumcN1dBOC9{LP2(AzXcHWaK0eePc{ebV>)t11i42me&+Xn(^$_@+z`V9_U4bL2MDA_9Ef z!SQ3?<-DF(lrHz)rxP}+-r%nj6FzZ}@>{YCn-_wc8lj@LJJqvySPsoaMQBLAGpfS% zd^f(NeIIrn?E^swORGla%J9n!T@t4E@P$0s_d!$GM z{c>=|pHbR9+$cmFM?9pZ3602>>ZdcVEb>nfFWiWM2&LC}^u6|RRGqec2fD(%c$$%} zmZ8fPQ8s-G?+N1;e14ckKyelJElB*9HUnJHS14Rr*r7yi$MFV}c*TYZ!j9$fZVSBEa6@=0D`0-(v zqKg2SBJfQ>Pk^TNm3~?K$3bQyH0%^r8SOEn>;-xuqO$pBXuQP2fJEJMlSX;@SPt@( z*U;ejbr@iV6I&#>Ps3hB0I=OlF|fwD`*cX2f|HMZ_bHw{rK=ln{idi7Fa#3GQxb6R zz7Q)G^D^n7tAS(?Ts7=9VdavCYM9+czx-9l7T_h^6~S{I8G!`q5&!MsySG$b+1(qd5p znbXtm6c&m7h5@aO(U)KdW+1r{>1`!_nnbw>VgfsmoK4<(azM3O%;b(@hv!Rs=k|UI zDSy6WrYty*^I-WmFBirO(eByY3z2uOBdDPc=7MRaF=v!5qA!a&PsS<3waQoYJTaxLin*DVnERzQ_2TTT=$!3 zp|_dB6s?Sp>fdCu86-H|Pg`He#tBp-mbd;}X}+&yc|*NsN2Q|6?YRZgyDl?CuLp(# z4^vwFpNqgNCE;bs_Fww{xtGHFc+@?H|F&UbM)>yx_T89ekp#zzzAQJ5mny#vb2NdU z{d=$^52FXOg0idI1xSR8xksD427=JWrg|ckeTL+(&kt?dSH{)}8;2YjY9A3@Z3N%& z;uSm;vP;QNNTta55YLWimu3guk@LZB`QL=Eli)}jFEE7e*=ygy zu%I%*g=Y7+0br7!6&;;18+OF<`7emqX)7qMc!SmpC-rHpa7>9f`Gn&sA-uvOuaPzL z8)p~d*ExQZHn6Gmz%89}{2e-`lpD&o70Je2tJEVVG*n!Nz8fiy2mKbx8(dUI!A;h$ zbkR#7QIyUOUvJ@ znE^MI6UGyp!Q8mcgWtkjUG5bjo?Kf|Tp!#YG{s{H>z#{ZLt7LY`%yg#mkjTnoa|b6 z9D1oMw;A!2B9^I}khXMECsmWj+Tjyrsn~W^NcMi1O0EA4tf4h-L-Up~#+AesdQLF; z*PI`s|D}o)(bLd}sN{cqc&}rLS@TfDf719_1xI~v^V(FG)~;MaRWWM3+xp#)+AY4- z+tr%0YTb^pSZ)IuUyysE@&x=`MRx6td3ztj3nRimuW1V+b?f_6MNFw z3o9;XEH9Z1u$U{JPo+umPV7IJ9d_G<_z@=b&r>6bot?K(9){rS#7t`|0`##rMQ{Mz*Hx!ZM z^X(fRj_m|zmHe*^`ZxzkzeZU(7UmcCn_t+)IF5rKQ>!B%2uYNHCgfbi*8Q(<7M7pF z1q9NeG?He`@u>A`0|jQ^R+Fl~C5>NOrxi>itc~FCpm^cjB6}C;0rhYE%f|sZrWLdu zRed@H%6-$|t54p2I0+Eb4m2nDc)TMfbxo1PaQyO8fT4)M%mPbulsqk}z*^T*46S6O zSe?epxrFa70mt}5l$5gQ0mR562!irhp^aAsY(tlf<=$C}wY(t%Xkj-AKB%dd;$OFN 
ztqxWCuIV_dJ|z(<3TVAYA^a+V)pu)sd|E*Xm0@~n=gJ&%ClHb zkX5pQ5&^|+xPvR!*5;1LNYu+7k20jfZ2Vd72ev2T50!IU+I051_2jbw(eFH6)E4(^ zV>dj@34mq;&u*{$wSS&GY;$m7gX8RB9xn%G;C`3?-0OzcmeFs8L`r4HWC~=ng;luzz5&^)|zV9Dp>6#q&>l zoxK=Z>z2)*T{1j8JpYme&|d5>qeCv1_URuLy>2C+vsnc&1Wzwd7dtb~rs#DV6JujH z64g{6Q%JF6uXExmE`K1+fF_MLSvt%ADDh7YIS^~Q8jGyhQe zjCP85efM?n$Qd`3x?=R4wlod|w>Xu?$_QRMGWe*{!!VQAC$W9ZInPzW;WYk|=58kJ zzN+69%+m>hSG6KRc5N0$&T`o6U21oet$g=%uXS=08*x%5i(RFnUB^j#U^Q%w~` z;6k5X+Qeftu;@f~eg~uQK(@_t=`?A$cUbfJ5{`p0)n?aj_gsJh1rk17?#YBHqx4!{ zQWFf}kt9;<#?9F}MBTtFSXV_K_A~f8@rvXrfHS9dA%?G~;||v1xa@v_kt`i=Qe<8vTKnNvqFTvRjP;;=RoBOj0 zt8a$&in4|jXwHeom1gk@R(sv%vgbehW=?pLoo!wd?Y^+tF%vkjI35|((uy}rsmjSg zFl8A>OpjtEH9L>f-VNoh>|xC--ZA#>T^Y8^xj7#cl);^WG%(e9Iu3r=pP#K z_nq0eE*Q(bIjb!u@Se4E8Rop>o$Dv9Jc64ruGZ+8*{gK=4Euh~ zdwJnkH|pJ63Bm4vS*wwrqZ zdx73dFmk@#605eXKqG#N3cQR68Ih2#0lP=(6x|$6Wwh_%#_{lr*v|jMGJc}6s?AH0 zq!{$nGz6xiSB+{){&9<$q);$4_+K4|K0}vXav` z5fT;=j{1k^6KkAx|9^N9X+K+y5V*&9UuEkg5LWr)e*+tYJ2<-qO8I!Y=Hl2NT&;VZT$+SOX^ zW$at@M#08-0SD?66r=hw1UvB3=oZ?G!zU1lN}U>eqqh<_G^wRJhnoaM4P!AJ9>^tZ zP8LCwhj-1^uc6d{*N;20w2f#?D58 z;OJfrRAG%oeenhLjvz-y;)GI){PFC`Qc*}c4pX%5_7g6Gb}vuMhEIFLgd5+$S5#L@ z*sY^qwTedz46ifn-xoWAR#cl{g>U^fx#DXct+E~I9HRtVRSagWYY?tc*Q9>B9mnG7 zqz@W(lr0-fw5qpRZX;E8>?&xb=Nohz>jhfzhR(*AT67(a5Djt$q7NQ@eL&$ioshQQe>9sjmD89N>$v4y2|lh@N!vY- zj_++sPk`Zs)a~x|=tr1I5yFS$0>3T=< zHV9W|Yr819vPx}M)pT@aw6e1K6YPHG!n#yz%D{R4<69mLn4l9Cq_V?_T&Jl#MH?3J z)F{k8x-_`SB-NyB?Jvz@y8rm|mSvIQ+9I0Nw?)Q(m*j>^qjsnYV5pFT8U?eQS#gj!0M}S^4qz%eqhB{KU2Tt;w}-$kQfH zZRXRxJ_+PZk(t*oL}`Wg!MGV{#pH|LDqn&T4f1^`=t-&|M8dW5qLy;zN{!8`%UfH$ zdhNIS=I4>mEUk3-K}anMDGXe(c}3MG2cK{%AaD&WPk`_LinBy4;MO}P=hp_?5*nIU zQPJ3zbJ;k6RMZ&ue-@Cwv8$V#wI~jZqi#L=wmM66e(PCRaZ%qYTVQ$puoG+TYrx^k>fyPFwGCF`dq2X(EInCC@!Yl5el>ujRyE+G{|P!J z26!7?qLXL<3z|v*+yB?%njg~X`xFEC@hpC!)UIeF?eZ8Ez~;mNI9yzuPi}KE+Yd3! 
zpw?w9F@eNR=>O&0fA{j)%GNrVIIAZ5&LxPny+9xpP*Iq22(sye5rMdtG842Xei^Rt z`oe!rp*T8ebC=AJRT?@NscI@nJz?4{@hcaG6q!jjLN@V$>#L}Rzjji=512}8R}M!d zXL-^%eQP^iJf;v5H_Zwf8Ahtd?Bn53>3cmJs2-g8R2xZ7_ar)G_%F% zc=^UXp=qtu{!EY9-%FJSy)Ty^4>|J%iF|le_sX)rS(%rEdv&j+VgQcWUd=lt%LlVq zp&s4-x&~}aGG{vX*JZpT&N9I)ItRxEUz<1gU7zYHP;H+DF=$+;Ue$zI5^k1|`#$wN ztTwaB4Db@lZ@pRS<<6clAbG5{YvS_CN}S!9Z2%`|yn1!CZhfnG~ z56bWa`Hx7;kQm=5>9#N6{yH6v7Y5tsC2}V(a)B2+4ytr<@-dmPDj{=#MN=aJqLizd* zfAkUrTExPh(;$Kct7zrldl8-&ogbdhT`Tx~Fi?A$B|=ejE|kT7;L{xOc?(ysQ(8eX zxhnly4}7W|k0USOO1c+<@x6o@r11RUPp={fR{j1`ZF&f0-?R3q7e~HW)T@+%nR&8) zL`n#6m^7cs*dmvk((BaTBE~Xr%*2 z$tKJ&pd3K;#X9tQh(BB4pk0&E>9)RAH;#L0GJ9#|!DbpaGkxfivcb6GL_0jJq7+1o zW7r!}M6g_6+-9eU3?*)mQAPt&sO`T1i6)ze2e8RK&jy0Fhnf^&Ut&e>$$XgTKx+=* zyL>JimMsS0vxLs>0z7Cz@n1#$TVTiaADSCbla|1a)&X_W7l?2C`)p~iIIv|S z(y++uIC8=>Fr(mFl9D)sEX#V>RVt)<##m^{<98sPhizr9*{K9vOd$)2pheupJ`){ZeG* zgq!roG^Y4JYbHF)pAM28g8OM}?-{+Q=dnr%iE|{vOHzvIfTuZ}8om(Oms*G_ogsw8 zm;}s~aEjDh?ZDS35oSHBd2x7(9QPbDONFR6x6z=}K5*z0%RfNC#<7<4C$ydmOia4a=Zqh<$=vsRX|-F|l*(vF11fTx&YME^)2)$sHO%LL9pTaQ zHGxy}iqkfs?fkV5(FH@tZD$v^OZCx{%F53hsip6n^uq3WK06cmc}(z!^eyWlJlCR= zi)z&-B9_#|;6Dt0J6J2k{7ixt9|ZG|LQ$Hk$I!j6p=uZqmn_9;tupV!8au{cE4Fha zvQcKm9WEbmLT`;J<7P@zf?YR!It8w2wOR+deZ-Kgdi%U%rL|S&%Zm!I;`fd_D=-jF zV6th^^d_BcsGWYbrB0E*ibiL88MrmSqFu&X_Vb1&Ox2R|k70bfh08^HWEzUS+NW&8 z)mmkyRI?(k^3-TH{S$Jq1l;;Up~8WS3Rke3RJSCOfM>MUyzg~dALenKxY6iLzxlQI z=k-+q(xVMG=jI)FI{u!pmx}BJEOVgyro1SwDPmoud#UOIG>|%aRE+T25LR2&byaY+ zW`G9;Z!vMV#`QsEzyV$hF@Nn#%?~hsKKQ#eydv?twRjFKD*PG|V@3MVxG}ePFD8vA zY0+AJmRTUl`ht`P0P@{ltMUqB07k$6XIOTlVY&Ga2r~ssEgL@p;2y=@S%7~yZv0;~ zz(qH}y-7Ctkg~#*g?~dDw*NtmNQd%u;^+L;1`HFB8d3*$m{3x5eqBAXtgL+U0kbemULnc*G>^;IkrpiX58-YU&P{8TCj^s@ximfz5RHGEreOPAi?+-j}pRxMmnKA5L6`Yw~Y2gIgGZdu|%`riwqps2bK0=*3=t zLIA`=mX$Y(4UCCzA@VZ>ZbE4?rTZYR10@w2%tFGu=ciLWn5|0weU>zcz-OP>YwPo) zZ|$e)+bwmj&HIigCwU;yKEfZ?G&)1Y+D@z9-lsa7dQ{N% zeWkqU39C_ZdVG%YZH+h7`m>;o@+%^02UcDEMK?svPdb;56?0%mND--(40C5O&e$A~ z`7+yq)*5^Ui8%)~EbPP=cJIR{4m7gV2pP+oo%lD1j+UJZxAM7sI@NHUD_x-n(?5$t zXEWkKH4UcbQcTezs|f4%M&6Q3D&3noHy1&rsy11?_d3BIq8?C-A*$5%Cn&bJ?N5um zAeY^T;$O|!|FqK5&oe)GsDc2Ck1+L54#!oo=!|w)3?)Xa=PvdR5xM@RsHXo4Ge}B> z)#X)Dc$ITb$*~a;>&}VtjwRvXeYBRyEJ0wFL}c>YM5RN2N@|v*Sxk)^0qx z0;TWHPhB0BT64&DB3GSkuhu<~Zqz?G*bBe%= zSMB=B(K4yvzMbo$S=m_AdAdodLKG#D{Log!5v5~p-s536=W4RlME!j)I?MoT%5yBTKE>C<6S z<(6}92oFeo2?ke} z1Qv>V*h*%S#S#Ger05M1VEFO>ShRgXO;Tgo@}NT*U>NoVMx2EMaNDo>s!}b#i;$@^mVpWrUJ?3gZfzYig50&n_)o{&{`E0~*$@gGXr?3>%yOwjH2uOX zDT++N$Q5$Aez+9>sJDX#b8HEbKOy9Nu;F*@*|U4VC9tl8H-%SZFytBIe|vFU`fDrm zT4?j4PK6uD-#(B%@QXzxT~;QYynKB8Pzu82Vf)+{U9F)d@f78SbxIT zVWH-uOINMz?1RlOQ|mTYuSjP;Wkk1>9HMFYMbioYvPXdL&EO|J%kL|r$ zJET7>c~LriOPaGLqf#t?@;>kg@wnfbZ zsrHmiqkS&%sp>{w_QU>gZKI{W$=|O3P!@47 zBrlq8Gui0wMDag~d%tL0L!6?B?s{3kl{|&$*PS#l4AzV%LVofy^tIswxIaKt zzyH`V0DM?LZ?+g?Us+;KF)osX;3_I*J-pNS=xiEbh(6MG#~Kp<3)UD#vxX>oTLCbf zUxx?;o;*E|V(JC8ycTzSmkqVZF2+Re^We#J`A^~FT)`jH9**XwlYWbZ%G?mt8aVRO z{jGl2_DTG^zGlT`hF&_$@LC`XNVr;;PuJL}$1xlJ2;EyAVNh0W~A?yxz9TYA|a zqn~Q2vqi6|(30;X-R70PC93QSp4HTS%`si; z;!?E2an;}V9^Ms9LAKJ|TU=JLHg(+7TpSc^G;!nYpHB4N*p$;NFa)#xq|Y9jJ><`i zRAWUYmRfLTmKaC?F;)^Fi{Saa)+V*15~4F-zyBX3KH>NOFC{+5mwyuf6b{Bo)#cUH z&8q^{`7K(HH~rG(K6(iap4$$OVBFqz*d=hz9;!g?GR#=A&mOP6c8Kc3Y%h3q3@INg z=D)^M?oUN2KU}n%@YNuSBm0v7A2Bzs51B)!lRaav=cX}PH2jtx3Zo*~{HfV18;uu0m_rg;C zD492JT2}W%If&*3Zoy(v@42r)<0OpNraKqOiNi(X(^@k>JvG>LaXbLU9;h{cD`l~5 zR#TijEa1qMnD!%uTZ0c#4Js?+o`9cvn({77WvZP{%=$Yw{`HUVk+m#4M>uTn!8rR; zJ!340c}u|6O`@uBSZb30@}0zdVl%Oijw)~354ciZ2p=6Mt?%H&yE)mIm{l=aNmUnE zZc6&o2@@ylQkByqLF}(pse7PE_p-ahJ%uLJB!3TmT+ILDVBWvXoBL~3Cln@7`{N%} 
z|3UF?o3B$NYy98!8gOt+$7--ogOw0xQTn^~-rD6(@!@^Sy}E|$|5YDZ!AnrH{`DiO(pWgFgaQs)Wr{Ve zWWkqTYuW_bYGZuLQ?B?uQmN(pnBHgTI6lfKt9g?7;Q`-^L1{RA~h0Nd3p zi0XA7uAfBU8alEK57s=Cek}BTSOOJVOxRYHSY+n~@T8;D`3nXp(DB!OzQMBxN~ZBf zrlFyjm56t!)}EJCXF3PUYy-VGA$Bh+yFgY~dIM|tY$co2%H^#sP6nyUy39fYg|51J zu3U)OpJ^!6rrd&x0hY=EQq(%(cuj0w!}UN-J8ASN!`@sW$12 zvYR6G%Fy!VV!E)exFJnuqxI;j;2=?cVl6I;i_C3TLjS5K0#)HZ&-<2z=9QgZ0DXNuK&rhpYYnE(~wm-_6m3dO_HuwfK}&Zg%Z`? z=PGq>m^0iU5qay%|G<_mckQVPvVoje{do0Juj8+^*`G<=18>e}VkvO11si1`UK|?i zIG-m>>ynRfF7}1T+(#3d-?9hS=_5qX9qh(C98rBRLlZv!EHe4D1LNu?lcaG!BCY@@ zF%eo;1G8^fjm7PaX^jZ9@1?@1hctwNodD8!z4G55E0ca}^f6Ic&6UATs|- zq1{i#&1GDz<|Wfk@Wj~3{F_h_Vgi`CY^F}r%=cBt9q3&KR^!m&IQlen5CtCi>uE^> zB~>y18aJWZTKqth>A~%&b8~$tcr~=ksK#X$>U!}G4|XPqaIVm`U$`3Loyh5m#9>)K z{;{!?T2v=@Q3J;^KTdz7`Vm-BwTWL$fnuuO?bShr+PR@CvW{h3b<8723 zxDZ~`Cu!UzI)6L!4ub7;6TFv6chq9kRGIavCf7azOht%zbQp-)wPN&jg%zC*qyWTz zOGBUIAt$jp=SKxCP@DC0*tL_)Ag{s*=oF0^rWyLg`UeNgCobIRX8#1a6dy6p4B4HG z*LAcPcz_SMke2_Cy855Y&LYAcb}Bj%R)3kPUQ!ISS);c~zn|x!|0tl=_AiC^FEaRl c6gSjY6&YIsvb0j@{|^AK<, >=stealth] +\tikzstyle{line} = [thick,-,>=stealth] +\begin{document} + +% ------------------------- tikz image (flow chart) +\begin{tikzpicture}[node distance = 1cm] + +% ------------------------- nodes ------------------------- + +% ----- node: 0 +\node(0)[input, label={90:\textbf{Decision Tree Flow Chart Legend}}] {Initialization or restarting}; +% ----- node: 1 +\node(1)[process, below of=0, align=center, yshift=0.4cm] {Calculation Node}; +% ----- node: 2 +\node(2)[changeclass, below of=1, align=center, yshift=0.15cm] {New intermediate\\classification for a component}; +% ----- node: 3 +\node(3)[decision, right of=1, xshift=2.5cm, align=center] {Decision node}; +% ----- node: 4 +\node(4)[accept, below of=2, align=center, yshift=-0.3cm] {Final "accept" classification\\or node to set final classification.\\Classification tag also shown}; +% ----- node: 5 +\node(5)[reject, below of=4, align=center, yshift=-0.5cm] {Final "reject" classification\\or node to set final classification.\\Classification tag also shown}; + + +\end{tikzpicture} +\end{document} \ No newline at end of file diff --git a/docs/_static/decision_tree_minimal.png b/docs/_static/decision_tree_minimal.png new file mode 100644 index 0000000000000000000000000000000000000000..f11732c3149d47a0c0a753e3d0c1fe86bfdd70b4 GIT binary patch literal 87621 zcmeFYcT`kc@Fsc?#j7M06cA`cB}uYDn$!eA1&IO*lEs!x6D2em6)+J5B}kSeA|R48 zigq^=CDUYSK*=4eO2=+1QkZmZd)9IpyNneW#wBpm6gxka&@w@ zb+CjW)n`dDp11Y-&xE$sNY>nnI`RBAyxVs)mI{ykjGA_)ki|-QP*xeU(vC~sW{Z*-#8S1$hYsMLp;+uOHC!i zxs9z$oeD3%Kzk{#D-6#gp<8h;rzq?FCTuK?QgQ}~JZ0LAaiv!@TQ94%o;Q3XrXTG_ z5i<5&jkjc!UKV?TW@~b5{bjjG9^@MjlRj1Z^3^>ymZ^W%E5vGfF^?bK3{k{8kqg;; zwNMa$)S$?uo8AKlVN4Xi*dyU_95kxhXujAXdbhcasfqD>YqOEasf*c9*?3L#Lc*e% zUW~Q1X->WP-h7!sGVH=HWM4d+A;*|2WYxB9$c)3XeLgSSCnYU)87qvf!7n{MsU!i0s_as&{7-4SO_ZeOf@D*F2RwWchMa~d%v#zdy4N8*)26xUPUt}{05X7pFYJR_~2 zY>&BnD3klFqCW!SzjX54F`oy5?F)Xicopi6mKQ(cvI{S9p=+KpO&~-)XX+`$m)J*klIq$y7UsZ` zRMyvz^6v4yr*Tg`dF3(6ne|~1X7q~GQIVjgdxr<9bT2a`2MR16Z&p0NELvXbhd(~2{xy6-tYgiNPd{Wkv1|L|-jM6X)WfP5=X|7*K-qLtY`-AIr z@UM3_9TFOlJ_0U8{&B0U4qoZnd z5hW0GksHfB$Cq?|=zPt&=*All7+ukZ8pBGCyeZrxe04ghm+oFV%)iL2FWANZ@REs+ zSJqR5E4TmNHu)G=a5v5Cnv3dE&MC7-)n|F%bc`}@r$4;?C#0iT>`wm;>D$uExFXjV zlp_-=%tGIv{!$*(dCtGZZ_SUn{p5qTrCvpsd)JL=!)YmcW$~;d?Yg#^#ZESliv>WS&nDUNT;H0ob3~Pvfb=YsobT94A}4{j`_qW>Cmaco+lD5S1w#J zu$k}VXix0v=%IA}==jhflfH60A%89{=|he3)|1r?hC2jJmDD2Tsf<=k$nl2gSFP_K z{6%IxlyKj1=c!z&d|WB+jrZ2^4)&h&Ui*7&Q)uVn4(DdgrXGz94L8jZnmn2^`lS$| 
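For readers scanning the flow charts, the classification tags on the terminal accept/reject nodes of the kundu tree map onto final classifications as follows. This dictionary is only a reading aid assembled from the figure above, not a structure in tedana's code:

KUNDU_TAG_TO_CLASSIFICATION = {
    "Unlikely BOLD": "rejected",      # nodes 1, 2, 4, 5
    "Less likely BOLD": "rejected",   # nodes 17, 21, 22
    "Low variance": "accepted",       # node 18
    "Accept borderline": "accepted",  # nodes 24, 25
    "Likely BOLD": "accepted",        # node 26
}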
diff --git a/docs/_static/decision_tree_minimal.png b/docs/_static/decision_tree_minimal.png
new file mode 100644
index 0000000000000000000000000000000000000000..f11732c3149d47a0c0a753e3d0c1fe86bfdd70b4
GIT binary patch
literal 87621
[... binary image data omitted ...]
z@khR?F1FMte|X3R57v3Kq5kKScRov}Vg?p2I9=(~t(lm65N=pZOY-_8iwfqXDe#}x zIN^EAkKvdgLA~IleD#VuZwzy&Y32wDCD5my`Zm`%j?e06p{a40snM2av!j+tK`q#T zjm*#F1S{Jz8mu6efRr%pDqSMUTZS?GhKke7#s9qdMam{hoRhQd@$~U^Y+k{YumI&~ z0#DUnlX9!6GE=80PFVK+8>5lMpA~+ZJEvu}SkO6JHWM&8Q6rjUPSgu+c9CUrTw3kp zzNL!N2ywinx-XN{BJJ2-SHr%E>uG|XH{L=--dk$WT(Z`6S<=yD#{E}JaI;z&DRWQJ z;~C0>pG%uJqO^t(fq-F!zm65K#7Z43ds&mK0UCbkH?#F^4p?rep@igD^Gdn2l_ni= z#db~iTnj5HE+Vo19=}au6aAKc3d=bcHmdNp5L9fN9Fh&qMJ=xuCzyFLjrJ51vsNF& z%hg&5%0S(9)UMv>+7Z7R`TYd6&Y2rc@qrhzc0H{{Rc%!%F5&PgJ7dw%x_FiHn%YWK zvs6Mx-mPtXuHEfwl0{(sZu)|TC$8-3Mh?`t6w&JJ=1E%%^o=q16cp+#fcr{40|N4? zMqH?aTIdeB#lqW_t6$a3?CS>oyTpF4Jce~-V;xnXy>RcNN_iH;ApFU;?F>!bCdHat zg~Z#c)0dEa^xgI_edc-Hk1^9mY^KcTU#;6JqHG?%G@*-B=vkXiSg$@Hh0f1KIL&Au?MRfYSlXvwl|U#t7=o zfl|Rt{;wOIH{mpLRLG=K9{E%V!7;9VC2!``&-9WD$rK_TD{BwAPZsejpT5TZ>?ngg z+|j(}(dB*ai+ZbKxeMbR8&R$@etKgw@rvK`Muuy31$&QatV}YRt0mhNH#d80G`?Fe zOCh~jrE@U4aSXRSI~6pGPHtf2Qb>ei;oL<@U17~`Lq*(@y{*SBtTY#DuavZLYjERh z?*tXq4aA?aXd?gU^p06mxxm{x(N53L(b^+=G$x0!-;Q2*m>?PJ>+Kt|9o^*J+-ani zsI&A6To~ZLzMsou%m`Ptx%h^up^`tXk5A4e)~YEnssPpYuLQflZrjHD42%i-HW9BrTt9)+hYXlD}__$o-$+l~U6$0aR!d3Pp^$)g6 z@9sEwk(T40c~y{5=(Oduh-JN4G!Eu4>q)KWemX_FUh#h1=+6!L_j~-KyBUYD+p;H2 zJ95?GNCyzJ{Bkn^9%o9fUu{?Jxrb$AWBxg=XHJDJyvuLe?>=HtDeD?vx1VynthB@5 zgOOT}^lk<`BnHHgXRza7Gh2k7f3S~njcy7miTg@OS4{R*v}mu-yK>q0wvK{Rd0e*f z_!p61{vn6>`Q^i1wu_=CF1=*gY~`2t+ZWPlnQToFYWC6$TA9iQ@(G4b@<0P9sln(g z8sv9^h2WutNAZ8!rty-AX*{S8_(1E!eOl|zr?*dq;c&d~zY6dCzi*Ml2Ed7&jOLPs zZ4+!x4t`o+|7`YNU(ou}w*~ezpa1|>|DSHi$wv{lC~D$jay!MVoodeZbLx){uF!O;s#1Q0O(YCJ*h`Xz;miUy1F31V9fnj_d;RQ_a!ox9w`kR5)$~E@8)v=i)FG`pD5+pG#Oibo>CSf zfp1d54~I)yVF;RCgo&?JJ21l(szbuUgx8KF5!7)8RUEVT1$(h4Y|?8xZ$v#`vYN&X zkIUi2#OF6X_JvCtYXyb?Y>E(Ou<2$jNX)3g;N*a)U<_5>cfH`L;NqR%xD5_ zP;}7e_q&a~vVr$mA{;1xkiaQ88Ft}WgBqdVd5@Xt+2^Kz`^J~1r&dW~V!gbH8PDT& zvc&e}et(>B+!oh?RYLu2PALwih^Bt4#xB_1dGBg`*SNCk>Q|pj^{3@3b;2(uTtaby zq%7syt!*#!(DtmQALb(~qmnb`eNtZIt)CO}nj%rC{otgtrz~xtlFvErat?OD&4xA( z{1Lh`n}%`m+vxo}@5O*v!cNp5z|OB!Tg6~UM@3Vg9u~M1^3N{iZrq~ej;vRSAHExU z6jkU*Wn(X2&2F^l`-%Dvo}Q_zUt>632C=#O=v`Oqo7FG5PjBt+zBZ{;UST9apXk>` zWAArvQqPn}Wz^fe2qL7JN%xT$6YZrT5h5-cv*U)@9X55sKW1emKjhn_-WSzsfDyCr z*EM!FQtDY^={B5FE?q!aB}2SVw2D$E zwej+*YcEz;d4sX+QY)#kw6LezbQJ$1v|N3MiwB>b&+nzQk$k@FF1nteW=YN&e(dyL zA9r`WE~yyec(DVHwGJ}Gmpdx5x1wbKA6{bAGRZ3KD>jPb1%S^{Iorvo?{fe7C3dxu zzG_|KG(uT+==r(}6)dOX&%0#(hw|fhqWo47Y&{m@fz$Qn2| zC(&FVHz@vWh-*W+f-}GyAw^KAh@ZG?Zwz*^89kfOFDNH(EJw~Rm4>Zo~%JM(W-}jd}Pc|`9lO3#q3(vfS7(kq1o~u z4z1mASS$s1Vc5W|W~oAzIVE$aW?^;9s9_dvc5sl-_`_1V^CY26K_E?*6ZJM6B&-Vj zM2I!o-h0!_a+Ojl;;&*kkeKiX>)slv(17+q&(6F|q1A0|)l-}qwt+;{3k zSVvUPTTShJ_aT$T z5sBX8WuMhJQTO%{Lsq{ayo2oC!V?%qVp&mnaqrGo3K8E*Z+F5)Upw}oEWsJDD$rN+ z8Y{$JNYzlN)y@`m;FWjwXiBISA$5yg6ZBawsB(amWBww< zo$SxP&;O+9a-gaIzQ8vi0AYslP7E3$9{hnIqn*+^x8s6}A8+sbwDk%Bs`T3szh1?q zb7-CLKR4H_&}R$#8+xo)u?r}gF$?QeAm0DLydUdT7$K1i(C1pK0zybUvIT4OkqZ!g zXml!EuVPP~jlJ2(dKG|9&>=6_uT_D#(@fh8*Q?-%D&DMgt@Xke;^Z?^U29cTNkPCT zmAPKUSKx#`#{Sz25)gLE{_VwMh!PF_+l!qLZQ@}5+Y8{w<`e$y#YPbEieE2i9AYb$ zUA_p)ANIPuF+XaYkkMa+E21$Jgo#lE^p-#N)T1zvRboJ-`ByOOI*g;_zk*rvQ1-uq zS;v4jyAhv=Ap4^taA6DwD#A!Wj-I|GOvPF?AeJWWKs86qLJ(beh-pGdC9W`GcJ@>? 
zga+~$%)yunL{4fi4DydWRxffjY%q7GM1Dii`iVeR6~&uBhAR+uOQ=1Sja%dPl*KemNjJ3P-jyVipfI6r>+ul zVM}>#jYxU$*j-PJ97%&4uK;o83C1}TxdNE&n64sOqzrh1{OqZM$h02-diM94G2{^i zwzqjoq#1G__&@=Kv53?iRU4%*GK(-L$`Wy5e|HsWLJ9rg1Ap_qrX0B~dp~jDIKBr9 zLp=dcT~kYq3vwU4@xXMMQ->BIXbBL^>RV$@z{7E{YwkH>25g0LYGmIUa{?e2TwTIQ z89=hp&Qm~Mf$!>myuc7&P+Tk{>TF}w@UNee}Kshk&-$ZUkI4Bmt3+KV> zA;M(e#ewMp;#r9KIlNvVo|TK-Xh2ioy@NRr1EcXf49AUc*biNK!4H^|)*5r9+p2p3 zih|dZL(E=)Q29|qLlm>(4*+ll-ynvRQA$&Ac@tTT%reB=>0S`IL#mMl{Cxx_b**+$ zzlI2Vo3qH+;G=8HLNTd0q)~tQHOSc8Bq7smDnT4h#?X;zezBQNYI3k~M1~CB^5Hm4 zDl*M4+}R{&FeA@0L=q7-3VpFi>>Bj4Um%JsJ+dq#{aeuwfw49c)aI9><<{H~sHe0LQ&VhXhP2KU~u9#wLY1 z*!UxjP1;X%h{2?;P5TKp=>V9KK}nJ%IuJ3bF38o}*`yjk!mSZzq=^pqF?3|wwXE4B zTQDQj{uJc9fLer1yOsi|dVv|4c8FWQdye6T;kf`}WsY6Kh=v=)xJjTyKtjf~@Nw;> zvm)ep815n>F7;HG@C-t0N~p7W|2k3vDWV53VnTRc1*Bx}WTx?h3lZ$THogPc6~RF3+5p5W?8ONBx$9tnqFnS5p#kr zS*Twa-h`8EQmsApVY^w*dr7g%dfWhG8n&21TmVoQHSLXXFO1PKwzb zeC&eE!Q=UH1^%}Mk+b8=rP#)S4TLAvN8Sbxvwv1Q`{W&@J&Yi9S8Sb7rwPtqxT89M zj=!MMc2!Ymzo`-3Em1?4fNkM z5#F3_1t+!_$NlvF*_O|8U7=JGZh0v;deuKKGkJIez^}4{gBXWfeZ9yE0Snxrv@Q(V zLewlkEi)}H%RB}XHo?(s=s~*eI{9{(KA4pt?f*>Hwf)ZG_+@0Oclzl`C>63YjUTAu zsD+0KJWs+k1r{Dz#0l2VSRgdOd(y;1FXuYz?ZPoeMlpjIO5KM0O{ae_9=Ck%&Lqmi(;W% z1gv`ynw#tI)g&}Z-6?fE;+zDva~Z_Vzljq1+#;pv(;=}c(dmsp^l zY|zl!tZ(J+91_P=$^Tx>OKy(rydQ0FKbZ@a=9~OQHr|P9ogTostI+3fw`JchDTUX) zB4!O9Y2BRSd{Qf)O2fsH@H&g*Zj~$S*h;yy3IcBChxLV_uRp5;Qcca8H9OpyB(tR@ zE*W9P@H%Vh42%9lSiDRuos&}EV#9X&7#SWUUspC?hYO*%X0+OuY#m>Up1v(OS?n(U zn$}--?BND%DC>Ch!mNe*9`&#?WyJuEm3iU}+G@$3SuE=u7s_PzMTd);Bot5>9ogmm zy!)o+rb@d5%fAnV5v5I6Pa^J@`ivF~dTi*zTy(BLB6yQ?ITa7(=57(amKoG%lv1sY z)>UgCy+%tgDNS;pjMmbRZE3%e-uImnJyKNIk<|6KVH|eKo%3zfIge<=R(wNhxtRY< zLA|AL{_x^ii-cX}Nw&{BXZg~%g50 zt)3n|QLYn42gi9W!O@A){qwXoILV{F7Z?@C?xV8`#t?BVS^Q7St< zCYGYPM&(uB^SjFQsUP6N_oy!x+Qm#IVcHS;Na;AO36$=V3|O&h`lW-H8pIwqTQVIXWYI!DvdeTXNe}h& z6dDkaItF_V(}q#(4<4Jdw1&bu6qb54j+|w;i*Q!wvF>0DD<6O4nAqf}CyD1>c6b}b zkIy>Yus^%hc+_*e>Ul^9@$Ul*^r!Xs%K41-;`aEzYlAN z&i%5W$qDDdC+R9DM973ofzQ0erzz=qPUaoVo8dw$bL7HQjx3KU(v@sH)hg)u!MpPZ zRHwgGd&tKNE;m`6k~0$1kqgirbQH6!{_|K^&NNa5f4|hC@`x%kE^->NG z2e@zrhVt>aB{R}Mzj^G$T%G^m1hu1)k?fRIW?~eId*^z3|3=B&`#D`|HuKS+ZpL%x z`b3XO46<$b#JKY6>f1zN<0(Ii4{xkxI`w~WD6U+cKYTqa9ZpNb$4up2s4Ytq!*2>O z6kN6c(4qrALWJDnt9T;L%wlW5aXqn=h#LH{pE1g7Zy=I47)l!c%UZ1!|dFg^4?<_ zR6D{Sx&dogtxgt8yJ?xvJq)@!AjTNosves7Pi$m`A%L`iOIePw zxGcD6Ed4Dms;kJWOF3+^nQG+zOojr2u3;&*65$yk5Cb{h5LTc6fCoNK9ZB6JL?5_}8vJgd#E7e|juG6%{idk4-%Z(E&N z^5lA7eX=?S_xJ$)+RdVi9jT=Bqa|`eyWB10f57D(@4Ikbd1K+kq0)is z{oUHoBBeMpLp<5baU^*5nlsE5+Wb3<_}V_jE$pSEo<< zE9}nLA6b=6*{HNiI+To7feijWl583gbEnO2GnI7Bg zISeJ#^!9+j!;3&>BX#&*?NVg#RL%YX+;vtfYpS^V-#`lm^IY|S#}MifhVP)qwJ}vS zGEayN{X^hUm!shf?^~tS%>$zaC2tF)CX`$Oc_^|4*>D#vvIh{jg)PtGh!aQ_3G5j zIgkBol&BEDQ#0PyD<^TDA0yEB!_6u~ONA!Mqv+4o4Vs)qHgxJzz8gtXD!OLdP)-}_ zLRu_ZUB6=ZANCFd_1Q~`cExf{zOH7dp7&LS-W|Ey`nXk?#q9~_&RiLMmmbkO73qY9 z3@yEsGZM|2tMqOS)P>4Iadpb2m3!k1gFa^AlE^z|;cp@3go%FcGZktqZAuA&h8rXE zNSi#8Xx6;&y(YY8!MAlmHKJe#3V?#TM8+WhkMZ-k)s{}KN|pe#QG!}3R~yBQy5IzG zSElR8@#Jaiw$b~gDq&=i^PvpEig=%^GAWtQrE2UPsOKj!1-Axo1um;5)|?qe^^CQ- zIM4j?WUBs15a4(76AgNqeb;D{np#k(XkBrgtcUR3@Q#3ri#~*N#er5s4jKdltB2NZ z%2-!VtZf(}c5#b2_vq+~|GdoVPJ9_-f`5@Q8STemBb|+H`Ppn-VEHW0vp%I5bt*eB zNZ`q*$wkdfX-mB)Zb`=1-#I^@*cAJhe6!Iz0eh+p&Fz;ZnqOt`G8(j-kw`0FoDXwg zW4dCKBED(&Jen=lj=w3tvtJGAg-jl6R|mFNIh?a}FWuY?b$E)xM5l>eQ!le-53ba- zUgXRyO**sRhv3dz{AM`pi~;`!ICd$)b}A;ZNFxcgx4+!4%&y^cqt^3Pd;jY$d=Z;d zdH5N{*lwMOxQtrYXWDYM*$?mm?*ZB$Tv3iCa=Ea71bQlWQan7$d&{W!bZt4ZU}|6& zN!a+&h&LHrdSmQf8@@U8u@{l=9&qyXA7HBc>)~pn1{(9+<(J>Z! 
zN5xNpA2oIJk(8M9>!ml9suFPr9%zU8{HVDk=}Jy3-%Ka!5t&NfbHgDD4Vrwyt5nyw zC~$xXoRc7T+uQ88t`aP&ak><{T)ikEedKe;Lk?AHX>DRZp>6`*MdE?oZPKcXa2V`v z2Lvr@$Cj=*H|*dt@X-?P-%nFu9K7z!g>TqJ<)A$T3BpGs2TIvf?4-6PjA#0k-dHy7&k0+-vGWnQLvodJ=IA3ZnHvxx;0>QVA? z>6ztRRp&wIPSVaqXmTXGd8#t4Hs_~_!O99_EZY&UCpbVZ>#z%~y~%9t3Fe^a!M9%` zmbIAdviF4e5k^y&?-U)te%CKGyHUxi<$>pKa`1lN<_Cw^`TPY6^Hz560XVwuZ+@5L zGi>|1Cfuo^bvD)Q8b%9CQ!obcG7Am3j2BJZ*q@M6|x-*jv?Nb8G~J!pr7sjm$K2us3|2;ZOqrw4TnC zyB$u}%de+vC0matNEII|Bd0KB{h7K6=a`F2j`#+(YO9Q`!9Hxh`({(db>xgjI-{rQ z4xY(sjN6BUXlnT|)=6b)DvwpWXi+0!Kp^dIIZC;cqC zpjh&a^3$b<9s=4B=X3gO&Ry*Y728tMitAE1XY&JVO|x3M$7VL9x8GhPb%0 zqP`5Lwx8M}TsI%XQK3Vd;`F|*-PFMQPX>MQ66Hbb1RUMemqxF;Jkck51~q1?BO!Jz zw8v}9tt#gmQtR@I+qaqMHCQvu$!ht6VypKUAn3_$TJ*p>nl!u;ddhl+S(qZ=eQUrW<)_(O{q1T$~vW^7H6yE5J1L8?`_ ziB=7C^liOrIqmqfrs_7Fj7EVh^(rc&SLfi$!Y^W-a+#rGT-TftAgaX~u#kh#LQiFj z->-K$jfXX6?xYxznw&U9h`J8>k zKv7R?kF@dR!`S2-y&)AoR;5nBcSf9X=+{x*ZNIJC3{A~-0et}H0Q#9N#SZ1;rKdU& z>WVoK6-nC8mm+wKsY--P|iz#LEr)s@geNI(DtO$C<*D7UQnEDonc zStD$&9$mDdd?>F6Qs-qh3AAN|5%y4NJwiW22wXeC6%(vYZ$wH4b#EXsQ4r1;_(mPV zwCQ-ne1=VW24;jawi6`NA8OOLA!a`|2|OeV2(4_#kC7)WSv z#gMd9r4h3Un^XX1gk*pQ$4!!Usu!4FaI;B}E}{$J+8qYJ(OvD-lZYA3CV~Ea7t;6l zlEmQ$+Nrw`GsKZ82e1D@hW#-3q@uJ_Z4t9Ro1}5*`X7Xz^?pBb_?dPp1~J>RNl9Qv z2I?^QyAriixe;?9o3wK1dJLTHQ39om3KZUm=v_s(NZNQE|RVedoN=}4>lKxyaTb?{-KUJxKbz`^Ta ztP(+-pizBD2?+1(;FkI4h#7pNpa9y3NT`-Qzy`TCF@T9p?2BKCaiKO#{~^bg|1+kp z{y{Jzts!kGV8v{?E<{W&D{xZjmiNg&yU(=hmwSdk!%kC}>?NVy!NM!fpB=Uo6PnRj zbl>#3P8(%KAlVoX!cF+nNsrVwS~HrQCa_r7gZUb(=|o$ zKkA0lcnt7MBwdWp_W@Jm{kAdLWqi$YV!^q9&h1qGRwCP_+{iF6rYmlc!ZV|*iJujD zk1UT;h$>G@umv)+gA0L`Aw{OV%y+S#4L`C+UqoPsD&~3&Bb4(;bS%lS>XJKkbBl_*<3vI z%aP>@v7OH@q&d7kTyQ%G!vNq37ck6BW3cY`!&yO%s88~j_#C+jV zZpOJEHC2Zacp8%T=*ba+xR?Dl{GwIVUijIu92M~${%XaOlhAdtGy3)1S3jwp_6^pq zh{4cL=$qmH`D{Gf?Y^8afs!9MzYnJ)(f7B=d_6cxS`C^ma#$9rmEUHBe6wV6@btYIn|lrQJ}t9|y|=Q)N!+*aHz*30A9(xDC@?IzwBQ?a>AVCaXL-6as9|FCWU*-E2KaUg!v-N=0Vm01KJOc( z&mG}H*vH#^Jz#foP;V#4gsJoB2D~q$H%Zl=TGe~)Nt#v1w`)1%xVud21WecTSm+L0 zk;*AUk(p7;Un0%#)Mk@%k$``xGt&t9FrK429>iydDWa#Ma^x1GZJ ztgO^LPsA`sH-1m2_xXQLFMA(V>OgJuc^iVAO7Mx@hC7x~(Oo>R@00x(MqD$0h*4P8 zTlR9CLLKr%%TzpXC^|Rcm#4@lMJxVf zReDOr`a0e%wiG5{GxX;&H~daQxQI-ZZzbO_maBc_VVhJFalLNbZO_@=xe2JG~^j*Gh!cnYa$24-7O^}q#;pBkGwYjG>j zEoYSJR_=hli2dy2ostsKSB-a;4=^^4Q_b^x>KoXuLo+OTRcyRRD-@wuR%!?OiSWQbI-#q3Eikjbh9be{V#D(9sA#mFMyE(|ysT&BxA`*7#DkQX6$SwIG1s=u{rH z=tMp9Y%4hQy3+1Ed1ceSJi!|A(6>B$rkY#OurAb!ppd;vQ@3n5FO?Bg&4HjDc*X?X zYw-ayX0e1qllbPtN6-7dtS=-VlCtd;5|F>kJSC#SAa`1JJ}y3%Gg0WrkQT}iiVx^Z zm8m^V78kbYq1F!qmShvtlEtTShwI(|QEcjUv5c2jPtB8Q!YnC1GWn`Uyz9~H4-36E zj)}SOYF4O_bJOmHCyUGt)8Eo#Wg3~UN-v#iT`8CKL`)IY3XfD^q)yS_PsPyQP7 zJ}WzX{`h9X^|RmWLaHAgoe$kk8k-g}n+?k10Q$&t@Wc0gui55izv;(L<>TA56yQ^p zmFNB}ACrYhML>LgDs2T$^Th6@!-r5+-vGvOp%IQTRn?VB{=U4%9AbH#n}>#IWPwt+1O;ypah@AF`Y@wOv>vJ}y8xeYtDb*h9`MW8e2{4984h zI0_D}GaUZ{KIFxmH8Vt~C**Fd(e#LK0=d4 zdVP3JM99Apf3n5F;v=Zv0N;o$SyvYygUGw3ACyi$*U&t~jc7FZ=9{lK0$>w`rIu@L z_p!j7nc`G!KR2I@#=-fRY$UuXu?f31bOL#R{=u)9S>5Omi3 zf{dp_)8HV#m zW$t~6`x0I47fe_puJP=}WlY}fH6(LYSUJ3C3eP(io$*@iY%PlXcM2<^Xyp_uCfi6; zi5GPmp>U2sQDvXr)kGxvvW&W)TKBpHPUlR-6n>UF<(_4Y(gMZy(Rd)G()VApWQ^S0 zinXq3&2`DQe9mbM^aN-R09z$mqeWfxte-B4e60A}!-&omp*etC{PSiT7b?fqL2mDt z(u@B2Sv{`w^VoY265S0yThnMaZbY++STQf#=R-JA%h9!hgR+pucrWVA-zP4iCXyQ; z5SBb%mH%T=tbc`eXNa*h*UsG!+-KCX+0G8iiT=jdrIYuu9#fZ64~T0~M2)&yxwtIl z_kVCC%pQEy)5Vb%#(^rHo8{i*Hgv@lo8S4WXs~&Mr9rVudbRvF3O>+a8`bq?g60F! 
z$!lV18@6RHUUOE66GQ*;zdZ+?2Fc(}(Y1L^`9rAS)Blyiw9bS>^$SKgDe+f?@S--< z+d#6mFp!>vJv9J{*N9I4U*+-rCwdvQ|9|Hou0x)+_yJ8g)dydf$>=(1P80&KiuxpZ zA9#O1O8^YL5+qIiGFjU}1hg|OIo!w0zDz7!Wq&aIoB-*Dz?C3ecZ5B`q+jO~Th%1^ z_zWFvI_h{y!%5QD@a<03RZz*%x4t-0+%FP#V(*4cQC}!da;X-=8@!Dd6@q}#2k=av z?jFsXJb*Hq{oj@CtClg@l|!5spa6`p1+V-sY{6B8E$B)l8obi_eC5M%-!7JAUp49x zL-Bc4wiF8SN`yRZ0P7=pFgTf?INCzbC1lu-QtN+DP2YaGEwhwn|N{>Ssku0Da|!^bAOcM@&1Vf${_B(^P4Ai$SVS$gXh& zNka;_4Q}tKM#x7B*$2Ko1u^k#Nn(ikZGdtCJ@9G_8USUW3KVtnFx&*mB;UNf24Grk zG*|cEFjF9MqxBVWAAlCwJWd_7?Ten_xG!(Ri)tgNG*6hTZ#C;W)4(kZ*_gZj8=;pM zSFis@u-~hv*CnozRxLUGEC0O$i~8#Ie|5xnLw-s<)Ye-_WfFM2WxY!Pxb=MXa=i-h zk-g+;{5KsbKe7?}x8-LML}ly0Re)$W88+)k7FIxA0iyE!dKJwGq7t0`NENk3x_YDeYJvXIE&H z5drp&fSp3Yp6`PEI?B@={4yzdtcOUGF2a%HO{5S470iMoifR|g9s%xgJ0urrsYyU? zG=>|u;}ek#kr!YqVroydK+HEl{QNdX1Hpt{faU|Hl8l&B)-aVCYgdE23owWGo(Bb`cV^uyaV8|cin|I z--Vb#xc;0ybqi9+4nSt6MW!QAt1Hm?qj>W*O>;><-u({L0|()XYoL)1ir@$qu>qvm zbwseb(1oJ_^hDu%Ad?jeL=wp+T}2K^Qz6%nVV)Zg(y3EG7_XGf;ViLChV5 zYSvZ;IO+w;B4#w`U%}M~j7?sGC~;Cm0P04Ru0Td@H&XOCC1E^XSg=W;w`Pur zavwj7o|Nk%i6Dc6h>HC!;vs_E2?$Fw$ajjMAiE^NS&VOxLTU$*kmc4I9MXk6;G3!7 z8>omGfYj=(HJ1=2?w_C_4jIpk5c3eIlS4QWxfQ6BNXE z;q{O~Lrz-D-)oK|C^tamv~I@hc_3y$P^*I(Y3xrObSn<8*NT`ShzQxcJdub0CrG9r z#p_|gTtx+S@*g#x2p|#%T5S99da;Q44ycp2)OaFzKlsG93gh)=5wjGileg5A-GzBC zg5+MiyGX{!M8T^KDZM7OVXXSW#|}_RO9YAxpBO_me8V?nroegpJ~dJT88$P>W(L_O z5eR730phzgg>VRlfbwKyp2892f$4~baD|8tA}wEZ50?s_m9^QH38=6-q%E7LV7Ii; zS6Mw5hW(MfI=_(*b>Xpr@y}NsnDLy>nwa;lkzpT=lt0KIb=_X^_ecGuorV|n=}P|^ zDZ;C3A_Hd0L_)))<&PSwc-rCw9y6_5`-6xt(My8P}*-#!G!?3@xQpD&z3 zsFqdj?n?+9zgbLG`Qc<-k|gV_x*Sq3eDHrDC&iPkDwEKhRlR^cY^f=X8&Ap%NHEqY zgJ1wExSXbdZK;`66RppE{bE+5T$!0@gLGAnO@i0Ec1S!Nr~XuRoK`zlerk4bYC5B7 zO_*N8uVE;UWymLPE@t&c)#Ebj(5Do>iAOrUTL*)t_@K8mO)lS?D%Ba_?6!c5yr%H zb61oKeso7Wvbj@>XXRJpT# z@X?4hPG6I9PNL9h48-DJ=Ln^#xc-{))KD?#Q(J!SajqetF)OsmmK5mR6hWGQx}<{o z5Yc=F*4VVv%^jD8vm)Yv3q{&g;++l2T(@%Kp8m(e27-;_Tyxo^lj(K5{ zKei9f$C{4s^7xh&fn~z&P{CnI(PBV>O&KqdpL8xzk7xlNOBs zRgde94ZPYEp&76KJ2LiN%dNWP=Aeod(xae-*ZH+}nx=&DWwX>dui}czAiHt*eHiC* z7*D6{bCNH^)2w=DSlZ-`RtCpsWjJ5lTb-Rk{eFO{Xk=Da=HvfNas0}UoEZXnX8)aw zz}_jK01-5fO^pIXbA{ zUwsIhymfv?kSP^<$n2QFLKL8wG;a%N;Cc z4kj$t{#BI68v{j3BK=~aMZto;N-R^gxILGeE-zbO9mhEGaU~n%)M@>W?hZD-F<*%} z#PUS3&r;yK21>0%xAMjpJ;cUVi^!*}xY-LCPt zUHC8OItdtk<2bo8cxzr|4XKG-$^~7~F8Ea^0@R;q29NzjmV!y(~8}ZS!&OmeJ@2Y4pnQ zwTh;$(W-M#er?|q-bY^&Ffb8yY?|x91g^B#iMD;j7O2-dC5D&THc+Q#+{RfP2EB^I z`xAY6|Dv%P4>waGa%G>9R~-*A$a@pE7^JNXy^$G7q&$5uqs zu9v9p{_wQro2ZVPlr$|7LvZ2$C2D@F(s6an4>)|b{}dJpc`B3$rZt%SjZ4ozDf$fE zMoHP%?)fp?BS~35`4Pi{YA*UifjrQzQ>fPCPdhjwN{)b_0Rtc1?DNk$y9w0>h0mZbzJ{i$UpJD4sG;ChHPi_u1cy20?i;nGv48?>ruk=}?BQTiaSzq|Zry9oMOF zmd1}(wI4i&^TFx2)+F3a)`M*ZChcQ~Sa5o|KVY5gf-G1PhbEu>6E~{U^fXA3b~nE_ zi6Qh6^C@pv>p4xAv6*T+@65EqQ?Kq4%hS`0tJ-TK@>s7}LOJR+r2#>!=#_+Jw}m;t z`X@fn7pA+qA3%AIwuw8pJEtALb(C@2taZ6Ch3;p&3lo&5(p*?wHJG3A->7@9sHlQ= zTeJ}s1q1_v2!a^MK|pd8#DFA4MI?zJL1=Q$20%oDqQoW`0F|WV3`!Pg5F|IL8yd-> z$uw}k1@5!&KI4vYAI`)5?~_=oR;{X9)wQZ-&2P@5VM6l4=7Yqk?0e&>WBg}^M}}Ml zxXtG{dcOt{5?#?DHOc%~DMztsVeI>mE^D^m1RNpvy#bTN@94ZhiJ>*AMfhr?_GfK* z<-}yitoTxIs}GV$IP!0zDOc5ozdtP{)8Mc7A@tDTqwb|^?M-Wgb4PJGZ*S!aSN<)d z(qX5mU)`$BsT#vy!1hEOR>+=wg>NnqoE_!1GHiVR2jn!vWE`EGlsl*^H}zw!Yzmb+ z9vW--UX;%k5A4}Xcos*}k3&DKL=K zWR>uC5Dj~J_O=_J^KmkZt-f`?NHrg*S74Z&&tvK)iq?PGcsKF8d-H_b)A6$ef>lVo zkH8Vgh#z-3KncVWGgvLpz7LVI?tY?g*YX(a(_f`AprR{E zxmf+9V~W6S1&l|v*M$Ow?apd5c|~$38W7#(oH(U`a!V7Xpp>$iqn*JTf|KzwPYm); zKB)cC!8*)sbw#f1n8*F7s@!=ML+74}MVkSVXvM?)E=!wA6hu2nUE>%ec5Q>NqGs5m znXc_(>CmB-L0exPD-+c(<`wg1@d*{2`KbL6jYpL9_X->I^*wfQ{ehHgXYDfyK)h34 
z$be_D%lN~O#6T--dsm8ki#-K`XJJ5+Ic>J9Y&K8tQwR^f?XFsr`#hV*8q@wpcaMj$ zd*^9e8Ka1Jew_)A&S#!7ARc2>2;@XkVcwJ);>b+!>hDOF(hRe`msj}0U`20#60iKx zn*mDYy@##eS&w|(@900HW2ee8I3(9KT5N7U9J9|TV@2m| z&y(~Jp2>tMh60p+bx|DyL3Sj{ttAb&5=*D!1Uo=m7s-pLbM;6xVC!>^S9!v=g!;#* zHdcLgV^HgnLvI;(;IDS3sI{Qc*%-^V;J%W#iyqC@3NI@dnNxLH8)Amhg5K!gRtop^ zJ?iRo`YfB$nle-2ApF`KN6-!6IU2W_wQp={JVQ^<*Tk^8nc4?fwF`ARtdG~#CX(8F z2<8EfsWxmR=%%i zF^(|!*6l79J53&LCOW1+keM7{?OidwVQ)XvRj8#q_4Kn3;EKWGD7nqEV+^JI(-ce&HnsT_%!!BcLMeWj%B<t=*~X>76~?kB47nS|5;2|A+O>=iU^HN2z;u%_gsUd#79FlxlUab#a$ffc6- zO9dRB+aH3HAcm8U?@qqkcB=GrvEqsx(~n<0pG4REaWk*`km?DIu{owiB?ijs zpM1<1l2i62I4Z2yGl*Ct#ZY>oBW6ZGrWB(d+ieto?{#d&r|wT7X$xyhlOV^2()D@{ zM91R}`l?nX?CoJfSUZ6}Gfg8e)rXhnSySi}n0>eGht}p}q)MwD9P>4?E^*D@G9)Gt z1z=W!HbV=TJjHlaWjtds*dZ=PJ3rLpU8FU(DTqTqydvwr%qeZhobbH+zF zP?WsRMmpz@%QZAAVccOiVu0L+up1p^bX+a1rja)SQJP~sBUg@>YTuTNEjbefw{e3E z*FJyTXY9@)U};R)-P_gu38xd0ON|32ybsw$Oc54jO@qnAhj8NvNH2UUn<7okd=w$6 zses4Xt@>|>IJ!Pi8hQL_MWN@0ZbAI!KT2a-10WIM90;)vkcj^h*9YKHjgmnm3n6sa z0H^Vp5)l!luZ#!5dr;O#PDl%5WK+&l$3(Tl)8VKAT-Vy=fW?8tIe9XOoI-dZnjpq@ zoumCcl6=}-Esa4W9^rfda0+H@?TEtqoTR%N@bFF{g@}dMUDV`ghkzy>w~!<{5&t zY$?1BJlfh>k>umM7e%Q0UbiqG8pkR5Abo}alG)rNfVu)siLYXxnp^Yo*`Cfq z0;4D9*T{pejwJ&Tgd7L8vC_#PZ~>z9juip6z%N9rbf9ZXF71Tp01N}pZ{pJBZn|!Oiop2u-pqa`f&|- z0wO02UyyHch)lmAK40AC;gP`R$gNEuArD6lSAYs`P8%IUagQP=H0r?l3C*6-Y(}e1 z1u&&8ttcxSeF0;JDY<}WQ_3@l@qrrz)7SBg*jSiIyDaEGEJ>np16VzkdxY-dpW2L9 zqHkHgp8Xsg7yqDic{^qxe+l(B&M|D5e~d#SV?jJ&x=_YtyDz)bMxey%b5J6&iOW$2 z!#vTTGICAUb^W92%&2G02f}7s&c=;qHX|>)*{7WY2jgsO7Guwu!0KxOhzAZ<~jgxB~Cn9SLSkWD)v15?r2-Ug5-b%$7CD z2-H={+XWNJb&iG~d%_>D-k`3$PL~-V!uqHm698%hD%gi|ju&lxdf`x(c&1i{gKPO( zY9brGt^KS8PvUm(297V_;=6k*)f!8S$25N(>|gc9uN+jQV%0xXBl)YC0}XSnXEOdk zPf3+y*L{rcC`q~5o;Z5XYi5pSWKLEz%DLuu#XHt5gS(|?svyM@8B5&PIV9d^h-d!F zJ@nGLsw_lWhF503x=LAH=pnkOBwIo5lD{psX___EXFOaxp#U9OMz!a8LpQJ2G^!gM zK8>l19N;|UiCnl8iz%)^w`%l9+sTs?&NhWcaXK|eElhzsMU025* zm7#AR%n{9;WIll$YZSgz%2fS`)_+#R$7W=Yx8Hh~+?8k@-^5)Pu?6Ws57c zlB8R0Dv{1+JjctuP*dOxszhCMR4#8`JIdyMNw))@#}llk;@oC=jU@hLQw5I6-m}|U zk#uyLFQi<58H znjD?0U2F5n4uS-~t(w;pQRK73k}n38tZS6+hF8O>8?7}e8|7PMwAc>YLk%-Ns6`Sn5NW7=?+aN|i{0=Zx`XjwwI<>P{oE*lh!g3s37@AEdfHEW-k-YR^n z_A*n5J;Yz;k{nR5oZkGKlPKlwHM?0%XB&axaYYaFB}9{Ol(wH%QxjV%DyT(_LQS%K z9zJguK6g(1cW-#8zyUwL)#GsNsK-&zSNk!Kr7|?Ej(vS4a{-@Y#-7zBzrudv9@dX3 z`22U5eSxkI^4CbRjxvLM`PW%*a!IHV?|&M!4r&moV{9Q+Hrh{h7jPpFN4ghYQ!~P_v$xXakfdV}EbXNFo|CdvLPh&b zf8z)0n~b9#6AVNf^jq-(y<&j|E%*7!5hmA48OLWOi?_?D5d9nh_v;)L??bA_TYjt^ z`X;H453*8_mKrk1k-JdZ+#nJ>sA`n)Ntc2mgV;L5&DoW{R_Q%<``l?m#>>Nv z+G@LEe5wc2zwj+@jS9_w81ApQ#Wmf_`O!o`XSuX9*kgedHvV)WabxvFO8Z!8gQnmC zzo$U(>m^OEs2cP}---<$`BL_*CB_GnV$H#{60curJ&&1YPoo?p3s2tf=(*Y18=WsZ z5My$AdOcX%_go8|C|3VRww04L?f3CcL!712<4ZfE*tnk|TU`EhS&fLRVt-K&fy3Zp zDMmm$sI1Sli>`K2rN$Ktc1W#aB>L8vIoTAX#wcCYWo*eJ57B&bTiJV%gHU&M*vZM8*h@UXmf6`#7705B zF&&zn$dm7UzOSGG-$uIX>_+9V-kd)OT9Fx$c+V z(y2qUnpfQq|HQ8FEgchod))D~CY{?iGBAT6Q791Wxl4 zaX0ig#)*4#j$mDKiMUDbO@}Fur7GXSsx_1P?{`aUnD$m)@Anb+XL_6#E@MN8a_sMq zM(;tX!#ZN|VAl^p3Phvehyf`uw?6fr+egGCX!gd|j1@r@nQ{)c&GRk1>5xsz z7Z42{BR_boCx-ICKjSa`a~#UG5`BJoMd;U!yP{KJO(*l4oM`veX;*!vXl19;sk(7o z;n#f@%3!h?*e<|h(jEXYul}N(xLX!+E3*0S^2TO1sXRe9|E68n!w}lVmby8)~{<&riY&vkq_u=&Bf&Q(0vtZKR$h2Y#$*j#;~M$RLSe9az)sLy7VFxJN7US=y8BZdvHW z+2aL|?yVojupce?YUwB|bxa5=o7zUIi|nb5!M~OM7PdK8%<5>?6hvXJQQDZ?XYRQD zn1^fdobgzse$xgCfG04ihnP-X7N+m{hq_!oM?QX*M&GNCStF_@Uv*v*Z!ntaefCb$ zeO2+bkS^9Xku>g;Ll%(g&l+f@Mg(5=Gx9Tn3y0P8NK>MFB)U~I4Mufv8l zC#L@yAent*t zQ9S;>`nvedEN5+~x|fTB_?A;H`vB)y)|9hZLS3YfS<{$Mt22p}u8C|XG2`JHpGazE 
zx>-huExwhfl67dQQnt!ejIbm($A$m(JfSd&_(-0T}+y>+t5w*i0fW$8@{*ymI7EUdn|wb2gjuP4v`$)N&s zZpYf-@J|r=0Ymy6nz8-nr@;{pC}D58fWH|_82X>Otf%XNu#5W7WaRC(;_r_!67%-< z#-^bBo3+a*l-qOJ{nXLt=OI-O_-|_4k|#M6z~~8tp$qKo#I(Bk0|@aA#>vl_5PJk* zP9%M6>aPfnfU~n5zdpNy=FKV>=oExs1#SOtYUy5gTAVFuT~DxeAF2}O^|@CyleR_N zOgqR#CFO!`0OVg;T|GyA?$y4yErTPoO9r${H)h5B1rCwTgSSH#3x`5J$SMoI9x!F`}WKl6cE9g?_SDtV7481ryV)k1S~`~Hp;X` zU^dQzjw}ByxCv&I(<;Y7r}!v)qIVB0uw~`;4$*BngCZyq_U4M7Af!%OJnUz+NhZT3 z2%r6rA&x2?@RgH8p<)-;ycpTHeJaK+Q;uCUE@rf{>cXi~pn&^cdBo%}-Mhl>z^Z3K z2V*Z71Y_zb{H{O){H*d=YEhMG-JbeFfzt*PV`Jv)b)eX+=6i9I@PwJo&BLGOaXnuA z`+L8`KgUS(w%n+g5&N}nT`R)xKDr-E&ZyP`33B^>*y0pEX4)*- z3Mi}o-V8toY5>+WStM*$8_6{yL2C0vx0TDa7W9iN3%u&Ij6+e6BKD24u zWV$_EulB-cM2UXOuF~_o+kHQ{Y3by4#Z(*>z)J)PRa`=@t;;FRHAcU>=3A!ojU_c{ z=hWC6Vy+WWj7&N>{27NUM-@bFRFA6x<9W3%uZ|E?&EhD)L_;U!n_prU?btFcYRiTy z9vPbGEqi}ID~0mClKrXC>b55_)^ulm)}3Q=J8PvPlI!vr1A{DHJrevrG$(FeV%Fs` z+Qi}e8|e81gj1d69dyjwDQ0Y$;_<^o1rk;0L4Db7ep1owky%p>+&L*D{qZ>0zjAhC z1DN+3sk)`j9hpU=ugsULDaW2L`5tU@tan$bO(f#9e!Zp%tf`0(FbWf3+grl7-APAk z;O}d$6U`9#{%1~{OV6_?&U1+Zf-{vvgne{pw>=2J?ARQ+K@1DJUq+=l{~}#HZX);C^)sZ+n8C zbv!oV`u+12E8?!4Ex2tKPnQ#pTh%3u_NJy|7j$qQBWBQ-PX&TXX;Wg-hR`b+Sf^qyW_H|QaVZN?-g?dub>W!*u+ut!|q7C*7 z;|#BTiW+%Wi4UPF=*Q0x>x_Tqa-z1AO7fR z>4C+~y47nK?m_1@(d#ElxD)7C`n-yS?&DTpT-y)W8D_Lwp~-wJsDBApGb^qR4scx^6-*=eQd87(gt<17nWX1`Sgn6Fu=Mr~ zbS2sqS4X*Kig#1fK~g-c>PwyYfMBhYMe>fc5>wMZA=rf#?ov9)wiBen13aQ zN;*HEz2^rxQ4|!(;Kt)}i$j zC^t5=zy->B+leZ)wQE*f!N@&wDgT@4uVKy>Hr8gSntqdL8bv4cZ!?9pwEaOl=diGv z-cx~#d0n(Z!vFp~pJl@2_^asKU5^Cwgp$U!_o1pGYh)&B9ow>1ht9QU_9~Oz(Gu;G z2Fa=HA3f0PQ0aqZQW4cC`E9YyNAg?tv71uc6-BLw(aDs2J`WzmQ6>lor*B(1*T7H7 z(;*U&{1616U9xh`adfAAAe!pV%~@sw=e3vU-?I(4k1uod%2?w*mAi98wCv)6NW z+R6G{3DMnJH@M){*X?`Hg1xr+l>6B4fkOm5cZFwomF>p-?j`k=)Wkuj@=6*DA!{`PjV(>O-z!4MOEpiq^twwhxH)4_BqI}KS%lwvT###Kzl)3UKMABBh- zkB76u7YgoGxSZS#5}7jT0(=8xb1U~8f~78yTOUyM078)kYc#ckuw1wlHd9h#G+LjJ-%rmlaO6|rd+Zk1abZ8QiXX7BPSrh1AJ&vD zHF%+*N;AvulKMT|N%7x@YBd|Gu8!5vH~;t2>wVJ60W+~z6hokJ(#0(5wp^I>G}x=c zh&`RcZ(V97+I4qvRE;c-B#fGKv3v(`aNv3-4iFtwX(Sk8FStfbn8TV1#CS@s4f0&Q zOX=;XZ!=l?v=T^_SxJ;&#&%IBGhpAm2C-WGjc82}+u&W^?RoM(0eLIwW;{A`!TjCq zT-l3YT9d9eN5jzhRH>D>4&6Uvdp2(?Bx-tXr}oTzT-<9V+Q3|iZ_4`S683rVZ5+U} z^1_L3lcTihQ*)gO$MZYSIF{9JS8O+Hd!3znbFU=Al{A2vA{$LN=^RFYg+3o2A2auy z6<(v@c1`mV7rifK1#!PvC!F%Ftd49iHsv*&Q3lt=xZxX&n^Lyd)~@r*l@-=ZNBU@; z;A)Z=7GhIj9u#$7zJ^s3q54)>h-YMaE&68lwXRJw-keP6CCsbgLy97U_XS2N^cjdY z6^$()vQ2o(Vw|#s9te?kP%GJ9l-LruZ;`~56Cv(Gs(Z&gcb zRGXM@y*7pcWG)?8y}yUd`{XG;?2lBu-2=`zP?PfM9}l%cV?=d5tgY;$U)0_s9;KnP zbFE-=Z(E0Y(U?y>b9pu*3Qdoy6}8qeci&|yKW?lL=O`ai)Ug=e}h@O6Uu z`qjFpJ(+|vc=}cyrYUEa^FQ#WL1USle@&lE#ZEsNMr%Y4H1tG@3VAn&9x6<&=vCB& zv#q|&7ntCmU+S%CW_aY>WnI1)r#CBj8`^uUuU=igFnc^v({0GFZBea_cRai~zCpAE z8z%5@8q?7nTR zZ%iamWeuvWboN5UkC@H165N^+D_iZ~4-zdMSyRa*&6d)7BeWk2{C>}KS!PP| zyG)BKNzr)lRvs;omsaR2a}^Mjc~#v`&u=rId2@-~LhCM`1m^ON@>EYXUr-#EeB8zK=eW!*UF(G! 
z{L56?Z%HP~A(|fzSV)<3aRvYYz{CINbdVIj-e6@sqe_ra}?hxY@K__XF zo2*Y^u}gh^NHV_pbFV zt9(kK5vD^MKnk~!=Aipi{k54h!TC?G#&|W)my$>^Z_{~RTpZ2Itghpl74FG1Wn;Eu z{N7zSM)Nn_CrwCWw&doT8tq!b_|R;9ON-{~8>>XGw)prjg;IUGC&pUN`Bvr-Frp+e z-`^$qw;pDfQQn|+5t`B}2#=dTq zxO!Ll7@0`r_xf4d!5?LNc@Vk}MD^&GWOo$1+{;6Dn}*mQ9WvEik46VmeO%0Ddl%c8 zZ4ziPQkw9^5N{yp69hYlea*gL1}J1U#>YrOsbS{PY-%q&R=YJGlz>|U>glsI(HifK zO&_KWai52{MElU%qPWoM#l29#)cLR#sqmDzx|>|Ym}70`-2nEWE`-J!BGJ;YB-a6)dC~F@~Ul>mAmtD=P*@Q{U z%M*q0DoXXtf;nT|kxc{qB0bdzT%jZ5rtOyEsacmst8SFI&jr=+jKq!(Y7ObO+e!^e z{Rzki>|=On;uMyZ-@CGdoyo>9{_e%tP+W^%vN*w#vt5Wi?YPn4{Ea^L9G4uNq-$zeQ>0jvsd#}KHu#`-CBBUycHfiS{g_C*K|U(VH6V6W@J->>nZ>JX z1#Jd#4)gP?KbrQ{cn-MqkNV7>3CM9c+Gj5QgZL|RT1uZ)6B|wHveG8zt5@7w%d#fQ zTqs}Z_`WhcFq|RBXPr?qP@x<5%;`Ksk~3B=07!XOWq1FSNQ>Wm80;zEIMas_@Yb(} z58@p4OuKx3wL#X?zZpO98LsAu^{G)S?8)XO;>~zM4pl1Q;N&4G!&ZCoRm`2x#Jtt& z=D4G&T;2LTw&#S2z_yNNO6|d&j0p}N%U{C=6;$@}#rf2vipB^CC#=34PA+J20yso6 z-fjBOw>7%G77z%#kv@8tyn4STL|fiL^9y<2T*IYsxCIxtK62kKbv|cn@rRQ$?!u>W zx6b`Jk_%*Ep&8UGlC6E?h^E#r{4b)KqqVG5-*Tml`SiPHubNZ^|AfnY|HTF9#}xig z97POb77J=3tTn%q>6%s&lS+!J`3(1Eep`LA@k4W1DtO=ykM|25acssJmUF&+Gu~tA zn}0{Xxi&9XBB;pZfKHhL#e&qzi3A<;;lEEE`9AkPxtZF0O1C+F)UD92v)cT5=Crm! z+)!y#RZXMdMR6H3NxAvWSxn0>p24_dv-hiCzm9jTVxb)ZZ5&Wct+VyY8|!t*C2sa< zrCuhfX)fuPI0eXAt?j2Q1al6yw`Fs8FN!c{kP;FUzz?Z-3s@x!k_PX=52{K;I-0o9hw&BZy^nL!IG!F!vl z_!+OUYUN@JH}e7KeRnnrj-HMH0@iW0Jiz))2Ub_e22Yu|(x$(SP^P&%9Cs10N3*0b z{T|dgaaML!mI07@?JM!e=S+5(o<>k30pVU;XP$t%2`j@u|Ym?cl^{cB+ z;q$bF_Dfxy*TRNhzUwdj=8|I4w@g+&&t}xu8|+wU<5p8pwfG}X=yY3_h@u{u(IhdH zB>AVH?%gkEa=;g^Fsh)wv6L*`JXhjyMHeAtTQYu|SY}F);tGgg9k;g6xvSeJDRr#c zM6xtHXy5rxD-w^@(LQnCYo0pd-;dP3YGUm^Ue$-0USpo|ct(+1|1NDgsTnk?s$w`U>b}#n1V^7 zdj)$kH}u!>ZAu{SA$&QjT;U5#A+mC3P@%v&Q`_@>_jB^!#|Z!LCn`z@62aqc@q|Ug z|9z(gFv6``IQGf}$laFie>3@gx^u4cf4;^1kJMIx5BUG{3DoNLRR84nkJq01-R!oY z8Br?H5`0U89w}^ve+WOFhiB+LEMA)Li3tyLeE9IA4im?xTVGafQjYp*AjZs!JO0;6 zJGvIF4RA1n`n$7Bb=rAX(mQ)quw;iW`Tr{Wh4vNi!4+3=sKU_(lOc)0!9jyA^uBKw z;6&xFZ>cv*F(vZ6?10F*i@-pr{tMgv`xUjo8~q#y_G`k3;q*J$+$&MSV(a8T@3qfh zINix(42KnedY zz8i}0aS(8)wLaNfIEk2QC@#T6Z2qYx*uX)Lu+^@D0tCxP>!El5z6&;E%^JJX19t#} zsG$3&rVu8(f9pv1uA0K55#d&6u74MTWcO$8Do|*FE@wswZa2>*nqa{vfgt`RU&c*VvSm*>;r}k|aTiVgBB3 zrbnO?_WYlI1vij0*316sN$`M1+sr?OjF+G%VV1S4#1IZAg&n{C=}9z!4ubaG-A9ao zo`k>1ZqJ<70}5HhKRpR|Fra$ZwA;{nP)qP&_@^gf1CRKSv@7#4J4G)31Me60caRx64~AbbLy%4;lHNZk3Svtb-Ey>5XJcYYe=B;0@3212|siU*`N zUx?Eo0qZZ|6$<9g^B^}Cf~nOd79&Ir;{`}6Y~;>ALXz(RZ$^z}a3?n?1IFdrGa$(u zkbH&ZDw13XvaB+>_D)E$3?wVEa3QWng`jcJo@;*#NwxwVK~)w7BwF7NG#bVr0vp2F zQCQHmI*caoc!&XbkdPE&gcvLx03lN;H2DlN(t!_PX-VjUkZzPxy~(v}Ear&HfE|n^ z-d;&Qq-^hn6K`5;nUMxh0rNUh92*G@egH0G^||&$q`_$;!quNbf{~WpdsBBfL|hP& zyLbSq3aq__l1Ldm?zBfN2ujFPK*VsStyTrmOrQnfiw_}u$bB$R;ogU&A+-Y;&DY|Y zh;)Yqh;i`c+P{NmC^n7=V^Tu+5W$34uV*@Uu^EuWdp~ z1El-j`w(43ID?ln%vggZ2Lhx}e?UyaHdh#V&B#PVzQJ;AM|}Y_bQN-iQ;{2g)tmg4 z{%6d^xo{#^80lNT$MxV=6;gzVhWxq21{{7hR^kw8J@LO;GU3(6x2Q}DO1&b3YkQ*<9yOjkZ5KV!43E>C+;zVO4nIGJ(KurPZ zM(Q(RU%;VD1xaRDi2h}1Vu;vfy=?XM<)iwV9MVu17S^ICUBS)aD4*h2yE(s1p~%}C zola{~uOSyF_I$ppYtWYcz!dYqXZbIFW>PvIHF~3EnU!tAFQiB&wzO!Rqf~eJa0Q|s zq_peR$2Sa5Xn$@QOQq*a5{o2$5bup{()~ii{S!)&PxtEtIfoA+JWrKuo`dHGcd%>< zXxKmHHa+dTKl}+?d6&8 zj;L@*h1W5tv1lKHF)&xGFd&aAyvCv(^FF+yV$w^e1@F6lF;$_r*n9b7#>QRpo;@gf z$bX$hJNYGzyW1`0W^BUH<4(Z>{jx_~sT3%zNKx#9g$b`&q;tW|-;Lvqq?0NW{zK}gxKg&S`_YTGr7VcX<6ZQ_Jm3x0WBL*LtAkiVGXh30hzT*^116? 
zQ8x?~oF#YULDt%8FF^SP`#lRXw{uHx)Qf#Uq4wy;2CIP$*%!;?%<1@9f~bqH8e$~ zU_OS^pm8ra0-}67^$Kzbb*Z0P=Ij>>lT6A#&TfP+p1g%F8jZG#S?tRrG)(KNq01Kd zz2y~H)~skAI5_*S2Ni^!e=D94L-+Y-WVV^E?{-Tc)vW;GKBd|i3P0`$&I>HrUq8tv~a2OX-}H1s3JPFAB?yPqV&90qP41cRxg%D73lY6h#QKP@~5ym ze*QblaSTsdi%~WQx3jNYVOETrPVV_tv$M0tC)O4yEeXnJVh+PkXGq>%KlN{fKmoQi zP^r9BvyAE5Imc@p3Jo{Zi<;8+cZ1ePpm@3Bxg1Rn1)g68H}=FN(xTui^8BS+ufZdh zth-h(->M&JI707}kLk`kOIvZ&p+4_zU1BDi-djp+ppwtMwrhr`M)eGHT*vmP=rOGt z%(z>5#us*IZanbG4q2p<7K(?Ux%Nn!IG+OJJ~bcc>9PDh4a|Z z$K3U)OFC3;1#DJUg#AUJu@{->}zmmQ%rNe&a z^3ahPSzKpr;KxDNy)#}F4{U962!AFIR`N!qMX<0qtcLZdm2UpTY&to|{2irpUGD5Z zu@#^B(-|#qd=^a}aeO6Tv+jcvv}KOiSdiBVc6AQS387>!%YIMzA=!AJOJSY)KlIUk5akdf$7Y)t*EHb*>G{MdsLW7 zcelxAP?{@vVP8@B*}=A-QtJ`##7+@I@~UJ?ROQTM`nQKormeLe9h|ioo;64?^ctmi zt$96e?)%W~ zd=z~|ouvU^n+(^T;T5@#rgqwd+BKf4wTJ!cA_m&L9)8K?pV~K{wan4GI{Pf;vhdBA z(l{mOuM2nou_|YsDE&62t>37nDNGypa125LohcR1s%84LM?^JGF$Oybq`J@I?2AU- zRB{7z>9tth$f@{x-t$d~t%8&MR84zh%(yX)&q%I#WLT#yw0~Zy#nid9?Lr!orvMS=o|%eN+0g(x#17si;koi!9hvWz>T=+DRcLCgmBjfLwSW>`KS;(!FRaMsOKvrHz#D>LPVaM93uO?oMnYGD>PJS_aJEGPKs zBDM^$bXS{U5JBSnsOn9qlfo@y0GoFU06`k+n4Nmm!6M^*xMc{E{6;qA9CeHfk{k95 z)E~nwpCHKzvMI9EF~vx&bs+br@GtL>WLU=BrjA)ha)YBTbxQb`AS4;YZmg+eq><#V z23Wv^^MFBU_R<=8kz!x2GMhG~zgiIzg2*`{4AO0Hk=WA_NT&B!13i`g@`?(D#6(;6 zeYl5aVz9|Yq`s0{p{Yf$>;7Efh)_V|4p7!JG`9-3Ldd8;gNRgnt@_P+22~bHv_s^T zy!GfUsCe0webg~smL@N3$O6wQ^ikChEd?jclLZHmFizMj zT0#h>01^NSi_5#*rU{6KIjlJ?Unmi;BgwE7&4*;fB?DQZhAMG_TsTS#d~|{tM8=WM zQED(bnH+u{aSM5SgRNcAUrism(PAMwg~#+E@^)cO43b)oNHFhb;Q9JD?K}`7Gm9Y= zc?F*X0CMCu#qNZ#f)+4%WFWDsTFmaW6*udjBi*+2y6!@1cn?y&ujj&y{gup+U{L@K z0-UL1kjPstATxqals58g*z1G$$c~+d$A9@-DAJ4wvuf81Aj3NZHb9p-3XtIjZ&I1t zA9oT;fQ2b77x&1{uz|&_hq9#HiJ^%Yo*LEsD(w0 zY``RggE}%AKv@mi5mDq*zDQlf z4+SGTD`B3IThN~a|{j%NQ4$_ls|#;58|N$D;Ew9 ze+-15p(;UL(2{3_6)A%b0$?>bDiFVr!>}7Y#;~A=-1r3cmzOyd5EGNbpi}S2uz>%YKn1SJS^nS1;(p-3{IG_zXuJV@Ds5p{DX{aKO541sc? z?XCLnNU{xVQbqi|ke3XrE!tV_dRHX*18l|j`FkNh%hQbRv@lheBW0g?Qx|^3pA`uj zJ`7@srQ9Qjk#2zfqv=(SS>zpkf{waM?T)-Hv;qq_tpIUQV+Ib%2?RsP*okG)1OVcV%WiH6Z1T5!zmwb& zQKywqyqgDk`O-*o;QDsY;826|o+J^_VASz| zP?OWutCzl84++2Wjz-x9$_AW#s|U){g%9r6UwK9G0-{6Dt0K`hnC$JKVbWf|IPeAk zn7$KGV+t4Vs3MUq`Z-I#htKXq5QIOR+q)NG!1y}|!bMyznRfom9z_NS!Fy}I6Wo#w z5l;7ukO-ZJF+S0&4=9lC7Hb5d0eHhP(6%C0q_)2gCGyJaA<%{L=70bc^{I7bbJdsP z^h;#>HFOY=JnzBnbq{InJmK)4iQ}Ev%Z0UJ;U8SuO(qTO39`6(kU>K1#a@uDn(=Jjs1^T!zGvL)I zQq%7Mg5Q9bG#LreM^6~ocA!RpSN4mm?>t?02PUliigfd7tHHr-%;m=>r;BxZ-7YX> zYl@>%$W_FUBtc81`?vS<5t{s!1MT%Wkpq7Zd&pG@w&I`kKSvM7*(y9wQujnqh@R}xS- zt;6oLOOC%yPGiJFK|%5@$R4A%TWf|N=c$+siyj@PE6e%P;`8A3tIrCq;sb!)XVTD(cQn6pP|VM@f6pLN$u}Z=cx3K! 
za}4Xq6&5CS4oX=V1&aQ;_WR&s74-3+D=a#I${}bj8guOuA=my9Trojzv)dG)ak@me z10Gg_*ofWFiM5yr@%rcDnc&HdK)a7s*s+HyPKeCO?m&s*_h&yb@`@(Np4yr=OfbvX zKj)y+e#k385z3J9@xqi+e8L}Ne0(gSPVrQ(y%oIWGn_hvG#JJF?^~Lf!7cpoW?$~2 zjquq+Y3)Zh(fQNbCBi${V_`xE?*L|c)QH9o+6WlXdp3=rB-P$%ZD{TzxOX8rqzJHI zeHhrMwR@u*A$cEo#f*HY)1p8(!N5aiH&oB>+RK0pYP_yPg)^bmF2j(*z!P3sn0$bd z1D|-*h!RT#K(`0r2}iM>Nlv}dpP(oR8b!gERTiEA6BlYu1lG?xz%6hGdwg)6jk1y^S2zovAPO~2NC|0xQ5Oy4k&;V7>o8jFE)?|_ z8B(Z|A3_>hwdw`EDj)e&De-6d9!AP}AsFw}Yknb|3$6B>K?GLUw5WMY6W*loC+4Qv*3M(ab*RJf;wdLH z1JxMxx^l`c@Oc)eZA26kX3)OddKz*w)`2oBWplAqhAxFOMT_#YNp6I!&vWD_JQ!kA3MK!{=_|VNp^G>1w@E}A#p#o1wRb1M z4{+ON@+wE4ZU6Vtr~-Qv^O;w;tyNN12KOl&W(l{-tnIC_F5hj^$XR&^K2l`ujeeP@ zUk_hmLSd)1pVg{wDhq8^+e-E0+b!a5c*{?(J$fdJ$L8twBoaE!vPv(TL@I2zet}+o z0UCZ~ku*}R-L)Hi>Ehyg0mHuv!te*v@|ZIsYRevgQfj#B#pb#YGXF}zJokkF+gd5OCtM&Eaz*3{_)^Y zh*o>1KW$lOd``2%daC!BTE|HYoKK+u^iR=6GcFWjJ0*=gtaz7(eFC#D2!Gi!t#*f-neo5+ec!8AlxlM- z^{yJEeMxK?E<2OvT!efPVbys;>|XuGU89hyrOYBrb4geCjSFNJkv*lwqw((v9Wunu zxsV(OF%%{##5konFCCzwNxWv`^ghSSdS&;2Is4TG;h4(LmgN<2-C4qvFutQ6 z!ZEA)+>p_s{#`hW`?wP3WZWrFhcQEy%L^COHakD+LER z$Nxw>;ozP>6ERn}6&bW4CTaWZ)w$J8DBt z^t!p5bU6>8T*<4FE$Ka9l8dld(}Ciy3F;OO(X>sJ4-(ik< zhm(IeL}6k}UwneE+mgUkCX&{3d*U*0+#~fI_%7(LoB$h09+m2q2?u_U)l49_9(YV} z-CSR1GwEQzyicD#=4c0){;8QtI^me*0**dfy}9Q}0`t{klIvxbu?NNSouY)|t=0&U z0R6*OUV_ZhHZP5cmtO1l1u4~R;^(mgnIZ7Wwo4mqRG)v;&vf;TasoDYpv--Fjm_c6 zo-Qrx;gY&6{R>}s+(X$mFi&qCFF0_)sf9S-L)BJ09N8aTd^9m^q*6Sg?7)E127|*9 zIwl$G_aW|z3GP!RbtqZhDqk8Iv{#jAqfFRFi@Df%M&qSXJL{eWxsfw!S&Ti&ML3k{ z4L0R@a&XO~#)yPU8jC=qYnH`To3lB|g_7E*(Ox|yF-ae;q8rQ&fg1&FOOL3Fw9dp7 zF0g|1v+!H17a_6=y4*ikx0ts~TqbfzGqIZ%Ry&Yw=qEjlvwuaI?UqVwZB&`$t=Nli zDEqqmh$4K>?;6=zJ0j}NjYVAK5t=cB0XN5pb_`w53ufzg zubwMOd`uj^jAk4&Zy5-f_E+b>E+b{>ACR8^XoQqvKR>Zw!Hp(+{fM*=`~St$cL!4a z{r_JnWkhc(du0_Ngk(mA%u+^HviAzt=ADsp-5X_OmZB2M-n`Mh*Cs16viHtjzvoNe z&+or;U-z8ze4c&I^YI*nyzcuD`Zxrf!~-BwY(SJ|W7DfHz1XWy>|K-_j8LTh)5G&D zMy^0=gT}=y^TOp{KU0|&wyKM1f+)b7yx7^*U&nDYwI!o3G1^I$tOf zqVVtzr`Ue;mcjfqtNeNVV8arv7R`La)62;cLNDaM2-dZ)VC4^27v@0U;<4xxs@II* z2`W$BbyMos{F}th2Sh_plUV)SpLI{a#jz8mxp7CuZa>?DTjFo_?3UcBuLrus?r7@{ zX}LyS@{=aU-h_Fa<{)SoQfv#FY`n1AE|{U+yh|*ibxyy7*yQ7U*fD-IYF9R`~467HwzNuaNn*+J-+Z z^knnrE+>GuN#zNf>QjeAnO&k9&kZ7VnHdUv>ycV=e?vrV* z)$%1usJO4ATMW3;6l-F-?l=(b7ME?3GsJT1Z7!alv`Yt=0>RZ)O=g|vO_!MGM)6Gx z=8o<}@g&eKfHPY$!=OL@g3R*Z@8ockN>}d_WA?x5ui-N%svSHKqcG}F^``|Qw@Ui( z;pAocYu)z3EeFGize4=DuDhJ=|GD}~OY-sT?g1jC#2(Ap^$Xa!KYmQWql4OUAcMBr z*H3DXdP>dt_1b|I=Z$iYn$8x_4Kbtd{1)Mz{&E|9hV|3mUT2Z~7P2?!JmsiPshFe5 zVkz(H_1dW^BObyi{S$Agu@1NLJIHp#9tR`!Oo>wm&c}k9al0XK*9LFv-gvqM1h0J7 zblLv%drWZR^Q}==a6QjWIkZWOWyL3H$3M1>i@knS`$3iYd9|H$e;> zDG`>W(L3t)dJLp32|GJGzS9T@9(Ano9qv<@L|AjgNC_a2eiJ(1 zrK!8YJWb9@H|AWZx@&<*`(NjIz3-KTg#GNMICqWM4rj+w&K=|}vDA!wJHh7nn>|%P<>fix=M<0y zXjsS4Q7o&TQ#t$vx3HSSTx@a;voE^Be~SOVESfTpk6BtShw*x;^IOAB=bCh#4de)v z7pmq5H(r#w57#l%99oQkWVPAWJod7ohP`?YFQt-=#$m6=AgBWDhUOQ(Ti4z6tPbzf ztO|S8z1mUddwKbk7n_s)m`+4M%cnS5zf{i6L2|2${~GjIfE@D4ZEQn*VaA)&%WR7- z5;{A^*4Qhl-`aJAcV{`)4xEY??bYM2Gbr&Y^~bX!_rV3FH1rAoT5#3q&QQ~NpAuWV zPw2mdPgF)XzL-HzZBk1HT64z3V}MrxN(?a<+y<47+Ri7}e1U_6LX&Ap>+X7iy|i(N|gsz(X#C+V@c ztM(XI+dRt;LM9M4I(A&7u=MNkt{5y^o<`aC)Xvb+WWwo+B}1*#(f5b|w2*#@CUn_O zHI}K3p;r5ZFb5BQ{a{{djiIP;CZ72C_A*66xUG4R;A%WYw#mF?NAbULSX#Ltmd#Ve zIk)!oC5OE41#b@aIlOR#H%|h=#yp5s?n1<;rQMpdrG6g6-O^5j_8aTJ>tvnJP0rS} zwvosL*CkM=F;X*uF(CT7Uo^ zk$R%eDskom9CDKrOOgg~4x=O08RT_Zdlb2v17xwN(3#yT(DVcEhtJetQ&kNEA<=j& zbTy(c`ReV`t}U~dm4PHUwFGH0Ng68u%3_(*N3PHTaI}Vd26YWM7Ord!QQLtoN*u*mw~6?ljOJNCg&yxvB`EyD?|4iPn&M+j1L4U*b(=!U=Fha1Ro8QF5GcH++>c@n 
z*Zr0j9oi;q@<*2$i_O91Lwil8o-#ZaNqMkqPs!sW8JQXW7AFQ{>Go*C4y7U#BeD3! zG8r%#965}U?L;SjEL~vS4@TARXuc>Ex>1gGz=SdAk%CENfCw^f79m2)T(y3@zi@@U zl~iQr!@II7X)EU_E0V3Y7e!F@ z1wK2k7q+vOIIAysFw;0ITJIG$ZmhjOWg@$QJdJtR$h))7uGU2|r~Y^}zEmM=m3zCj z+NVkT=@O676*xIBToj2^wla{v%}iIDczozFOyfoVGR#Rlj(u63S*tqIt|T)2^;T?R zKr%B-uYr3vK=kT%j|R(3zr&k!>04oc*P+&=b44=^mIXPr=pWGGx$?iziDbTc_}VSv zQnei?4yK&IEFu|>T5OOop&kyUnhvc<5O&CS_vlL&jQeFu{WV*;&s!*1%WO)yiSk79 z^?6*=+ED3z=lZ6-+caZ|1kbGuis8CX-5tj^uk0q;-1aKk6QX;5w^7*Ut9;&Y233VI;<3R;ZUku`Vu-Y zQ82$Hwk~@!l%=h8jIC(pKuN1uePuq!K%*-Eq(s1f*A}OSlFca(JITG}9b|WAC9{_H zayZwt!eU`KPCb|lnSDU9oiX(bZB4Yd-(CFibM^Ct#E515XlR+cjMql&ly&XCTxyu> zE2TG!+mrsne*TXpHI3O92NYZlkPA}OYd2b!xFt^0*oKmoRl};_&Fh64U*2};&#{HG zw=M6?%R5n%R!a`1M*B}Kwk^r*$Z73ASEh=C>sL*%1R~!SMt{JBd(E^exXvmy3UcML zw)Oq9HG6KQURb!1$I<|9Mqe{0kS#^Wgheu-c+PFt_n%F)o3E#}vB|_-@Qscpg69u?QE&ipV$+{EsaH&np zYCQC#J)P;s*xdR&!QE;){yvL!5H;cGT%W_EnJM zdWZ8iCrx9r&i?a%ZyU`mFH;80j#MxbV*o8F0D1kRSdUE1h*#Bf#S^ejH6T=* zIQXHAzc9Wh)Uy54ria~`9gL-w7=|VNA`1R9=;@E&n*cMBR>JMENH(yRaAZ+?3Q*V9 zzcZuwP!myln_#%Y^}9Ke=zCrj5Bq}2U3ph|VJc2`$~@gtZuBjKZcf-P6;o!XB0xhy zWeG;76+qkdUUwVfau2za2knrLS>Kmw0>^d*HE^1M181$zZV<+n^eR9E+G37-6^#he zR8Ti=hnnS%G`-()8Jq*RpG_yA?yE3Y=lm-IfEJ=Cukc`}su!Pdu?#>xg{RQCb^X)k zFW25FA_!9f$Y{06pW?mwbCAFDFSndfSP5~Rq}Wf zt{Ex%2*h5uXz@Iao)8z9EKSUZN+F`awN#V^@Hj-h7scahFh*hV0Pw${moj8&3_|us z7s6S+%~;H1rrvgq|@0VKc7C`-m+-(TXC8wq`bLIlbn<2GD& zL4Fm%CaDk=;;O@zh=dRRY4(R9Xx?UwO!f-&G09JOb`YO6F%@67MKS}RjdB~w!*e6p zp28{Uit|)VV$Y6a`O-%0peL?_NfFMcGz=OL1;)W2xC{Z&1 zrHM$gf?qqb-@Hg$W3TYWCciEN6nztEE5vRy%0ggLP;3-IeX=pX7mpya9Opq8{7j1> zrVG^8m`Tq-A98^Boit2wqvsfMlToMg_PuzaG#TV z>vO)Y>A!KO^boz`<0zH?rbWh`tZ~_5pYdv~0iD;DF=b_aF8e9VP-h7q{WdK^*Qp}G}KAu~2|W2}2g!tfO(qO}ut?3tO#iVJhNDHS7% zBlPv$r(=w7B%W3my+6+rS?uXn;=bgG`VAf+pNk-0xh>BLf$~L;FIDJ2{O`x|pmUnw z2zdVXo=PJ^7!ShA+ec;~*`sO{y!S~_TZC88hyUvg#K$ipp+OUr$UqTe1146V)GvIpK@SU(KRoi<4HK{#ATo%m)q?&K zJOhcuWQoN{&U{2Sq!zmBdkcbT-Ui~o&+3S9Qr_)HTf};dg@FJ06SuJ={;t1D#KHz7 zK()gK!^w*__#?pvdfS*LaGNnO0t(%)a)5d%D?;4~1QI4OWfPC)OYRHf9y_{KY-^Nd(vs-iwY>l-F|kk)@dDx|sc z?{QnppLi~F;x--p#sYovi2>-%@p)LHL41tT|K1mM9(yS=PGt~EiTdrW`GPaz9s(@q zKW>K(IR|st&$7_fI@UuI7FUfS72^#HFgYPItd^^PM6|Q=&7OT4Kn~|(hO6*9xuN@v z#*E`Yg5^Ff^caqm>(&|N6OJ;XVS5ZPoH&|v!kHTyjsuKJ0f<+B;&YJ;C0Th$k7^S5 zjZi2?w;jsju6}4y(!V!=Lm6mMZ}%Y5B61t}?ZC+N?~Bhz9A88jASUITl)Otu7)Ydo z5gBhAx4V{ID6!a_SEUTI>+FBe&Lnj{V#Y>trt-iDP{4gs)20*|0Y0P$@)#)p)y5Pf z4dTplAwj(&+6o=-!0S+Wha=<_fjj{spBnTaiaQFWR3!hAI97aTSbas_4GLB6o|L`? zD{`SSS8PD@i6kZqA|lAay-on+5Z$QnON6? zS^Kx%c;a!{wI@*8A9OXXIdKwI>7j}eNM!;sStF+XTN$&~2C(M^J@1z9A`OPb%2pdJHe~>R3a)j1I|(S_0I#$a?OeUYso(dxKJRc6Ny+! zB;AMEkNhoeiCw4x>WtqpoQ5#&X&Rx)MT(dEw^UD@%R}02m0+X+VSwuHBpij5f2iAx zqpp948IE3K$U|B_?ps80$!5%J!hnd(`)T!@j7$An9*nI={%xt;SQ>)Q7P!iZGAf;i zX{XL8n?VltFH%Rx56UW!p)UA+fosYTlTr}{xJS$a)xA!89 zp27-RSIWC#-sicANnc? 
zR}ZE{5P6dyRQ0w!Sn zN1$zM2fl@g!aGp^XBub=#law`7Kq5p9a2;7ZXH9x35A$MJgu7pyO)|1P$}3!q*Lno z9zz8wt;Pek4d^!t)zM(?migD#2A<~Q8-monBv^oP3}S18D3U54 zLEnhBsd--d5%i4$833>F5%dkddO#`k-CtuHmjoGJ`u_Nf2$^1@d0rYO0;vRyq$d6Q zMRftc8gKys&>Mvs0k1b5N1!+Aj0)Qt^%3ZeLUBUSigvsV66q9- zq}HxFHVYuRLF|D2fv-U723?*t7RTl}mz>ZMWlI|fV8!Um8XG^uovUQ4BgbEk7l?R^hN)AA9eY_nKEJlz7h%Cc z(@L$X#vCISsy?_=MUQ_477R0_jOgSKrC_)fBPHN*SHw}>psMO%Bbcf}lP%^sf`upW z+nV2bj|&D})aZ8nS)`p{8+s3t%A7iMN`zNUDA8phZ*`0RGrEB@@$nD*Sq)Ad5+xAv zLI(mEji_9dH|LU>aAPBs{@UGu?UnlR{`kft+bhI;NE>3uz7A77IM^79vfWS%pJ|fb z??CUiZWwy~$s|KAMWwfxbwom02A3aG-;nk-i?b{ra9EGc#zXX%rg6I*2yxqXd9L^i z?;p2BOY=d)6vz3tQ;3RB)d)c z^gmqyP?T)w1_;+S$)Lv|q2&`>1zHA$DEok<EPavS48*2ayd39{fxmx2; zzu0N&tT}PS=+!kzFu|&GgrN@jt@2RJf1d$03q~ z`P*3MkpYgJBURj`@iW7RYa795_)|&nP0G=2I>h?re`S{Qwz4U-Sz@xZ3@e$ejX1~n zF9}(I!4Pl$mrp;Z*4EZr?-ReFZ=iG;Wn0yVX@9N>yPmbYXvmzqT4~fO3a(|fFQyZE zZ?SMNX=Q}>$PTz|w3~C9%5ghZO!W3{>Jv>a9Ai`>yOPgz@ny%Dz&o1LgNLF+$-8!A z{)mmqxs=nqbZ|+!R8c-3I#YtW-!|>)JxZ-A-i`MXT9XzSzbV|G%$4+E7p!jNgK+kj zu=2*TMHf#diJ22`O72Uvm^){8i_}b5`g#eS7XRgDUu>9VA)FihWb0yQt}45U*;ZGI zl4r^KS*Nm%H(uq!yCameB`vM#CC|=kpU-sC)SpUGr9J;rys5B8kg$(Zl=d8 z%7t-(vsm4kJyL#fTd-z=&|mg#QE~)}#Tq<*gJBaMHufJjoyOI*_uIY~QMQ&gSQHzN zr7Ujiq59R&JC3g-7>8}n;yJNZ?z>U@y)t{Mq3rjzs7cu`)K9H-^6@QG#AFG7{;g9E z;sxi5qGGeM^(z;d?90}Zxgtf=WLr7e4~M%+_5HtOV`iH3c#Ha~ZcD zQ()xtfTtQ$da23XGKrb8Lkx!(pH{L>d6$Wqvi=qkzuZzdF`|$x*tovGpS;%4xan0s ze5}%$;VdiD(t0!HU#l581A&LRKk)U16-7Eu&R^>&?j);j()l7WX@sNm$UE9{6|{2ZPuBnC&_}eo-ty4H*M;r^Uwq74CC5|Yi$`+lc z@Gg3|n!NsjE_7v|%ZT;8Y+>ren;(W=)ra~T9r$|j1K&7r1}|oMx;v6{=xv4IvAPbh zpmD2jdq38~cg}4y+0pZvb=q0+q%ue2yPtObucE#CGW$02Z+%@_5*zK5+M7wP`?&5) z?6hKbUtA51%NdlkXxqfP6xB@x#8w|S*ymk#$OjXeaKly!8;fauW;$cy_hY+NG)uVs zZv**W1N?P5<%9iJ+t2bzrm|1=mzg7WXN#>)*7C`NmSz1LtxD`{4Xu^brW^ZK)D^oU zW>2c+=dvAjmnPQe(6>%oJBMGAHpE_kFSxX{wyd0)(mJB$tn2eSl*Lj-Y^LPnjo#xn+4ZOMa934{LI?fJ6dGNC%4hqITed7w-*=w~2m zPu6`$C0vxldTA#%rTDN zC0;?^k|2r9V%_Ecc_ zIv4W3;TlKmt3M;pmwt%sck2k|^ z4a7cc$G=+K-Q8W=-rl%dMni)cKGX8==mra}n;u?GTC0k0N2z z#@z04fcS)HJ`))F@REnB-vBJe0|q`B@UDi0Bb_*FJ8P{}M? z_Xj1Y&0oqSgs|bsEd?@@sV?03SyAsF_=2>h-JH3bqJ6Y@1>Gdp)#IV+PGCmNc!5!L zJe`5~obD#uJ6uD!1X)hP!Mpzmrxg6*LZS4f>lPrCM=z1KyF}2sgNrAGU%MFE4EH4z zN_t#GYxi(tab5qO;a$>;#uKMBDKyOpqa3VF?|+|lqVNxNX?DS#8Rg;rrdw(xHmFE@ zHDA(H?G$-}G7)A3QEJMn&3ixJ$Q@w@Wy>*__*g0ta0~X zoQ6jbx^5{>9+48OH~-AV=^o?&PY~5n5%FkT-z^~fTV^Hxjw;b28hNUOpw%n6EhG@K zMz>@#>}W;Fk!t9HRC0*=E2L;)y*U#HXFdn=dXid+mrx<9BUBeT*gC1m6K)_iN{osK zzQmboB5Ao%5e%f&khJco$O5EQkhIgNh(UG@XiwFm2 zx!5`-$V<%;4tOFUJfGrXG;p9`HYy^E!%1@?*;CN!eB9$sNVm!;%4D$9x}8M0Nx8t* zd5*l)7AZKsG8e7>!Fr8l_FX5}%?< zd>sS*@i82&KE*w5hj4Qr`Xi9m?Wnu$xWFSLt=j{H6h92rd0MxluAhbU1=`gyj+briI_)agQGPzQwcE6AW(aHRFtnG*OBoQ>c%Km};zEp&;ZYE%H#gBx zta^>4mFmrhG!?5NkhEdFIS!Z02VAzZtT2S#P^?0RMBzof`Ia#GHZ4*mh88;_NxsdE zROuN(SeEA=uS3Yb;bp6f4)Hsxey-m984l<55?VMNgr;((b>l^{%RM7}y`wn9jZ}Yn zh2b9e_;KWO3Vgny7=_G_=}0uyFYfUZ$mfG+1l%pf6r|^$sLF&H(XJxB*Dk!m;EDED zj74(Cqp5Z)BsiNO%O4GyutAe}oF%YQ#>ZCoPwpKe!mcw1Tip#|)uXg9CtIECAfE+N z`{o(!$g3#2JZN4`)-!^OZXCG*GHo2|%}W%l|E5DBeOvNxIvLXHC;z5lhL+sx{F`op zw0`>E^aP|$+>g?z6$Ymc?ISK90VO#NZiFi8_Nd6f$JLO_(2$y&GGV?C5|0>;r^AkT zp5r~ra=zaD;@i(hSrk-c!W8-c@_sXdpxrulRCkd0sL0v%M@N-&+>i_^Nbd5(y}vLEq|JL(x^*}tWr|H}+4 z`?u%g{_+mX{_O?6qaH(+y>?vPUsAB_Keh({Z4)f}E*rfgvaswK_+KX+k%eVX1$vZ6 zWMSEdedeJ>Iv!c}Z#_Hy7KKi=skwlR<##_h<|YM)^AHOtP-c*XqiJ(DIj6?LJSbW& zwOg3S(j1vFC=_!o!(|52KLkKef*vH&c{WdzOoyQnhrXk5wA_2__p@C@L8~6-1L_Qx z1XXlXIpQ=cD5M}Blwf#J9CeSsAXx(cf0k8sE__%NNf|fdjS1#0f1xcVA^eo9iCK^Yw!vQ2OPgoyiB7sc51Ls@tT;z;t9?+N? 
zXF}eJiXxfK8{lc#)cWs_IQVgeO3&h`%tfS3-cel-5%!(E9FM5C0QDffa;_t`ih!*e z-@&*e^04etzdtOt9#J_5RF-dB{;l|5sJKG2;i$4%KB#Qzp4?IS#t10?Lgw{Rd6@X9 z--0QBxqOI}rw~8lMVk+JnKqT}LvwSI-dBO{zOH9@#7rT=OiFjcQTYc@KFv`4sL9$i zND-a=zeN<0B3*ZmiipF|1q~G(kwi9BRGNUG71HA6%SaLB@}nX$M^rj|j*4g@RM`H! zM5xqoAw|4pk9r^Waw=3ocJ)!iDCi(p*!r6zUSQgy+CNw2Bb|H^nILcegdOE-M96Pc z@EtY(7*dyra}&~I$nk<|kM=s^#YF&kvB4)El`p{}RBjxxB8jjQY5>k4pn~UU^cx4X{0qNe9We5kXC>+8!z+l z4Mnn_2-y^F=3!+>-$K4~F%N5ibMFb=EogpD=(h0n*UXPLi=RjmVuQ4DL3r%_Htn|5do*GH=Ya7{+_THV^Sfa?2%n| z@&sYZv{L~5%)p7g4otlp@TKZiGj)$F77)JkoS-mlG1gx~!^kmCxG*nPU3ksyWB1PdmW;`ITkt(;w&UqONeS>P1~yBR_2xgVnIv0{UfBz|l3(B? zHIBy2@o|!>Bb)f2>*fdVDF69=+G0!>Sm9b>poY*|#=Zxgk7JTf-XtkdlW=+wLUl#I zl!-cCShiC4(Wym|p$Dim+dS77a4CmF$M{EGQv?Kai^2Tj;UpEdb`zhU{6X>v<1lbN z|4XnW9rRU+=6Yu(>SMCQz)5mOwBQQE2KY!j!^6CJQ;}>4jy%U1YgSSY=1mwQ6XFje z+%0}GeJz=Far~Bfs0>ucRSl@sj$KisV;IvPCfr73zUsWY4u*2Mb1T_jnt{`xB_+!+y zeoa}rb@Sc^#O6&GF!W(?}8r4#GpI3gBoRKJUdU>?W#^Yhk{~ z!AS~RWjY)WE`P0rxP61TYwLK*eIT;Nn`}jiXLsR zJKW5j6y%H_;V4G2Mq_MY`gXyF5Hv)@7r>sx1`A(7&%(SPCTkl~J$s%=b__1~BvL&z zO-_YaqjP{RBdzG&lN{ch^P&EIwl6*uHG@t5u`?u5EJWR+u*SJto8c;|`i z19z*4s$o$B{!im&`?M=7B_f0>rS%s3-wYuFa)Zr!Jn**x6poUA_fFpI~2$k!{s z^-qVit=>e$(~2zhvcWn&t>m_WV|KjEYgTw%uwewDy5;HcDBlve`me}Si5M$4`*uaF z%~W-K_R?b>rsNkz<)z(LG<4GCo))#OUlzth%K5QXh1-mlMR^-{7q5Ho{d>~$B?dOz zMRPX~BL>}Yi`wQkEBo}^jz+P-KjEyz;VYTp5k)-|&QSrWFAcUIE)E)6W&eC*OrYaP zb)W(zvf#vLgi_iL8=tw|{mJ^S&s9qUzv@0;PsP#Mw1o_rX)QWVq>+fnV9YcXt-cD; z^X=_z>;0iMmKowZJh@`ku(#co(1JPdVbWb^T$@WS^dxUJsfFIsiRlmQ^UO*x>OxaP zV|NzHK3P?rzwf9fJGkSUH7J%akALtfE>Zxr9tuR52rlCq1N5_o@8F9aj>b~V}Hi>}z-F6(|p%|iJNe-k+y84(}9 z+-Waod-*3--}v}aZKqe+B&;2m&R0uFG;u#S?llM+9>QS=tVstbzDhePDV<5Q0z-ac zN$od>^@$;OncmoJwn(O8Ph&po2R*NSD#XMc2lP5wVF4|Ybxj0njg#&oq3#? zu>I4S_je0Z3w}HOKOK>Dg3;UqbE48v;cx<*c?+(fyRD)sKY!kX5de{#lqf<#njg&?Io& zpdxQZznPCK!E_ci?5J1ey+a-ubn8#J^uZT_>6ChL>ANGS7L)d zdyU^3A79&2>*>^wd$7Lfg`+0Yg(t5TUkof+b)i3xEq(Zl5VOuF5uwW_STQ%9+`CN^ zMY?zG_t$bQw&58?!cYu`&@mP_?@+$86(4ZWxgEMVno>BO9F(Zc@omdyLfLughtbIt zUlyn6wL7EV_j-DLnDPBd8T8H=L(9d%k)5?BVG|jb8ZXr^P2&0xh=y%5#es^)qxxR& z*+?<^i8pHO35`!zv@QB(ZKz&Q{~Y4%&~BY}dKX@PZ)ba_w~?<(S0%1Av7x(~)$kGp z86S;{;#?oPEbdw*{fokK(c=Cu*4bfySX)QR?I-BgFAqI@a{16EX=bQjx30&;VzGAw z`-#mt-ZD9M&7v`m{auL76~pEJ5kcd9|lI{SN8S+RkG;AYjvm$_@4Q$%lu zDSMh$?+&w}ZPaQu7~<*noG;0#I4D>Z`ytqJ0`+12C1#55NqD$hZ|>o)P}$3`2EW}p zcG2z>?Dqc_2f%n5ny{fP+-9})+55J>N-pjC zAUh(IyO5hr#NZ3>v@m66s1w}%n$$dsV?u~fDdr~Xln-c zT??Gne^lOf?K#{2(oD{S%%>qTB2WB^ZOGJ4=1+@ZT0^$g_&;^_j|{Q|RIl5gg1ZUD zj4<;WZL)u1#}h?O`MG#=S}c9eh-!uXyTOtxD#H&0N0%pGl`e68y-JUI)ov+aLd7~< zjBTr_hVXgU5i;fMz-@9Oy`Vy^v zf2-#)Nwy1p?T4awC-7c0y{Q5IL)(8w2G{>Fde@ywxvbc!H+azZ<3m;Ir5~0OY@YZr zX*QnPj5{+#pC$T*O+lRLCi(R*Gz_S!3U{0nJ+RvmDD*i*K z47)~KA5fOsim7hP1P+g-@ z22lxd6_OY|H4XPDvTi${4>?W)lD?-_OF>^ZhYxutucc~P(;RfufVI`aKi-WQdta);%fjl5@#fVTm>6BxC zD}5k*@5_~Q!jJQru~NKZY~`B_K_ypsCIgl8OR5Y6*oB^X2_*Y+ zZ?JFlB~?(cdt7#%6 zG~WvXWz@^8<;SJO{N7YMwj_8bg*pX@7qreA(L6u6zU=C9Z?eqjDVnOrB$U)n;DsB8 z+rlQJpS@RC|n-rwdxKZmNRw`nZisF_aRWOwbmPR`hWfmuH1t$`vB zMu|X%djU_Z|nJp)>HhW41?)IuGR@Ju!%8kK_eE*=61ibF<9-Oi;znB(*bG8*i zU5LuAKIi{vwW6lP?ZMB+hXbeWKYCBh;$D}b+Zk-8GEKMc1jw?+U=kl03sE1b{GBR#&wi3z3L~|- z{|^!0H9yr>ogsYU4TH}Dh+hX+y#A1*tbs>BFC2C~kCN|St;n|{O%FF0 zq~E@N706Af8`{i@?R5B`F2Kt;Oz8kCXA#zHO?R{su9L+vIH|Fp4V_+jvRV>kii@&j zL9_$m%_chhReMe&i<#=q(U{VP+`xmBwr6r&U-&X7U2qZu+?ne74klIyJta*M{gX?Y zfjOQkMsQ(P*0&aBk0IA?r#sjO*DbS?Xxc({0y?S*fu`fDRSG-o20c$teoW?NEu#OrkTaH`sEj6@^Pyr-F}7R^>>=A-^3rVB%8Bj zTi;#%Y|R_Gkn$zuYG1tWm+wvWb9K6DvUEP-vZa#lU&=K>T{h@Pzwsx5mwM@D;?{Ma z_8O!eHYNTMtd7MHMBA}bYuUwJ=WQOhbUof0&S@bW3=jU2 zjKN5^_vCfQ)cIGt^Z@Y^4FfHPk6GcNJ;r-0h 
zSS^Xke`w@7FTOEOZm~ZvKxLZCAS|Nd2ynw z2j5JSh#zmt?e89*y2#v_@z+Wy(XOy&pmvxFaG?5O!8K=&uUg#QAZ_D z>L}sl)v>khrS*isqW0o2TDJ>2p)cJIMn`ka8f^Gkh7@oIy z_quS2$xcq*a;)`gnNY9V{i@@=!IoJkR~V4zuJ6@XxY9&EFzLqZaiGRk ztU-@atu0Pff~*z9X=%4Z{N3s*7x(kRr3S41(B0k$!suW1UCi0D}8*}L)U#8*|3Fc?^UpspWI<3(?;8Q2E0At zaIG__7>60QDw_``r9Ld#8tV^0p)4W98hJt-4r5B>YXxVS;4>$|vW1z5R}g_oc~xC8 z!#PhxI~wYCjKR$SSaOz^O{r2YRm;ctdCRdHH5K9?WeKuIIuK&plc4WeFw!h10I^D{y_nI27z8Lg;CG$yzoelqCM){xMO7QVy;Q*8g&^|_7kfck3- zjrSx3jL1E7zq2gU$MCU}UCS$IEMR|av(p&$d1GzLRLd*4?5LaIOjRgE`iNen>5Vn~ zXo2BC(~~E_Q&C}}evH>&j$JG(xFA*j(2cQ?i9BJ|!aOYi`my3=h&o_}?Z-iJzDCX9 z(+g2etFh_M>lqZCOSdVxuZsz@I4TmW?~J6+Dsjg^XD{(^x)-mF;m0O*Dh#?}1HA5X zFeOe23)!hv5LH95pJ`sIBaSk%V66Owl5~=D_%hF#ht=?=6BfP+^$zr#8`H47^L{Xe zT`nyAFlU;dHFzsD!jXn$tvPe)`Zr+S46ZLh9lzX1UZc_K5^X1D7k5 zF3)~(u_@JG2qp1llt%S@Yg@3Ov}e7km|!tA2uFYrSo_`e3`< z;zbP@$Qn&#xQ;z(q-73T13Yf_bo#4=-4fmTmr7T^udZs4J`HI;$DgDlD3j`HMNu|0 zNAGr}=?vC8*R^DI#^lu*jYO%KgwO|QPVxk?MvJj0nz#!=b@k@x^vZ~0KgHOFksGw4 zgtJ8aRBtYuzg5n3uctubY5!w{#`nLBD^hYC4@QUTJRbdOHZPp}xESr;{v%vyji_AC z+Eb0TzdK*cYON{b)J!NdGUA&u;t#Bns%y&^qMuSF&aeMYnr-VD)#?c6gkAUBVCl`z z+k52}UGMgWL^~d@c}_`JO^=+Rs%WEZdol5|e4#?jOiOfg@$0wfq^A=5-Ybts2a}w_ zv)jtOCZ9e6IB$o~<=9C-9*ex!ZTOj+_ZxmbWN@pc4>$0<2t;mudm>cXJt`1s zd~!Crr_N4ThADB7y86GD0qo; z9~_!H#ToO~MS_~pvpSrutvN0YhhiXB)hE*5L^SiY{Q}b~YSV^+@=8{H7EE(kkCnhp z0r@^h*m~A(pVUoy==;?r-g%9_{NXDWcKfb&uDD=H98T5OFL{#(SF=APJ*U{6(I2phX(jwyo& z#ryIq`B&DPLf5%`{&DJ^v~}*0>ArHPG@NRce7U8Ywn>-1C|1wVd$RrN%iS8;ANGZ> z#~tMHdEEo`cZ{256ERZ;7hF@P@=1wn-=C+>*0Hv7?_DPZU=5Ot^;l!~LO=OlkAEQI z10ngC5|dXg9%u0?u3l5$zdzU0^KUDIl9j~(T4(ZK!;Z`?76CB_!7s1ZEN`@M#ZGN5 zDikU?+?q4SMqeLTm^_~_2cX11QubcSQ~@uxyS?}tZ1$xv5-Wlz?Z}Ww2fuI|bqWHX z*VLtQ+m!qqpv?!e>R*jZ5_AM_jnXMr#p`sU*E|r60xke=CrHQ-7%Q9#5U~U-rB?90^6IeqScqK8x7g|`1?pE z^^_Y}ZEa3=w>&nZ34eX{TJ9BVf><#;?O1-6H&0UEFW+2G+DSh_``Teq7%6UGw_CDrjo;%wiXJt#}e!TqDuyhnly0>QSqs-|IRSGp?2- zI0wQu-G6-5)--B;p++QC_h&5T?KxBOZE>K2@>;ITVZ4n^-)OZk`NdyW>)zTtyIr2B zaZ}pUf!1v@ie3MUuX`OSVQ+OP4-9}N{J(T{OZSz?M||p9b_*C1SCUzL?faw@hi>NL z9*=6k8`EOVFb&^NDkXe?wXZ}?PDe$|XMjvB ztK4Eb-af&gB|nmL6I37&JmZ}mx<{l&gL_<#h_A5+Q-=?#-xUbpuRhsO}eCs;F%zldt4X305F3pc@bhH-^cLi zF<2r0zTK0Tli)y=1UAP$c|N=ntomp8`_`1Vic>OrPY51U z;)ip4v6FCUy*aopJXvHdGY8$xV6ma>J7!5aEBy`lq{{ye<%Ir7WS3Y{%rFc3e}J4#Xnvi^|*8*D4KhmiHMhC zxp1m-WcXH_fL@?g%3@PYPu|M=&#t$6Sa#$6N<3DR&pKNP&cUgQBA3Oo#g*##x%bq+ zBMlAYbF?yN0&vRzO4#1nK8EMEu6{Y+yQ;BGmx37kwqWu&Vr#`y%QN_#pz3 z+1F3-XG38%)ujEK%zUS7UuB%`ZAtid(0>M(#F3Glaa~Rl)VYNjtPz^+zmf{+Ip~edErr)m1d9Ah1oiayj**^$G+IEBovlaXLJkwPB#JFgzr*Jaroc z++75@Pd2H$zEvx4TE|vcl?qR`4t}59Cf{z(>lXTNga?MHoO@gpQ1<3Th}txJ*h7(PiIOq@x(X3Z6fnOxesgTy^>ne@7C|#wHyJNT8Lr#wrB@DE=xoRpw6^YaWW7dLTl}t7FU7lr+Ru)lnX(nI zX#FRiT7@V%12}#Z#&6MQ<)^Ow1IwzEYVU)u)&U7$#S)|f_1frir)a(n^;b5V`!?j< z`qj^@lRMjRCds@bn$Yif|4_|cc2~;7CHC@<+Wz!tNrDW`eddg3x2_DS_ny4Wuezkd z@RN4mfY~3To02bVHs@PSc`ZL*FPXgDLN`V8D_;}${PmQYf!;gDlGC93;O#D5J-n1% z?&q~1QRKvAoK&3GcXKq0ElhfHwZ??U?3(Z3HLSvGYZ@1<9JkI)@(XqH-_8?h{*if)tx;gIM-FV|EfWf`9 zmzkA6oE&f}s}ax*EbG|Kd1RXP^P92X+=$W_jZ$B1Cc9Z`V4MQO>ZDu9QdH($pMHfM zncXkyG8dO$aQ8o%OKCw_-k$0qd!Ej-mc?X!c}lOUr>COU-nXPF+qxFial=L^+p_8i zbVHMKa^o*5IGJ-3a!_k0Z)JU!Unpd^C*i?5qF0|Vo?&`$ku7{%BJje&*D7tZclx7j zo4T#lZ4u1a@SprQ5`{{i1={i3O6usZ$%k$$8-aWO;(!a^)y+rrbwgEmHB9c zmKQI(>yv)^JT_Au>8SGS%dA|yM%Q?sZM}7kB87)DwZm0n=#~3xk4d+~j2$|z>uVHZ zFOqHlUvuXf)x_5B@c|SL{U}GOf(=mV-B1G7BT|(PQbQ4tELC7-~ggp0#KB-1h z+gnD6VMY!D=;Ibce3E=q+3aTavrPq8b$!lNtYs*AhRj{nGY|FwWZDI&6Gr1QP$*c6a5@-A+Mhp*Qb%rrZik=>2tcxR#V8u}!}hpx}SI7U-htgRfJ+ftZ$6~ zbsFT2TEh+o|0;Q<9E^5ibx~pi6V*UP*0+l(-pM-d9vqUuQ(jeVL6`v`W-;_MOGx`6 zH%shIlI2{5!W$tu@i>n{s 
z>Q3?Bsfb%aDYt8wjVt%C0UPY>X0OZg2#mXzx`vbbGrZS03Cx8y1U*U~@>mtvVId9e zN7{Gr-rhG9XK@)Y>xpfd7yK1_=Wf>x={R#o{`qOm-Mn>d-sOzV$CF76-Hj)=AEhXE z$v2_~w%qtO%QqFIff78u{45q?B|{BDI4t5*eDgjxor&Hn3285 z;=eQlY&KSU1PA~D`lK zGMjab_f(OkEq$;!&l^!)_^YNKu^J`Lbs)2ztaRxSk`-Am99?Hdu-306zSyPq7z0as@UmoqT5^e}K2XeV^4r7gypE#Ya z8CcoZ65Z?(k>K{}6TRQ|MC)`D%8WP*lW?UNe4flnOb9^lTy}$bT zO$#JyM}fnfs^p>uFgO(|V3&M$4iq?aB!JYFpbWYE!BWVmJd9<90w?hc5_e_)`>OnOF zsDtRjWF)I;s8*sQ%S(y&aJ zR0}YYOzkZd`I`>M`#&T(D-Ou#MRlQknmX}8N~6bv$F?NGg?S>n68-z(Z;Q1<`jOQByAq@M zGzt}2+wG$KW21UfIIx8$(Xe3!#VotQoB7}XT%h{#O%;&+=W1c12W(2RUx|+0wCZtMd_cCEYAKr6tzvissH4o(Q-nDI6OV zO7gxdTb<I?+;#q-XI#v#DySq^>mgTEWvf+vbnCSxSf%`x!yb~YrhPg~D4)rHZjEYuZtbk<5_{NJ zUGbI|V4(ufQ}3a$k249E*+gW6Z1xYgzJI)x-zY??yDy<#GXz@ofqEhj2fd(j&ZFDk zO=3HXv&a2@&d$_Q&j+;zSw$@l?#f8x^>}4Fd<6{Zk4yfuO66GQ?Sd_~a%69uvjnC4 zw%ZHhLB`WQ%~Ou$^BK?7$OZ|q?vM(cp`+WXb#m07W1Y}A$WMhVilvxw&jeisfaaKkAekd5a^l}?fUyDf z^Y*LCCezyWXlmfC$dmw1JN-(&g^6ReZfNM9TyhNQR(9HrW^eCQb+#~(RqUF5|I*~x zv@u%P#56>$V0_4R^|994kp6K^XtKl;beEc}gGu0A?0X?o({)yJu)W^vrgm{}O&@)rH7>nfTH=wOaz3leKJ^^Gol#p;*eD0byhk`khbiSm#I*%i zB7R^REr+Yn3Sk-Pzd5DTa@nJr#)nk=oX`6rDD&|#y2rhXI@ZSx7 zcH)*m{E9|IOvkkvjqc^a#sV!%7rBZzIs2|egr3M1EMMqi?xfkwa$Q|p=#I02 zmgkZ2#JItn_HYi)`6|^_ofX6DSVkhiOkT8#$xr}%B`0m-b5=^L7CeR1fnX@~v;}V?!b7ul4d7-^ z+h7j+H`lAT2rmFzAWa9|F*=&@?e-0W62YXP0Y(|Ou>R7k=#M}Y_^Se!MQBx63Kf8X zz5!Xj8}ZQ}n|FUz?LSh~o}V}G-#NxWvJixVg{QkyXs7L=WWeJ=F*e*>Qa77`p{&T^ z)#QD9>uXPHgx>arlbP}OUTqpVYxl8vkG9#}d|&28LZZ-DM(l$Fab-8aGdsXFU!d38 zy+vO?$-_Zpuu(mzrU=voUpSXWDp6jAuA2Eb-$n6s$yLaSi68hHMA1FVZe4B%Or^8f z7&B{+t;;(;jmHU6%BdYl!4l64Gs7^faW*PyE+Z7{;MAMBK*Zw2o`u=;nGFiZlk{z) z$fE;AWt)Av7J=3vG&6T@bhv82bGMzIEjuIuuD}&MG|R**K;1iLRqRds<<5NKo@8mxo{2D{yl+%GK(`N(xbBD5Tng{SP^}hxg;-3gLkY6CvD1L zURb3n?ZhSS=##8B#!Q3{%f68te^kT_P7QV4`BJ=CooPhDs*(lO=SnJ{iJ3xBf$-`F zbe^CX2s=mK8}Iml+luhUCVe|~DVX(T3`mQE^epJXe{uEj1t1X6`M)~Wi|=ndrUO!t zO)@%NIjClQL5Ms|eFPV}ez;439=l*Ih{**i#4mM1_6u zRH_R|Zv2mb{{8>|KaVPPKpcTaHjf=o6@i~S{2?v-b1HcfIj}}+mkLTXTjX56&ydOk zYNvxuuHhN+?sHZidD2lj_v>-6Qos{5q_l@dj$0nP%m6`*w5m6+>6+K1tR7#l?Ck6W z2`v_wr-gDh$Yj7-ufR*d(d`_mK_G}0bkpx6q41H6I51;};4mZvsAL*QsE>DCp6?1K z9w8o&%(e3%nTF*${j@%C+#ELTi60Z6XKNq1SVJ4^70?b*1;>`-A1K{U?@;&DyC2=O zmbaP3UG=WSRO8}~n+K`gnQx$?LE!icy?5$4;e8+s3MKJhPL?=RRs3=o+^F&SGv-)^ z*Bb}h#9u^%$=g&6V8WVsy&CZXzx@h@E5te4q{!dYkqmn88LR*rOr^Ie|67Lty*r7r z9=4KdpJagMULJncbRAr#0xq75n*F^beM-jq1!V0$U!nmzD6o<3^@(FXa9S4XXXFz= zPi)lr%c&}|`Sede@$U6fO^qgKA!Y5!B$5sirTpVkEf6=@nZe|KHJ{f)cGv^z^}Yn0<3#TIG9`Y z&AR617>ju)W&z|k3IZP24@oN4Du6oC(8DofGpAYXjqaPPBySm$I_?Q^Pz+6_KJANK zZrsPgewQz9f)oes|_4o>hnu;!nnHt71&=fHU8m*`okAlp7Pad~53!^~$+ zuymKR9LD}mthG0Z8T4u+;Iu9eJRdxYjgEX3{Zto(LWlHerSi0PzD&{L!X=!|v$aIa>4K*ANPZt(K!>a1}U{ zsc-fPuP>;nsW?X>NgWauJ%a)R(`yJ(s&gY>5wzmq@zxp>q?Fd~tc2sBj}??nAE7LOcX_BN;LXQn zu1JLgZmKq+^m)hV;2w$mzP}CYZL#)EUa)_5sJ%)0=Jo0r6m<_=H}7EJbOJ9U+U_ok z%{;4FB!e~rJPJGw@Vc(p%t~7-WDMQHZgP~SiM_V7m5G6?={+k)Wy(rT3A`aZ@3SkF zrwfLaOO-lr#0rGJcHvvv82(@uJ1Vim)(AWtZZM!&~Alw`}#j{Wg@qVO~rzgcv%Ri`Z1z=Eq>4KKx6@>h9eByMnjz^*d`&S>S z?3Ln7%Qdr$e!STt_ksx3q1p3%A?>_5#h#N`SCy?iyds^XTqjonEzteZR2r8$dr&+g zl@A>Y2l70p-E719%l)N)x$n-B+8kMgDlaB}_=Tua=v<0q<~yuLIUBGNm&sG`pUBDu zEvedvzj$jfB~k0Ifw}sofVltmZ`?gEiqHFV>;WVpCj6J8!4Ng+glJOOWZrvfly}9O z0pTP>aMEDk-gsIaBM7wWfIzFq#J=|>y?_9ys1BfmV#S7UuasN#4n^Hq-ncV)X)A5O zcuE9+Z+`SrpL1zLPkOO$oxhSyR}qdrhs@Wouao0?DsvOxQh1IFP8?6Hu)OasuG2}< z?D4fV`kbZSp;1^G6;6t}?xJ~Pny*ja0%`F)2qAPmyK10n{h_i9m5+NfHfC|gtcLU# zH80#IJ^3m=wsjtqlE(8yw3Tn<+z!)Oyn5G{+XdtokGU-7#;KQoo&`9UT~}svy>Iff z0JY;qjaALi&A)hcOohLBx(6(pZt|1^C|QA>nPv838Y&?1dZUlae7@d0QLJkPv2Z{b 
z?Rfv&$NhBPF>irK@`DfO<}k}|Jj+s1;9XYWC0FSCQ1kLhu}0n{}!!^c`Kq0|sTWD^AbMx)={g z3EPgWlq!d}x$8B>$VW(7L`GSx%!6V4X-gq8O-ujIdY{y}!tHwh#}lo?QC5x@?JP7R zO|qd({m1XUH>?OxOi2FTBw{6sVb?fWM|^C%FKu8H;b7P))n&EV!LcSPtJ)uvi8!Gs zs}OXSGeYiBMBK$qax3NYY-WQSBdf*KjmVvIUG-`%T!^CJn5+M(ZGITnjP~N1!ld< z{`{}Q>*S^=p745$B$o2Y(D@DxmPFRzi0rl|#1EbOXZRrF`LCLgaCCRdyD~RQ-_|!{ z<|yi}!5+&nN6+-Iw@RNKM9%U2@nn;mTQo)LF%vv%jtRNF^j^;Hup*1C#+gT|4cDg+ z2lqQCIEgN4)6cVN(*r?Ct!!-8Jd5zOuHJ#})OvvvEAYDD-&z3o02q2(1Tc7y43%1T zP-SVI7IY=_NUD$90JM|X0^5+6k z4yY2Se4(vrew`QQ1-R|JM>3VL(C1IfDX4bXCyx#Kj#-Z`KAmA-ApyiwD_|%Cr|d*a8wI5BCm9tah|Vj+bdGpNb~a4}oudEFTcK zT$^c>2z6c*z+}$?lVJ20<)_S{3uky|rtB?6Q>4 zJty*dZC?H+N%{w@Hfxr^6V-mFX8YIL8BDVK_K*N;IM@(S0Gux)uWaV$PEX8?J;i;D zHWLe+2Njne7izIG4+iiR#l0rM4q!B?L@;1@Txfepj#h28kT>X^vvmc%E9Gd-ZN-G! zQVxNx9fRoX=WLrNz`6#vVsKy*=4E8ZyH3|{<_AJ{q?=foS5mclpxh=@*=W41-q3by zk7=5i^YBff@1E|bSXL9-voMcTdCT^7azy8XfOoEWRTdsDTtEx`bhEVj3rRKB1SW$# zU|#GPt4Gitbw_R7CI>Oi9hf^`;I1RQaW;UX46}d;3}$@V0A+(Zt6=cJLk0p%YM#ZQ z1LOm1W+6~=pIHocbN}Te9i0OE6~OMVU(cTbmF)nm1(R75=egkJ7JLDFEx)bU|E%W! eUfC@p7lp8OYVL>Jls literal 0 HcmV?d00001 diff --git a/docs/_static/decision_tree_minimal.tex b/docs/_static/decision_tree_minimal.tex new file mode 100644 index 000000000..f8b922e68 --- /dev/null +++ b/docs/_static/decision_tree_minimal.tex @@ -0,0 +1,106 @@ +\documentclass[border=2pt]{standalone} +\usepackage[utf8]{inputenc} % Required for inserting images +\usepackage{tikz} +\usepackage{helvet} +\usetikzlibrary{shapes.geometric, arrows} +\pagecolor{white} + +%-------------------------defining colorblind friendly colors +% Using pale color scheme in Figure 6 +% by Paul Tol https://personal.sron.nl/~pault/ +\definecolor{cbblue}{HTML}{BBCCEE} +\definecolor{cbcyan}{HTML}{CCEEFF} +\definecolor{cbgreen}{HTML}{CCDDAA} +\definecolor{cbyellow}{HTML}{EEEEBB} +\definecolor{cbred}{HTML}{FFCCCC} +\definecolor{cbgrey}{HTML}{DDDDDD} + +% -------------------------defining nodes +\tikzstyle{input} = [trapezium, trapezium left angle =80, trapezium right angle = 100, +minimum width= 3cm, minimum height=0.5cm, text centered, draw=black, fill=cbblue] +\tikzstyle{process} = [rectangle, minimum width = 3cm, minimum height = 1cm, +text centered, , text width=4cm,draw=black, fill=cbgrey] +\tikzstyle{decision} = [diamond, minimum width = 3cm, minimum height = 1cm, +text centered, , text width=3cm, draw=black, fill=cbcyan] +\tikzstyle{changeclass} = [rectangle, rounded corners, minimum width=3cm, minimum height=1cm, +text centered, draw = black, fill=cbyellow] +\tikzstyle{reject} = [trapezium, trapezium left angle =80, trapezium right angle = 100, +minimum width= 1cm, minimum height=0.5cm, text centered, draw=black, fill=cbred] +\tikzstyle{accept} = [trapezium, trapezium left angle =80, trapezium right angle = 100, +minimum width= 1cm, minimum height=0.5cm, text centered, draw=black, fill=cbgreen] + +% -------------------------defining connectors +\tikzstyle{arrow} = [thick,->, >=stealth] +\tikzstyle{line} = [thick,-,>=stealth] +\begin{document} + +% ------------------------- tikz image (flow chart) +\begin{tikzpicture}[node distance = 2cm] + +% ------------------------- nodes ------------------------- +% ----- node: 0 +\node(0)[input,label={90:\textbf{Minimal Decision Tree (Tedana implementation)}}, label={180:$node\ 0$}]{Set all components to unclassified}; +% ----- node: 1 +\node(1)[decision, below of=0,label={180:$node\ 1$}, yshift=-1.5cm]{$\rho$ $>$ $\kappa$}; +\node(rej1)[reject, right of=1, xshift=3cm, align=center]{Unlikely BOLD\\$\rightarrow$ Reject}; +% ----- node: 2 
+\node(2)[decision, below of=1,label={180:$node\ 2$}, label={[align=center] 315: voxel counts for signif fit\\of multi-echo data\\to $T_2$ or $S_0$ decay models}, yshift=-4.0cm]{$n \, FS_0 \, > \, n \, FT_2$ \& $n \,FT_2$ $>$ 0};
+\node(rej2)[reject, right of=2, xshift=3cm, align=center]{Unlikely BOLD\\$\rightarrow$ Reject};
+% ----- node: 3
+\node(3)[process, below of=2, label={180:$node\ 3$}, label={[align=center] 315: varex: variance explained\\by each component}, yshift=-2.0cm]{Calculate median(varex) across all components};
+% ----- node: 4
+\node(4)[decision, below of=3,label={180:$node\ 4$},label={[align=center] 315: DICE overlap between $T_2$ or $S_0$\\decay models and ICA component\\peak clusters}, yshift=-1.5cm]{dice $FS_0$ $>$ dice $FT_2$ \& varex $>$ median(varex)};
+\node(rej4)[reject, right of=4, xshift=3cm, align=center]{Unlikely BOLD\\$\rightarrow$ Reject};
+% ----- node: 5
+\node(5)[decision, below of=4,label={180:$node\ 5$}, label={[align=center] 315: $t$-statistic of $FT_2$ values\\in component peak clusters vs\\peak voxels outside of clusters}, yshift=-4.0cm]{$0 \, >$ signal-noise \& varex $>$ median(varex)};
+\node(rej5)[reject, right of=5, xshift=3cm, align=center]{Unlikely BOLD\\$\rightarrow$ Reject};
+% ----- node: 6
+\node(6)[process, below of=5, label={180:$node\ 6$}, label={0: Uses all components}, yshift=-2.0cm]{Calculate $\kappa$ elbow};
+% ----- node: 7
+\node(7)[process, below of=6, label={180:$node\ 7$}, label={[align=center] 0: Uses all components and subset\\of unclassified components}]{Calculate $\rho$ elbow\\(liberal method)};
+% ----- node: 8
+\node(8)[decision, below of=7,label={180:$node\ 8$}, yshift=-1.5cm]{$\kappa \geq \kappa$ elbow};
+\node(chrej8)[changeclass, below of=8, xshift=-0.5cm, yshift=-2cm]{Provisional reject};
+\node(chacc8)[changeclass, right of=8, xshift=3cm, yshift=0cm]{Provisional accept};
+% ----- node: 9
+\node(9)[decision, below of=chacc8,label={180:$node\ 9$},label={315: Accept even if $\rho < \rho\ elbow$},yshift=-1.5cm]{$\kappa > 2\rho$};
+\node(acc9)[accept, right of=9, xshift=3cm, align=center]{Likely BOLD\\$\rightarrow$ Accept};
+% ----- node: 10
+\node(10)[decision, below of=chrej8,label={180:$node\ 10$}, yshift=-1.5cm]{$\rho$ $>$ $\rho$ elbow};
+\node(chrej10)[changeclass, right of=10, xshift=4cm]{Provisional reject};
+% ----- node: 11
+\node(11)[decision, below of=chrej10,label={180:$node\ 11$},label={[align=left] 335: Will accept the lowest\\variance components until\\1\% of total variance is\\accepted this way}, yshift=-2.1cm]{if component variance $<0.1$};%--check in kundu
+\node(acc11)[accept, right of=11, xshift=3cm, align=center]{Low variance\\$\rightarrow$ Accept};
+% ----- node: 12
+\node(12)[accept, below of=10,label={180:$node\ 12$},yshift=-3.5cm, align=center]{Likely BOLD\\Change provisional accept\\$\rightarrow$ Accept};
+% ----- node: 13
+\node(13)[reject, below of=11,label={180:$node\ 13$}, yshift=-2.0cm, align=center]{Unlikely BOLD\\Change provisional reject\\$\rightarrow$ Reject};
+
+% ------------------------- connections -------------------------
+% draw[x](origin)--node[anchor=position]{text}(destination);
+\draw[arrow](0)--(1);
+\draw[arrow](1)--node[anchor=south, right=0] {no} (2);
+\draw[arrow](1)--node[anchor=south] {yes} (rej1);
+\draw[arrow](2)--node[anchor=south, right=0] {no} (3);
+\draw[arrow](2)--node[anchor=south] {yes} (rej2);
+\draw[arrow](3)--(4);
+\draw[arrow](4)--node[anchor=south, right=0] {no} (5);
+\draw[arrow](4)--node[anchor=south] {yes} (rej4);
+\draw[arrow](5)--node[anchor=south, right=0] {no} (6); +\draw[arrow](5)--node[anchor=south] {yes} (rej5); +\draw[arrow](6)--(7); +\draw[arrow](7)--(8); +\draw[arrow](8)--node[anchor=south] {yes} (chacc8); +\draw[arrow](8)--node[anchor=south, right=0] {no} (chrej8); +\draw[arrow](chacc8)--(9); +\draw[arrow](chrej8)--(10); +\draw[arrow](9)--node[anchor=south, right=0] {no} (10); +\draw[arrow](9)--node[anchor=south] {yes} (acc9); +\draw[arrow](10)--node[anchor=south, left=0, align=right] { yes\\still\\provisional accept} (12); +\draw[arrow](10)--node[anchor=south] {yes} (chrej10); +\draw[arrow](chrej10)--(11); +\draw[arrow](11)--node[anchor=south, right=0] {no} (13); +\draw[arrow](11)--node[anchor=south] {yes} (acc11); +\end{tikzpicture} +\end{document} diff --git a/docs/approach.rst b/docs/approach.rst index 1a4f9e557..ae59ef0bd 100644 --- a/docs/approach.rst +++ b/docs/approach.rst @@ -338,14 +338,15 @@ classify ICA components as TE-dependent (BOLD signal), TE-independent (non-BOLD noise), or neither (to be ignored). These classifications are saved in **desc-tedana_metrics.tsv**. The actual decision tree is dependent on the component selection algorithm employed. -``tedana`` includes the option `kundu` (which uses hardcoded thresholds -applied to each of the metrics). +``tedana`` includes two options `kundu` and `minimal` (which uses hardcoded thresholds +applied to each of the metrics). `These decision trees are detailed here`_. Components that are classified as noise are projected out of the optimally combined data, yielding a denoised timeseries, which is saved as **desc-optcomDenoised_bold.nii.gz**. .. image:: /_static/a15_denoised_data_timeseries.png +.. _These decision trees are detailed here: included_decision_trees.html ******************************* Manual classification with RICA diff --git a/docs/building decision trees.rst b/docs/building_decision_trees.rst similarity index 98% rename from docs/building decision trees.rst rename to docs/building_decision_trees.rst index 2bf24e62a..b2ccf20de 100644 --- a/docs/building decision trees.rst +++ b/docs/building_decision_trees.rst @@ -139,6 +139,19 @@ added_component_table_metrics: meica it is possible. + +************************************** +Decision trees distributed with tedana +************************************** + +Two decision trees are distributed with ``tedana``. +`Flow charts detailing these trees are here`_. It might be useful to +look at these trees while reading how to develop a custom decision tree. + + +.. _Flow charts detailing these trees are here: included_decision_trees.html + + ******************************* Defining a custom decision tree ******************************* diff --git a/docs/classification_output_descriptions.rst b/docs/classification_output_descriptions.rst index 8a4705827..14358db00 100644 --- a/docs/classification_output_descriptions.rst +++ b/docs/classification_output_descriptions.rst @@ -9,7 +9,7 @@ contents of several of those files in more detail. `Building decision trees`_ covers the full process, and not just the descriptions of outputted files, in more detail. -.. _Building decision trees: building\ decision\ trees.html +.. _Building decision trees: building_decision_trees.html TEDPCA codes ============ diff --git a/docs/faq.rst b/docs/faq.rst index 35c2af077..8f516ee20 100644 --- a/docs/faq.rst +++ b/docs/faq.rst @@ -140,7 +140,10 @@ tested on (i.e. human whole-brain acquisitions). 
It is also possible, but a bit challenging, to add additional metrics for each component so that the selection process can include additional criteria. -.. _make their own: building\ decision\ trees.html +`Flow charts detailing both decision trees are here`_. + +.. _Flow charts detailing both decision trees are here: included_decision_trees.html +.. _make their own: building_decision_trees.html ************************************************************************************* [tedana] What different versions of this method exist? diff --git a/docs/included_decision_trees.rst b/docs/included_decision_trees.rst new file mode 100644 index 000000000..57bcb0f3f --- /dev/null +++ b/docs/included_decision_trees.rst @@ -0,0 +1,100 @@ +####################### +Included Decision Trees +####################### + +Two decision trees are currently distributed with ``tedana``. +``kundu`` is the decision tree that is based on MEICA version 2.5 +and has been included with ``tedana`` since the start of this project. +While multiple publications have used and benefits from this decision, +tree, but it includes many steps with arbitrary thresholds and, when +components seem misclassified, it's often hard to understand why. +``minimal`` is a simplified version of that decision tree with fewer +steps and arbitrary thresholds. Minimal is designed to be more stable +and comprehensible, but it has not yet be extensively validated and +parts of the tree may change in response to additional tests on a +wider range of data sets. + +Flowcharts describing the steps in both trees are below. +As documented more in `Building Decision Trees`_, the input to each tree +is a table with metrics, like :math:`\kappa` or :math:`\rho`, for each +component. Each step or node in the decision tree either calculates +new values or changes component classifications based on these metrics. +When a component classification changes to `accept` or `reject`, a +`classification_tag` is also assigned which may help understand why +a component was given a specific classification. + +Each step in the flow chart is labeled with a ``node`` number. +If ``tedana`` is run using one of these trees, those node +numbers will match the numbers in the ``ICA status table`` and the +``ICA decision tree`` that are `saved with the outputs`_. These node +numbers can be used to see when in the process a component's +classifiation changed. + +.. image:: _static/decision_tree_legend.png + :width: 300 + :alt: Legend for Decision Tree Flow Charts + +.. + Reminder on how to load svg if I can figure out how to correctly generate them + .. raw:: html + + Legend for Decision Tree Flow Charts + +.. _saved with the outputs: output_file_descriptions.html +.. _Building Decision Trees: building_decision_trees.html + +******************* +Kundu decision tree +******************* + +Nodes 1-5 reject components that are very unlikely to be BOLD. +In nodes 9-10 components where :math:`\kappa` > +:math:`\kappa` elbow and :math:`\rho` < :math:`\rho` +elbow are classified as `provisional accept`. A non-obvious aspect +of this decision tree is that no decision node below this point distinguishes +components that are `provisional accept` from components that are still +`unclassified` and nothing that does not cross the :math:`\kappa` and +:math:`\rho` elbow thresholds is inherantly rejected. 
The number of +`provisional accept` components is used to see if the process should +be restarted (node 11) and calculate other thresholds (nodes 12-16 & 20), +but nothing is directly accepted or rejected based on the elbow thresholds. +Several additional criteria are used to reject components (nodes 17, 21, & 22). +In older versions of ``tedana`` components were classified as `ignored`. +This meant too small/minor to lose a degree of freedom by rejecting so treat +like the `accepted` components. This was widely confusing to many users so they +are now classified as `accepted` but with classification tags `low variance` +(node 18) or `accept borderline` (nodes 24 & 25). + +.. image:: _static/decision_tree_kundu.png + :width: 400 + :alt: Kundu Decision Tree Flow Chart + +`LaTeX file to generate the kundu decision tree flow chart`_ + +.. _LaTeX file to generate the kundu decision tree flow chart: _static/decision_tree_kundu.tex + +********************* +Minimal decision tree +********************* + +The minimal tree starts similarly to the kundu tree by rejecting components +that are very unlikely to be BOLD (nodes 1-5). Then all components where +:math:`\kappa` > :math:`\kappa` elbow and :math:`\rho` < :math:`\rho` elbow +are `provisional accept` and otherwise are `provisional reject` (nodes 8 & 10). +The only expection to this is if :math:`\kappa` > :math:`\kappa` elbow and +:math:`\kappa` > 2* :math:`\rho` than it is `provisional accept` regardless of the +:math:`\rho` elbow under the assumption that there is enough T2* weighted signal +the component should not be rejected even if it also contains noise (node 9). +If `provisional reject` components have very low variance they are accepted rather +than losing degrees of freedom, but no more than 1% of the total variance can be +accepted this way (node 11). After that point, everything that is +`provisional accept` is accepted (node 12) and everything that is `provisional reject` +is rejected (node 13) + +.. image:: _static/decision_tree_minimal.png + :width: 400 + :alt: Minimal Decision Tree Flow Chart + +`LaTeX file to generate the minimal decision tree flow chart`_ + +.. _LaTeX file to generate the minimal decision tree flow chart: _static/decision_tree_minimal.tex \ No newline at end of file diff --git a/docs/index.rst b/docs/index.rst index 670136bcd..f7b1f19f9 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -177,7 +177,7 @@ tedana is licensed under GNU Lesser General Public License version 2.1. approach outputs faq - building decision trees + building_decision_trees support contributing roadmap @@ -191,6 +191,7 @@ tedana is licensed under GNU Lesser General Public License version 2.1. dependence_metrics output_file_descriptions classification_output_descriptions + included_decision_trees ****************** From 5921237a712903f467673523e09fb9ed43931ba4 Mon Sep 17 00:00:00 2001 From: Dan Handwerker <7406227+handwerkerd@users.noreply.github.com> Date: Thu, 4 May 2023 11:37:48 -0400 Subject: [PATCH 175/177] RTDfix (#45) --- .readthedocs.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.readthedocs.yml b/.readthedocs.yml index 6404ca6b3..3976cddad 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -1,7 +1,7 @@ version: 2 sphinx: - configuration: docs/conf.py + configuration: docs/conf.py build: os: ubuntu-22.04 @@ -17,4 +17,3 @@ python: path: . 
extra_requirements: - doc - system_packages: true From 26ff9540711721fc3e86c9ebdd091c3bc6cfc283 Mon Sep 17 00:00:00 2001 From: Taylor Salo Date: Mon, 8 May 2023 10:43:40 -0400 Subject: [PATCH 176/177] Update documentation (#46) * Update docs. * Update docs/building_decision_trees.rst Co-authored-by: Dan Handwerker <7406227+handwerkerd@users.noreply.github.com> --------- Co-authored-by: Dan Handwerker <7406227+handwerkerd@users.noreply.github.com> --- docs/building_decision_trees.rst | 272 ++++++++++---------- docs/classification_output_descriptions.rst | 55 ++-- docs/included_decision_trees.rst | 24 +- docs/output_file_descriptions.rst | 10 +- 4 files changed, 189 insertions(+), 172 deletions(-) diff --git a/docs/building_decision_trees.rst b/docs/building_decision_trees.rst index b2ccf20de..c9c0dbb38 100644 --- a/docs/building_decision_trees.rst +++ b/docs/building_decision_trees.rst @@ -7,11 +7,11 @@ of the component selection process and people who are considering customizing their own decision tree or contributing to ``tedana`` code. We have tried to make this accessible, but it is long. If you just want to better understand what's in the outputs from ``tedana`` start with -`classification output descriptions`_. +:doc:`classification_output_descriptions`. ``tedana`` involves transforming data into components, currently via ICA, and then calculating metrics for each component. Each metric has one value per component that -is stored in a component_table dataframe. This structure is then passed to a +is stored in a ``component_table`` dataframe. This structure is then passed to a "decision tree" through which a series of binary choices categorize each component as **accepted** or **rejected**. The time series for the rejected components are regressed from the data in the `final denoising step`_. @@ -22,116 +22,120 @@ trees needs to be slightly altered due to the nature of a specific data set, if an idea for a new approach to multi-echo denoising, or if one wants to integrate non-multi-echo metrics into a single decision tree. -Note: We use two terminologies interchangeably. The whole process is called "component -selection" and much of the code uses variants of that phrase (i.e. the ComponentSelector -class, selection_nodes for the functions used in selection). We call the steps for how -to classify components a "decision tree" since each step in the selection process -branches components into different intermediate or final classifications. +.. note:: + We use two terminologies interchangeably. + The whole process is called "component selection" and much of the code uses + variants of that phrase + (e.g. the :class:`~tedana.selection.component_selector.ComponentSelector` class, + :mod:`~tedana.selection.selection_nodes` for the functions used in selection). + We call the steps for how to classify components a "decision tree" since each + step in the selection process branches components into different intermediate + or final classifications. -.. _classification output descriptions: classification_output_descriptions.html .. _final denoising step: denoising.html + .. contents:: :local: ****************************************** Expected outputs after component selection ****************************************** -During processing, everything is stored in a `ComponentSelector object`_ called -``selector``. The elements of that object are then saved to multiple files. 
+During processing, everything is stored in a +:class:`~tedana.selection.component_selector.ComponentSelector` called ``selector``. +The elements of that object are then saved to multiple files. The file key names are used below the full file names in the -`output file descriptions`_. +:doc:`output_file_descriptions`. -.. _ComponentSelector object: generated/tedana.selection.component_selector.ComponentSelector.html -.. _output file descriptions: output_file_descriptions.html -**General outputs from component selection** +General outputs from component selection +======================================== New columns in ``selector.component_table`` and the "ICA metrics tsv" file: - classification: - While the decision table is running, there may also be intermediate - classification labels, but the final labels are expected to be - "accepted" or "rejected". There will be a warning if other labels remain. - - classification_tags: - Human readable tags that explain why a classification was reached. - Each component can have no tags (an empty string or n/a), one tag, - or a comma separated list of tags. These tags may be useful parameters - for visualizing and reviewing results + - classification: + While the decision table is running, there may also be intermediate + classification labels, but the final labels are expected to be + "accepted" or "rejected". There will be a warning if other labels remain. + - classification_tags: + Human readable tags that explain why a classification was reached. + Each component can have no tags (an empty string or n/a), one tag, + or a comma separated list of tags. These tags may be useful parameters + for visualizing and reviewing results ``selector.cross_component_metrics`` and "ICA cross component metrics json": - A dictionary of metrics that are each a single value calculated across components, - for example, kappa and rho elbows. User or pre-defined scaling factors are - also stored here. Any constant that is used in the component classification - processes that isn't pre-defined in the decision tree file should be saved here. + A dictionary of metrics that are each a single value calculated across components, + for example, kappa and rho elbows. User or pre-defined scaling factors are + also stored here. Any constant that is used in the component classification + processes that isn't pre-defined in the decision tree file should be saved here. ``selector.component_status_table`` and "ICA status table tsv": - A table where each column lists the classification status of - each component after each node was run. Columns are only added - for runs where component statuses can change. - This is useful for understanding the classification - path of each component through the decision tree + A table where each column lists the classification status of + each component after each node was run. Columns are only added + for runs where component statuses can change. + This is useful for understanding the classification + path of each component through the decision tree ``selector.tree`` and "ICA decision tree json": - A copy of the inputted decision tree specification with an added "output" field - for each node. The output field (see next section) contains information about - what happened during execution. 
Of particular note, each output includes a list - of the metrics used within the node, "node_label", which is a (hopefully) human - readable brief description of the node's function and, for nodes where component - classifications can change, "n_false" & "n_true" list who many components - changed classifications. The inputted parameters include "if_true" and "if_false" - which specify what changes for each component. These fields can be used to - construct a visual flow chart or text-based summary of how classifications - changed for each run. + A copy of the inputted decision tree specification with an added "output" field + for each node. The output field (see next section) contains information about + what happened during execution. Of particular note, each output includes a list + of the metrics used within the node, "node_label", which is a (hopefully) human + readable brief description of the node's function and, for nodes where component + classifications can change, "n_false" & "n_true" list who many components + changed classifications. The inputted parameters include "if_true" and "if_false" + which specify what changes for each component. These fields can be used to + construct a visual flow chart or text-based summary of how classifications + changed for each run. ``selector.tree["used_metrics"]`` and a field in "ICA decision tree json": - A list of the metrics that were used in the decision tree. Everything in - ``used_metrics`` should be in either ``necessary_metrics`` or - ``generated_metrics`` If a used metric isn't in either, a warning message - will appear. This is a useful check that makes sure every metric used was - pre-specified. + A list of the metrics that were used in the decision tree. Everything in + ``used_metrics`` should be in either ``necessary_metrics`` or + ``generated_metrics`` If a used metric isn't in either, a warning message + will appear. This is a useful check that makes sure every metric used was + pre-specified. ``selector.tree["classification_tags"]`` and a field in "ICA decision tree json": - A list of the pre-specified classification tags that could be used in a decision tree. - Any reporting interface should use this field so that all possible tags are listed - even if a given tag is not used by any component by the end of the selection process. + A list of the pre-specified classification tags that could be used in a decision tree. + Any reporting interface should use this field so that all possible tags are listed + even if a given tag is not used by any component by the end of the selection process. -.. _saved in multiple files: output_file_descriptions.html -**Outputs of each decision tree step** +Outputs of each decision tree step +================================== "ICA decision tree json" includes all the information from the specified decision tree for each "node" or function call. For each node, there is an "outputs" subfield with -information from when the tree was executed. Each outputs field includes: +information from when the tree was executed. +Each outputs field includes: -decision_node_idx: +- decision_node_idx The decision tree functions are run as part of an ordered list. This is the positional index (the location of the function in the list), starting with index 0. 
-used_metrics: +- used_metrics A list of the metrics used in a node of the decision tree -used_cross_component_metrics: +- used_cross_component_metrics A list of cross component metrics used in the node of a decision tree -node_label: +- node_label A brief label for what happens in this node that can be used in a decision tree summary table or flow chart. -n_true, n_false: +- n_true, n_false For decision tree (dec) functions, the number of components that were classified as true or false, respectively, in this decision tree step. -calc_cross_comp_metrics: +- calc_cross_comp_metrics For calculation (calc) functions, cross component metrics that were calculated in this function. When this is included, each of those metrics and the calculated values are also distinct keys in 'outputs'. While the cross component metrics table does not include where each component was calculated, that information is stored here. -added_component_table_metrics: +- added_component_table_metrics It is possible to add a new metric to the component table during the selection process. This is useful if a metric is to be calculated on a subset of components based on what happened during previous steps in the selection process. This is **not** recommended, @@ -139,17 +143,14 @@ added_component_table_metrics: meica it is possible. - ************************************** Decision trees distributed with tedana ************************************** -Two decision trees are distributed with ``tedana``. -`Flow charts detailing these trees are here`_. It might be useful to -look at these trees while reading how to develop a custom decision tree. - - -.. _Flow charts detailing these trees are here: included_decision_trees.html +Two decision trees are distributed with ``tedana``. +These trees are documented in :doc:`included_decision_trees`. +It might be useful to look at these trees while reading how to develop a custom +decision tree. ******************************* @@ -170,50 +171,51 @@ non-ideal results for the current code. Some criteria will result in an error if violated, but more will just give a warning. If you are designing or editing a new tree, look carefully at the warnings. -A decision tree can include two types of nodes or functions. All functions are currently -in `selection_nodes.py`_ +A decision tree can include two types of nodes or functions. +All functions are currently in :mod:`~tedana.selection.selection_nodes`. - A decision function will use existing metrics and potentially change the classification of the components based on those metrics. By convention, all - these functions begin with "dec" + these functions begin with "dec". - A calculation function will take existing metrics and calculate a value across components to be used for classification, for example the kappa and rho elbows. - By convention, all these functions begin with "calc" + By convention, all these functions begin with "calc". - Nothing prevents a function from both calculating new cross component values and applying those values in a decision step, but following this convention should hopefully make decision tree specifications easier to follow and results easier to interpret. .. _resources/decision_trees: https://github.com/ME-ICA/tedana/tree/main/tedana/resources/decision_trees -.. _selection_nodes.py: https://github.com/ME-ICA/tedana/tree/main/tedana/selection/selection_nodes.py -**General information fields** + +General information fields +========================== There are several fields with general information. 
Some of these store general information that's useful for reporting results and others store information that is used to check whether results are plausible & can help avoid mistakes. - tree_id: - A descriptive name for the tree that will be logged. +- tree_id + A descriptive name for the tree that will be logged. - info: - A brief description of the tree for info logging +- info + A brief description of the tree for info logging - report: - A narrative description of the tree that could be used in report logging +- report + A narrative description of the tree that could be used in report logging - refs: - Publications that should be referenced when this tree is used +- refs + Publications that should be referenced when this tree is used - necessary_metrics: - A list of the necessary metrics in the component table that will be used - by the tree. If a metric doesn't exist then this will raise an error instead - of executing a tree. (Depending on future code development, this could - potentially be used to run ``tedana`` by specifying a decision tree and - metrics are calculated based on the contents of this field.) If a necessary - metric isn't used, there will be a warning. +- necessary_metrics + A list of the necessary metrics in the component table that will be used + by the tree. If a metric doesn't exist then this will raise an error instead + of executing a tree. (Depending on future code development, this could + potentially be used to run ``tedana`` by specifying a decision tree and + metrics are calculated based on the contents of this field.) If a necessary + metric isn't used, there will be a warning. - generated_metrics: +- generated_metrics An optional initial field. It lists metrics that are to be calculated as part of the decision tree's execution. This is used similarly to necessary_metrics except, since the decision tree starts before these metrics exist, it won't raise @@ -222,23 +224,25 @@ that is used to check whether results are plausible & can help avoid mistakes. classifications. This does make interpretation of results more confusing, but, since this functionality was part of the kundu decision tree, it is included. - intermediate_classifications: - A list of intermediate classifications (i.e. "provisionalaccept", - "provisionalreject"). It is very important to pre-specify these because the code - will make sure only the default classifications ("accepted" "rejected" - "unclassified") and intermediate classifications are used in a tree. This prevents - someone from accidentially losing a component due to a spelling error or other - minor variation in a classification label. +- intermediate_classifications + A list of intermediate classifications (i.e. "provisionalaccept", + "provisionalreject"). It is very important to pre-specify these because the code + will make sure only the default classifications ("accepted" "rejected" + "unclassified") and intermediate classifications are used in a tree. This prevents + someone from accidentially losing a component due to a spelling error or other + minor variation in a classification label. + +- classification_tags + A list of acceptable classification tags (i.e. "Likely BOLD", "Unlikely BOLD", + "Low variance"). This will both be used to make sure only these tags are used in + the tree and allow programs that interact with the results to see all potential + tags in one place. Note: "Likely BOLD" is a required tag. 
If tedana is run and + none of the components include the "Likely BOLD" tag, then ICA will be repeated + with a different seed and then the selection process will repeat. - classification_tags: - A list of acceptable classification tags (i.e. "Likely BOLD", "Unlikely BOLD", - "Low variance"). This will both be used to make sure only these tags are used in - the tree and allow programs that interact with the results to see all potential - tags in one place. Note: "Likely BOLD" is a required tag. If tedana is run and - none of the components include the "Likely BOLD" tag, then ICA will be repeated - with a different seed and then the selection process will repeat. -**Nodes in the decision tree** +Nodes in the decision tree +========================== The "nodes" field is an ordered list of elements where each element defines a node in the decision tree. Each node contains the information to call a function. @@ -255,53 +259,59 @@ classified as 'accepted' or 'rejected' by the time the tree is completed. There are several key fields for each node: -- "functionname": The exact function name in `selection_nodes.py`_ that will be called. +- "functionname": The exact function name in :mod:`~tedana.selection.selection_nodes` that will be called. - "parameters": Specifications of all required parameters for the function in functionname - "kwargs": Specifications for optional parameters for the function in functionname -The only parameter that is used in all functions is "decidecomps", which is used to +The only parameter that is used in all functions is ``decide_comps``, which is used to identify, based on their classifications, the components a function should be applied to. It can be a single classification, or a comma separated string of classifications. -In addition to the intermediate and default ("accepted" "rejected" "unclassified") +In addition to the intermediate and default ("accepted", "rejected", "unclassified") component classifications, this can be "all" for functions that should be applied to all components regardless of their classifications. -Most decision functions also include "if_true" and "if_false", which specify how to change +Most decision functions also include ``if_true`` and ``if_false``, which specify how to change the classification of each component based on whether a decision criterion is true or false. In addition to the default and intermediate classification options, this can -also be "nochange" (i.e. For components where a>b is true, "reject". For components -where a>b is false, "nochange"). The optional parameters "tag_if_true" and "tag_if_false" -define the classification tags to be assigned to components. Currently, the only -exceptions are "manual_classify" and "dec_classification_doesnt_exist" which use -"new_classification" to designate the new component classification and "tag" (optional) -to designate which classification tag to apply. +also be "nochange" +(e.g., for components where a>b is true, "reject", and for components where a>b is false, "nochange"). +The optional parameters ``tag_if_true`` and ``tag_if_false`` +define the classification tags to be assigned to components. +Currently, the only exceptions are ``manual_classify`` and ``dec_classification_doesnt_exist``, +which use ``new_classification`` to designate the new component classification and +``tag`` (optional) to designate which classification tag to apply. 
There are several optional parameters (to include within "kwargs") in every decision tree function: -- custom_node_label: A brief label for what happens in this node that can be used in +- ``custom_node_label``: A brief label for what happens in this node that can be used in a decision tree summary table or flow chart. If custom_node_label is not not defined, then each function has default descriptive text. -- log_extra_report, log_extra_info: Text for each function call is automatically placed +- ``log_extra_report``, ``log_extra_info``: Text for each function call is automatically placed in the logger output. In addition to that text, the text in these these strings will also be included in the logger with the report or info codes respectively. These might be useful to give a narrative explanation of why a step was parameterized a certain way. -- only_used_metrics: If true, this function will only return the names of the component +- ``only_used_metrics``: If true, this function will only return the names of the component table metrics that will be used when this function is fully run. This can be used to identify all used metrics before running the decision tree. -"_comments" can be used to add a longer explanation about what a node is doing. This -will not be logged anywhere except in the tree, but may be useful to help explain the +``"_comments"`` can be used to add a longer explanation about what a node is doing. +This will not be logged anywhere except in the tree, but may be useful to help explain the purpose of a given node. + ******************************** Key parts of selection functions ******************************** There are several expectations for selection functions that are necessary for them to -properly execute. In `selection_nodes.py`_, ``manual_classify``, ``dec_left_op_right``, -and ``calc_kappa_rho_elbows_kundu`` are good examples for how to meet these expectations. +properly execute. +In :mod:`~tedana.selection.selection_nodes`, +:func:`~tedana.selection.selection_nodes.manual_classify`, +:func:`~tedana.selection.selection_nodes.dec_left_op_right`, +and :func:`~tedana.selection.selection_nodes.calc_kappa_elbow` +are good examples for how to meet these expectations. Create a dictionary called "outputs" that includes key fields that should be recorded. The following line should be at the end of each function to retain the output info: @@ -332,7 +342,6 @@ specific dataset. Existing functions define ``function_name_idx = f"Step {selector.current_node_idx}: [text of function_name]``. This is used in logging and is cleaner to initialize near the top of each function. - Each function has code that creates a default node label in ``outputs["node_label"]``. The default node label may be used in decision tree visualization so it should be relatively short. 
Within this section, if there is a user-provided custom_node_label, @@ -344,9 +353,11 @@ and output a warning if the function overwrites an existing value Code that adds the text ``log_extra_info`` and ``log_extra_report`` into the appropriate logs (if they are provided by the user) -After the above information is included, all functions will call ``selectcomps2use``, +After the above information is included, +all functions will call :func:`~tedana.selection.selection_utils.selectcomps2use`, which returns the components with classifications included in ``decide_comps`` -and then runs ``confirm_metrics_exist``, which is an added check to make sure the metrics +and then runs :func:`~tedana.selection.selection_utils.confirm_metrics_exist`, +which is an added check to make sure the metrics used by this function exist in the component table. Nearly every function has a clause like: @@ -364,18 +375,21 @@ there's nothing for the function to be run on, else continue. For decision functions, the key variable is ``decision_boolean``, which should be a pandas dataframe column that is True or False for the components in ``decide_comps`` based on -the function's criteria. That column is an input to ``change_comptable_classifications``, +the function's criteria. +That column is an input to :func:`~tedana.selection.selection_utils.change_comptable_classifications`, which will update the component_table classifications, update the classification history in component_status_table, and update the component classification_tags. Components not in ``decide_comps`` retain their existing classifications and tags. -``change_comptable_classifications`` also returns and should assign values to +:func:`~tedana.selection.selection_utils.change_comptable_classifications` +also returns and should assign values to ``outputs["n_true"]`` and ``outputs["n_false"]``. These log how many components were identified as true or false within each function. For calculation functions, the calculated values should be added as a value/key pair to -both ``selector.cross_component_metrics`` and ``outputs`` +both ``selector.cross_component_metrics`` and ``outputs``. -``log_decision_tree_step`` puts the relevant info from the function call into the program's output log. +:func:`~tedana.selection.selection_utils.log_decision_tree_step` +puts the relevant info from the function call into the program's output log. Every function should end with: diff --git a/docs/classification_output_descriptions.rst b/docs/classification_output_descriptions.rst index 14358db00..04c73ad9c 100644 --- a/docs/classification_output_descriptions.rst +++ b/docs/classification_output_descriptions.rst @@ -4,12 +4,11 @@ Classification output descriptions Tedana outputs multiple files that can be used to subsequent analyses and to better understand one's denoising results. -In addition to the `descriptions of file names`_ this page explains the +In addition to the :doc:`output_file_descriptions` this page explains the contents of several of those files in more detail. -`Building decision trees`_ covers the full process, and not just the +:doc:`building_decision_trees` covers the full process, and not just the descriptions of outputted files, in more detail. -.. _Building decision trees: building_decision_trees.html TEDPCA codes ============ @@ -19,9 +18,10 @@ dataset. Without this step, the number of components would be one less than the number of volumes, many of those components would effectively be Gaussian noise and ICA would not reliably converge. 
Standard methods for data reduction use cost functions, like MDL, KIC, and AIC to estimate the variance -that is just noise and remove the lowest variance components under that threshold. -By default, ``tedana`` uses AIC. Of those three, AIC is the least agressive and -will retain the most components. +that is just noise and remove the lowest variance components under that +threshold. +By default, ``tedana`` uses AIC. +Of those three, AIC is the least agressive and will retain the most components. ``Tedana`` includes additional `kundu` and `kundu-stabilize` approaches that identify and remove components that don't contain T2* or S0 signal and are more @@ -50,16 +50,20 @@ ICA Classification Outputs ========================== The component table is stored in ``desc-tedana_metrics.tsv`` or -``tedana_metrics.tsv``. Each row is a component number. Each column is a metric -that is calculated for each component. Short descriptions of each column metric -are in the output log, ``tedana_[date_time].tsv``, and the actual metric -calculations are in `collect.py`_ The final two columns are `classification` -and `classification_tags`. `classification` should include `accepted` or -`rejected` for every component and `rejected` components are be removed -through denoising. `classification_tags` provide more information on why -components received a specific classification. Each component can receive -more than one tag. The following tags are included depending if ``--tree`` -is minimal, kundu, or if ``ica_reclassify`` is run. +``tedana_metrics.tsv``. +Each row is a component number. +Each column is a metric that is calculated for each component. +Short descriptions of each column metric are in the output log, +``tedana_[date_time].tsv``, and the actual metric calculations are in +:mod:`~tedana.metrics.collect`. +The final two columns are ``classification`` and ``classification_tags``. +``classification`` should include **accepted** or **rejected** for every +component and **rejected** components are be removed through denoising. +``classification_tags`` provide more information on why +components received a specific classification. +Each component can receive more than one tag. +The following tags are included depending if ``--tree`` is "minimal", "kundu", +or if ``ica_reclassify`` is run. ===================== ================ ======================================== Tag Included in Tree Explanation @@ -93,13 +97,12 @@ in several places: - The information in the output log includes the name of each node and the count of components that changed classification during execution. -- The same information is stored in the `ICA decision tree` json file (see - `descriptions of file names`_) in the "output" field for each node. That information - is organized so that it can be used to generate a visual or text-based summary of - what happened when the decision tree was run on a dataset. -- The `ICA status table` lists the classification status of each component after - each node was run. This is particularly useful to trying to understand how a - specific component ended receiving its classification. - -.. _collect.py: https://github.com/ME-ICA/tedana/blob/main/tedana/metrics/collect.py -.. _descriptions of file names: output_file_descriptions.html \ No newline at end of file +- The same information is stored in the ``ICA decision tree`` json file + (see :doc:`output_file_descriptions`) in the "output" field for each node. 
+ That information is organized so that it can be used to generate a visual or + text-based summary of what happened when the decision tree was run on a + dataset. +- The ``ICA status table`` lists the classification status of each component + after each node was run. + This is particularly useful to trying to understand how a specific component + ended receiving its classification. diff --git a/docs/included_decision_trees.rst b/docs/included_decision_trees.rst index 57bcb0f3f..9cfb78183 100644 --- a/docs/included_decision_trees.rst +++ b/docs/included_decision_trees.rst @@ -3,31 +3,33 @@ Included Decision Trees ####################### Two decision trees are currently distributed with ``tedana``. + ``kundu`` is the decision tree that is based on MEICA version 2.5 and has been included with ``tedana`` since the start of this project. While multiple publications have used and benefits from this decision, tree, but it includes many steps with arbitrary thresholds and, when components seem misclassified, it's often hard to understand why. + ``minimal`` is a simplified version of that decision tree with fewer steps and arbitrary thresholds. Minimal is designed to be more stable and comprehensible, but it has not yet be extensively validated and parts of the tree may change in response to additional tests on a -wider range of data sets. +wider range of data sets. Flowcharts describing the steps in both trees are below. -As documented more in `Building Decision Trees`_, the input to each tree +As documented more in :doc:`building_decision_trees`, the input to each tree is a table with metrics, like :math:`\kappa` or :math:`\rho`, for each component. Each step or node in the decision tree either calculates new values or changes component classifications based on these metrics. -When a component classification changes to `accept` or `reject`, a -`classification_tag` is also assigned which may help understand why +When a component classification changes to ``accept`` or ``reject``, a +``classification_tag`` is also assigned which may help understand why a component was given a specific classification. Each step in the flow chart is labeled with a ``node`` number. If ``tedana`` is run using one of these trees, those node numbers will match the numbers in the ``ICA status table`` and the -``ICA decision tree`` that are `saved with the outputs`_. These node -numbers can be used to see when in the process a component's +``ICA decision tree`` that are :doc:`output_file_descriptions`. +These node numbers can be used to see when in the process a component's classifiation changed. .. image:: _static/decision_tree_legend.png @@ -40,8 +42,6 @@ classifiation changed. Legend for Decision Tree Flow Charts -.. _saved with the outputs: output_file_descriptions.html -.. _Building Decision Trees: building_decision_trees.html ******************* Kundu decision tree @@ -54,8 +54,8 @@ elbow are classified as `provisional accept`. A non-obvious aspect of this decision tree is that no decision node below this point distinguishes components that are `provisional accept` from components that are still `unclassified` and nothing that does not cross the :math:`\kappa` and -:math:`\rho` elbow thresholds is inherantly rejected. The number of -`provisional accept` components is used to see if the process should +:math:`\rho` elbow thresholds is inherantly rejected. 
The number of +`provisional accept` components is used to see if the process should be restarted (node 11) and calculate other thresholds (nodes 12-16 & 20), but nothing is directly accepted or rejected based on the elbow thresholds. Several additional criteria are used to reject components (nodes 17, 21, & 22). @@ -87,7 +87,7 @@ The only expection to this is if :math:`\kappa` > :math:`\kappa` elbow and the component should not be rejected even if it also contains noise (node 9). If `provisional reject` components have very low variance they are accepted rather than losing degrees of freedom, but no more than 1% of the total variance can be -accepted this way (node 11). After that point, everything that is +accepted this way (node 11). After that point, everything that is `provisional accept` is accepted (node 12) and everything that is `provisional reject` is rejected (node 13) @@ -97,4 +97,4 @@ is rejected (node 13) `LaTeX file to generate the minimal decision tree flow chart`_ -.. _LaTeX file to generate the minimal decision tree flow chart: _static/decision_tree_minimal.tex \ No newline at end of file +.. _LaTeX file to generate the minimal decision tree flow chart: _static/decision_tree_minimal.tex diff --git a/docs/output_file_descriptions.rst b/docs/output_file_descriptions.rst index c703da8fc..117f04ed2 100644 --- a/docs/output_file_descriptions.rst +++ b/docs/output_file_descriptions.rst @@ -3,11 +3,11 @@ Output file name descriptions ############################# tedana allows for multiple file naming conventions. The key labels and naming options for -each convention that can be set using the `--convention` option are in `outputs.json`_. -The output of `tedana` also includes a file called `registry.json` or -`desc-tedana_registry.json` that includes the keys and the matching file names for the -output. The table below lists both these keys and the default "BIDS Derivatives" -file names. +each convention that can be set using the ``--convention`` option are in `outputs.json`_. +The output of ``tedana`` also includes a file called ``registry.json`` or +``desc-tedana_registry.json`` that includes the keys and the matching file names for the +output. +The table below lists both these keys and the default "BIDS Derivatives" file names. .. _outputs.json: https://github.com/ME-ICA/tedana/blob/main/tedana/resources/config/outputs.json From bc1cf40bd4a7e01b9ab80632f72b02d2e0c91747 Mon Sep 17 00:00:00 2001 From: Dan Handwerker <7406227+handwerkerd@users.noreply.github.com> Date: Tue, 9 May 2023 17:11:57 -0400 Subject: [PATCH 177/177] Output docs on one page (#47) * Output docs on one page * added new multi-echo lectures --- docs/building_decision_trees.rst | 4 +- docs/classification_output_descriptions.rst | 108 ------- docs/included_decision_trees.rst | 6 +- docs/index.rst | 2 - docs/multi-echo.rst | 7 +- docs/output_file_descriptions.rst | 164 ----------- docs/outputs.rst | 305 +++++++++++++++++++- 7 files changed, 301 insertions(+), 295 deletions(-) delete mode 100644 docs/classification_output_descriptions.rst delete mode 100644 docs/output_file_descriptions.rst diff --git a/docs/building_decision_trees.rst b/docs/building_decision_trees.rst index c9c0dbb38..dce2c6d31 100644 --- a/docs/building_decision_trees.rst +++ b/docs/building_decision_trees.rst @@ -7,7 +7,7 @@ of the component selection process and people who are considering customizing their own decision tree or contributing to ``tedana`` code. We have tried to make this accessible, but it is long. 
If you just want to better understand what's in the outputs from ``tedana`` start with -:doc:`classification_output_descriptions`. +:ref:`classification-output-descriptions`. ``tedana`` involves transforming data into components, currently via ICA, and then calculating metrics for each component. Each metric has one value per component that @@ -45,7 +45,7 @@ During processing, everything is stored in a :class:`~tedana.selection.component_selector.ComponentSelector` called ``selector``. The elements of that object are then saved to multiple files. The file key names are used below the full file names in the -:doc:`output_file_descriptions`. +:ref:`output-filename-descriptions`. General outputs from component selection diff --git a/docs/classification_output_descriptions.rst b/docs/classification_output_descriptions.rst deleted file mode 100644 index 04c73ad9c..000000000 --- a/docs/classification_output_descriptions.rst +++ /dev/null @@ -1,108 +0,0 @@ -################################## -Classification output descriptions -################################## - -Tedana outputs multiple files that can be used to subsequent analyses and to -better understand one's denoising results. -In addition to the :doc:`output_file_descriptions` this page explains the -contents of several of those files in more detail. -:doc:`building_decision_trees` covers the full process, and not just the -descriptions of outputted files, in more detail. - - -TEDPCA codes -============ - -In ``tedana`` PCA is used to reduce the number of dimensions (components) in the -dataset. Without this step, the number of components would be one less than -the number of volumes, many of those components would effectively be -Gaussian noise and ICA would not reliably converge. Standard methods for data -reduction use cost functions, like MDL, KIC, and AIC to estimate the variance -that is just noise and remove the lowest variance components under that -threshold. -By default, ``tedana`` uses AIC. -Of those three, AIC is the least agressive and will retain the most components. - -``Tedana`` includes additional `kundu` and `kundu-stabilize` approaches that -identify and remove components that don't contain T2* or S0 signal and are more -likely to be noise. If the `--tedpca kundu` option is used, the PCA_metrics tsv -file will include an accepted vs rejected classification column and also a -rationale column of codes documenting why a PCA component removed. If MDL, KIC, -or AIC are used then the classification column will exist, but will include -include the accepted components and the rationale column will contain n/a" -When kundu is used, these are brief explanations of the the rationale codes - -===== =============== ======================================================== -Code Classification Description -===== =============== ======================================================== -P001 rejected Low Rho, Kappa, and variance explained -P002 rejected Low variance explained -P003 rejected Kappa equals fmax -P004 rejected Rho equals fmax -P005 rejected Cumulative variance explained above 95% (only in - stabilized PCA decision tree) -P006 rejected Kappa below fmin (only in stabilized PCA decision tree) -P007 rejected Rho below fmin (only in stabilized PCA decision tree) -===== =============== ======================================================== - - -ICA Classification Outputs -========================== - -The component table is stored in ``desc-tedana_metrics.tsv`` or -``tedana_metrics.tsv``. -Each row is a component number. 
-Each column is a metric that is calculated for each component. -Short descriptions of each column metric are in the output log, -``tedana_[date_time].tsv``, and the actual metric calculations are in -:mod:`~tedana.metrics.collect`. -The final two columns are ``classification`` and ``classification_tags``. -``classification`` should include **accepted** or **rejected** for every -component and **rejected** components are be removed through denoising. -``classification_tags`` provide more information on why -components received a specific classification. -Each component can receive more than one tag. -The following tags are included depending if ``--tree`` is "minimal", "kundu", -or if ``ica_reclassify`` is run. - -===================== ================ ======================================== -Tag Included in Tree Explanation -===================== ================ ======================================== -Likely BOLD minimal,kundu Accepted because likely to include some - BOLD signal -Unlikely BOLD minimal,kundu Rejected because likely to include a - lot of non-BOLD signal -Low variance minimal,kundu Accepted because too low variance to - lose a degree-of-freedom by rejecting -Less likely BOLD kundu Rejected based on some edge criteria - based on relative rankings of components -Accept borderline kundu Accepted based on some edge criteria - based on relative rankings of components -No provisional accept kundu Accepted because because kundu tree did - not find any components to consider - accepting so the conservative "failure" - case is accept everything rather than - rejecting everything -manual reclassify manual_classify Classification based on user input. If - done after automatic selection then - the preceding tag from automatic - selection is retained and this tag - notes the classification was manually - changed -===================== ================ ======================================== - -The decision tree is a list of nodes where the classification of each component -could change. The information on which nodes and how classifications changed is -in several places: - -- The information in the output log includes the name of each - node and the count of components that changed classification during execution. -- The same information is stored in the ``ICA decision tree`` json file - (see :doc:`output_file_descriptions`) in the "output" field for each node. - That information is organized so that it can be used to generate a visual or - text-based summary of what happened when the decision tree was run on a - dataset. -- The ``ICA status table`` lists the classification status of each component - after each node was run. - This is particularly useful to trying to understand how a specific component - ended receiving its classification. diff --git a/docs/included_decision_trees.rst b/docs/included_decision_trees.rst index 9cfb78183..156404bcd 100644 --- a/docs/included_decision_trees.rst +++ b/docs/included_decision_trees.rst @@ -28,9 +28,9 @@ a component was given a specific classification. Each step in the flow chart is labeled with a ``node`` number. If ``tedana`` is run using one of these trees, those node numbers will match the numbers in the ``ICA status table`` and the -``ICA decision tree`` that are :doc:`output_file_descriptions`. -These node numbers can be used to see when in the process a component's -classifiation changed. +``ICA decision tree`` that are described in +:ref:`output-filename-descriptions`. 
These node numbers can be used +to see when in the process a component's classifiation changed. .. image:: _static/decision_tree_legend.png :width: 300 diff --git a/docs/index.rst b/docs/index.rst index f7b1f19f9..cc1f38718 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -189,8 +189,6 @@ tedana is licensed under GNU Lesser General Public License version 2.1. :name: hiddentoc dependence_metrics - output_file_descriptions - classification_output_descriptions included_decision_trees diff --git a/docs/multi-echo.rst b/docs/multi-echo.rst index 18105b64f..11b581810 100644 --- a/docs/multi-echo.rst +++ b/docs/multi-echo.rst @@ -501,13 +501,16 @@ Videos * A `series of lectures from the OHBM 2017 multi-echo session`_ on multiple facets of multi-echo data analysis * | Multi-echo fMRI lecture from the `2018 NIH FMRI Summer Course`_ by Javier Gonzalez-Castillo | `Slides from 2018 NIH FMRI Summer Course`_ -* An `NIMH Center for Multimodal Neuroimaging video`_ by the Section on Functional Imaging Methods +* NIMH Center for Multimodal Neuroimaging `Advantages of multi-echo fMRI`_ (2019) by Dan Handwerker, Javier Gonzalez-Castillo, and Vinai Roopchansingh +* | MRI Together 2022 Conference Presentations by Eneko Uruñuela + | Tedana: Analysis of echo-time dependent fMRI data (`recording `_, `slides `_) + | A tour of tedana (`tour recording `_, `tour slides `_) .. _educational session from OHBM 2017: https://www.pathlms.com/ohbm/courses/5158/sections/7788/video_presentations/75977 .. _series of lectures from the OHBM 2017 multi-echo session: https://www.pathlms.com/ohbm/courses/5158/sections/7822 .. _2018 NIH FMRI Summer Course: https://fmrif.nimh.nih.gov/course/fmrif_course/2018/14_Javier_20180713 .. _Slides from 2018 NIH FMRI Summer Course: https://fmrif.nimh.nih.gov/COURSE/fmrif_course/2018/content/14_Javier_20180713.pdf -.. _NIMH Center for Multimodal Neuroimaging video: https://youtu.be/G1Ftd2IwF14 +.. _Advantages of multi-echo fMRI: https://youtu.be/G1Ftd2IwF14 Multi-echo preprocessing software diff --git a/docs/output_file_descriptions.rst b/docs/output_file_descriptions.rst deleted file mode 100644 index 117f04ed2..000000000 --- a/docs/output_file_descriptions.rst +++ /dev/null @@ -1,164 +0,0 @@ -############################# -Output file name descriptions -############################# - -tedana allows for multiple file naming conventions. The key labels and naming options for -each convention that can be set using the ``--convention`` option are in `outputs.json`_. -The output of ``tedana`` also includes a file called ``registry.json`` or -``desc-tedana_registry.json`` that includes the keys and the matching file names for the -output. -The table below lists both these keys and the default "BIDS Derivatives" file names. - -.. _outputs.json: https://github.com/ME-ICA/tedana/blob/main/tedana/resources/config/outputs.json - -=========================================================================== ===================================================== -Key: Filename Content -=========================================================================== ===================================================== -"registry json": desc-tedana_registry.json Mapping of file name keys to filename locations -"data description json": dataset_description.json Top-level metadata for the workflow. -tedana_report.html The interactive HTML report. -"combined img": desc-optcom_bold.nii.gz Optimally combined time series. -"denoised ts img": desc-optcomDenoised_bold.nii.gz Denoised optimally combined time series. 
Recommended - dataset for analysis. -"adaptive mask img": desc-adaptiveGoodSignal_mask.nii.gz Integer-valued mask used in the workflow, where - each voxel's value corresponds to the number of good - echoes to be used for T2\*/S0 estimation. Will be - calculated whether original mask estimated within - tedana or user-provided. All voxels with 1 good - echo will be included in outputted time series - but only voxels with at least 3 good echoes will be - used in ICA and metric calculations -"t2star img": T2starmap.nii.gz Full estimated T2* 3D map. - Values are in seconds. If a voxel has at least 1 good - echo then the first two echoes will be used to estimate - a value (an impresise weighting for optimal combination - is better than fully excluding a voxel) -"s0 img": S0map.nii.gz Full S0 3D map. If a voxel has at least 1 good - echo then the first two echoes will be used to estimate - a value -"PCA mixing tsv": desc-PCA_mixing.tsv Mixing matrix (component time series) from PCA - decomposition in a tab-delimited file. Each column is - a different component, and the column name is the - component number. -"PCA decomposition json": desc-PCA_decomposition.json Metadata for the PCA decomposition. -"z-scored PCA components img": desc-PCA_stat-z_components.nii.gz Component weight maps from PCA decomposition. - Each map corresponds to the same component index in - the mixing matrix and component table. - Maps are in z-statistics. -"PCA metrics tsv": desc-PCA_metrics.tsv TEDPCA component table. A BIDS Derivatives-compatible - TSV file with summary metrics and inclusion/exclusion - information for each component from the PCA - decomposition. -"PCA metrics json": desc-PCA_metrics.json Metadata about the metrics in ``desc-PCA_metrics.tsv``. -"PCA cross component metrics json": desc-PCACrossComponent_metrics.json Measures calculated across PCA compononents including - values for the full cost function curves for all - AIC, KIC, and MDL cost functions and the number of - components and variance explained for multiple options - Figures for the cost functions and variance explained - are also in - ``./figures//pca_[criteria|variance_explained.png]`` -"ICA mixing tsv": desc-ICA_mixing.tsv Mixing matrix (component time series) from ICA - decomposition in a tab-delimited file. Each column is - a different component, and the column name is the - component number. -"ICA components img": desc-ICA_components.nii.gz Full ICA coefficient feature set. -"z-scored ICA components img": desc-ICA_stat-z_components.nii.gz Z-statistic component weight maps from ICA - decomposition. - Values are z-transformed standardized regression - coefficients. Each map corresponds to the same - component index in the mixing matrix and component table. -"ICA decomposition json": desc-ICA_decomposition.json Metadata for the ICA decomposition. -"ICA metrics tsv": desc-tedana_metrics.tsv TEDICA component table. A BIDS Derivatives-compatible - TSV file with summary metrics and inclusion/exclusion - information for each component from the ICA - decomposition. -"ICA metrics json": desc-tedana_metrics.json Metadata about the metrics in - ``desc-tedana_metrics.tsv``. -"ICA cross component metrics json": desc-ICACrossComponent_metrics.json Metric names and values that are each a single number - calculated across components. For example, kappa and - rho elbows. -"ICA decision tree json": desc-ICA_decision_tree A copy of the inputted decision tree specification with - an added "output" field for each node. 
The output field - contains information about what happened during - execution. -"ICA status table tsv": desc-ICA_status_table.tsv A table where each column lists the classification - status of each component after each node was run. - Columns are only added for runs where component - statuses can change. -"ICA accepted components img": desc-ICAAccepted_components.nii.gz High-kappa ICA coefficient feature set -"z-scored ICA accepted components img": desc-ICAAcceptedZ_components.nii.gz Z-normalized spatial component maps -report.txt A summary report for the workflow with relevant - citations. -"low kappa ts img": desc-optcomRejected_bold.nii.gz Combined time series from rejected components. -"high kappa ts img": desc-optcomAccepted_bold.nii.gz High-kappa time series. This dataset does not - include thermal noise or low variance components. - Not the recommended dataset for analysis. -references.bib The BibTeX entries for references cited in - report.txt. - -=========================================================================== ===================================================== - -If ``verbose`` is set to True: - -============================================================================================= ===================================================== -Key: Filename Content -============================================================================================= ===================================================== -"limited t2star img": desc-limited_T2starmap.nii.gz Limited T2* map/time series. - Values are in seconds. - Unlike the full T2* maps, if only one 1 echo contains - good data the limited map will have NaN -"limited s0 img": desc-limited_S0map.nii.gz Limited S0 map/time series. - Unlike the full S0 maps, if only one 1 echo contains - good data the limited map will have NaN -"whitened img": desc-optcom_whitened_bold The optimally combined data after whitening -"echo weight [PCA|ICA] maps split img": echo-[echo]_desc-[PCA|ICA]_components.nii.gz Echo-wise PCA/ICA component weight maps. -"echo T2 [PCA|ICA] split img": echo-[echo]_desc-[PCA|ICA]T2ModelPredictions_components.nii.gz Component- and voxel-wise R2-model predictions, - separated by echo. -"echo S0 [PCA|ICA] split img": echo-[echo]_desc-[PCA|ICA]S0ModelPredictions_components.nii.gz Component- and voxel-wise S0-model predictions, - separated by echo. -"[PCA|ICA] component weights img": desc-[PCA|ICA]AveragingWeights_components.nii.gz Component-wise averaging weights for metric - calculation. -"[PCA|ICA] component F-S0 img": desc-[PCA|ICA]S0_stat-F_statmap.nii.gz F-statistic map for each component, for the S0 model. -"[PCA|ICA] component F-T2 img": desc-[PCA|ICA]T2_stat-F_statmap.nii.gz F-statistic map for each component, for the T2 model. -"PCA reduced img": desc-optcomPCAReduced_bold.nii.gz Optimally combined data after dimensionality - reduction with PCA. This is the input to the ICA. 
-"high kappa ts split img": echo-[echo]_desc-Accepted_bold.nii.gz High-Kappa time series for echo number ``echo`` -"low kappa ts split img": echo-[echo]_desc-Rejected_bold.nii.gz Low-Kappa time series for echo number ``echo`` -"denoised ts split img": echo-[echo]_desc-Denoised_bold.nii.gz Denoised time series for echo number ``echo`` -============================================================================================= ===================================================== - -If ``tedort`` is True - -======================================================== ===================================================== -Key: Filename Content -======================================================== ===================================================== -"ICA orthogonalized mixing tsv": desc-ICAOrth_mixing.tsv Mixing matrix with rejected components orthogonalized - from accepted components -======================================================== ===================================================== - -If ``gscontrol`` includes 'gsr': - -================================================================= ===================================================== -Key: Filename Content -================================================================= ===================================================== -"gs img": desc-globalSignal_map.nii.gz Spatial global signal -"global signal time series tsv": desc-globalSignal_timeseries.tsv Time series of global signal from optimally combined - data. -"has gs combined img": desc-optcomWithGlobalSignal_bold.nii.gz Optimally combined time series with global signal - retained. -"removed gs combined img": desc-optcomNoGlobalSignal_bold.nii.gz Optimally combined time series with global signal - removed. -================================================================= ===================================================== - -If ``gscontrol`` includes 'mir' (Minimal intensity regression, which may help remove some T1 noise and -was an option in the MEICA v2.5 code, but never fully explained or evaluted in a publication): - -======================================================================================= ===================================================== -Key: Filename Content -======================================================================================= ===================================================== -"t1 like img": desc-T1likeEffect_min.nii.gz T1-like effect -"mir denoised img": desc-optcomMIRDenoised_bold.nii.gz Denoised time series after MIR -"ICA MIR mixing tsv": desc-ICAMIRDenoised_mixing.tsv ICA mixing matrix after MIR -"ICA accepted mir component weights img": desc-ICAAcceptedMIRDenoised_components.nii.gz high-kappa components after MIR -"ICA accepted mir denoised img": desc-optcomAcceptedMIRDenoised_bold.nii.gz high-kappa time series after MIR -======================================================================================= ===================================================== diff --git a/docs/outputs.rst b/docs/outputs.rst index bec73edbf..d0a37d1bd 100644 --- a/docs/outputs.rst +++ b/docs/outputs.rst @@ -4,27 +4,304 @@ Outputs of tedana ################# +When ``tedana`` is run, it outputs many files and an html report to help interpret the +results. This details the contents of all outputted files, explains the terminology +used for describing the outputs of classification, and details the contents of the html +report. 

-***************************************
-Filename outputs of the tedana workflow
-***************************************
+.. contents:: :local:

-When tedana is run, it outputs files for the optimally combined and denoised
+.. _output-filename-descriptions:
+
+*****************************
+Output filename descriptions
+*****************************
+
+The outputs include files for the optimally combined and denoised
 data and many additional files to help understand the results and fascilitate
-future processing. `descriptions of these output files are here`_.
+future processing. ``tedana`` allows for multiple file naming conventions. The key labels
+and naming options for each convention that can be set using the ``--convention`` option
+are in `outputs.json`_. The output of ``tedana`` also includes a file called
+``registry.json`` or ``desc-tedana_registry.json`` that includes the keys and the matching
+file names for the output. The table below lists both these keys and the default
+"BIDS Derivatives" file names.
+
+.. _outputs.json: https://github.com/ME-ICA/tedana/blob/main/tedana/resources/config/outputs.json

-.. _descriptions of these output files are here: output_file_descriptions.html
+.. _standard-filename-outputs:
+
+Standard filename outputs
+-------------------------

-*******************************************
-Component tables and classification outputs
-*******************************************
+=========================================================================== =====================================================
+Key: Filename Content
+=========================================================================== =====================================================
+"registry json": desc-tedana_registry.json Mapping of file name keys to filename locations
+"data description json": dataset_description.json Top-level metadata for the workflow.
+tedana_report.html The interactive HTML report.
+"combined img": desc-optcom_bold.nii.gz Optimally combined time series.
+"denoised ts img": desc-optcomDenoised_bold.nii.gz Denoised optimally combined time series. Recommended
+ dataset for analysis.
+"adaptive mask img": desc-adaptiveGoodSignal_mask.nii.gz Integer-valued mask used in the workflow, where
+ each voxel's value corresponds to the number of good
+ echoes to be used for T2\*/S0 estimation. Will be
+ calculated whether the original mask was estimated
+ within tedana or user-provided. All voxels with 1 good
+ echo will be included in outputted time series,
+ but only voxels with at least 3 good echoes will be
+ used in ICA and metric calculations
+"t2star img": T2starmap.nii.gz Full estimated T2* 3D map.
+ Values are in seconds. If a voxel has at least 1 good
+ echo then the first two echoes will be used to estimate
+ a value (an imprecise weighting for optimal combination
+ is better than fully excluding a voxel)
+"s0 img": S0map.nii.gz Full S0 3D map. If a voxel has at least 1 good
+ echo then the first two echoes will be used to estimate
+ a value
+"PCA mixing tsv": desc-PCA_mixing.tsv Mixing matrix (component time series) from PCA
+ decomposition in a tab-delimited file. Each column is
+ a different component, and the column name is the
+ component number.
+"PCA decomposition json": desc-PCA_decomposition.json Metadata for the PCA decomposition.
+"z-scored PCA components img": desc-PCA_stat-z_components.nii.gz Component weight maps from PCA decomposition.
+ Each map corresponds to the same component index in
+ the mixing matrix and component table. 
+ Maps are in z-statistics.
+"PCA metrics tsv": desc-PCA_metrics.tsv TEDPCA component table. A BIDS Derivatives-compatible
+ TSV file with summary metrics and inclusion/exclusion
+ information for each component from the PCA
+ decomposition.
+"PCA metrics json": desc-PCA_metrics.json Metadata about the metrics in ``desc-PCA_metrics.tsv``.
+"PCA cross component metrics json": desc-PCACrossComponent_metrics.json Measures calculated across PCA components, including
+ values for the full cost function curves for the
+ AIC, KIC, and MDL cost functions and the number of
+ components and variance explained for multiple options.
+ Figures for the cost functions and variance explained
+ are also in
+ ``./figures//pca_[criteria|variance_explained.png]``
+"ICA mixing tsv": desc-ICA_mixing.tsv Mixing matrix (component time series) from ICA
+ decomposition in a tab-delimited file. Each column is
+ a different component, and the column name is the
+ component number.
+"ICA components img": desc-ICA_components.nii.gz Full ICA coefficient feature set.
+"z-scored ICA components img": desc-ICA_stat-z_components.nii.gz Z-statistic component weight maps from ICA
+ decomposition.
+ Values are z-transformed standardized regression
+ coefficients. Each map corresponds to the same
+ component index in the mixing matrix and component table.
+"ICA decomposition json": desc-ICA_decomposition.json Metadata for the ICA decomposition.
+"ICA metrics tsv": desc-tedana_metrics.tsv TEDICA component table. A BIDS Derivatives-compatible
+ TSV file with summary metrics and inclusion/exclusion
+ information for each component from the ICA
+ decomposition.
+"ICA metrics json": desc-tedana_metrics.json Metadata about the metrics in
+ ``desc-tedana_metrics.tsv``.
+"ICA cross component metrics json": desc-ICACrossComponent_metrics.json Metric names and values that are each a single number
+ calculated across components. For example, kappa and
+ rho elbows.
+"ICA decision tree json": desc-ICA_decision_tree.json A copy of the inputted decision tree specification with
+ an added "output" field for each node. The output field
+ contains information about what happened during
+ execution.
+"ICA status table tsv": desc-ICA_status_table.tsv A table where each column lists the classification
+ status of each component after each node was run.
+ Columns are only added for runs where component
+ statuses can change.
+"ICA accepted components img": desc-ICAAccepted_components.nii.gz High-kappa ICA coefficient feature set
+"z-scored ICA accepted components img": desc-ICAAcceptedZ_components.nii.gz Z-normalized spatial component maps
+report.txt A summary report for the workflow with relevant
+ citations.
+"low kappa ts img": desc-optcomRejected_bold.nii.gz Combined time series from rejected components.
+"high kappa ts img": desc-optcomAccepted_bold.nii.gz High-kappa time series. This dataset does not
+ include thermal noise or low variance components.
+ Not the recommended dataset for analysis.
+references.bib The BibTeX entries for references cited in
+ report.txt.
+
+=========================================================================== =====================================================
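+
+As an illustration of how the registry can be used (a minimal sketch, not part of
+``tedana`` itself; the output directory name is hypothetical and the registry is
+assumed to be the flat key-to-filename mapping described above):
+
+.. code-block:: python
+
+    import json
+    import os.path as op
+
+    out_dir = "tedana_output"  # hypothetical path to a completed tedana run
+    with open(op.join(out_dir, "desc-tedana_registry.json")) as fobj:
+        registry = json.load(fobj)
+
+    # Look up outputs by key instead of hard-coding filenames,
+    # so the same code works across naming conventions.
+    print(op.join(out_dir, registry["denoised ts img"]))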
+
+If ``verbose`` is set to True
+------------------------------
+
+============================================================================================= =====================================================
+Key: Filename Content
+============================================================================================= =====================================================
+"limited t2star img": desc-limited_T2starmap.nii.gz Limited T2* map/time series.
+ Values are in seconds.
+ Unlike the full T2* maps, if only one echo contains
+ good data, the limited map will have NaN
+"limited s0 img": desc-limited_S0map.nii.gz Limited S0 map/time series.
+ Unlike the full S0 maps, if only one echo contains
+ good data, the limited map will have NaN
+"whitened img": desc-optcom_whitened_bold The optimally combined data after whitening
+"echo weight [PCA|ICA] maps split img": echo-[echo]_desc-[PCA|ICA]_components.nii.gz Echo-wise PCA/ICA component weight maps.
+"echo T2 [PCA|ICA] split img": echo-[echo]_desc-[PCA|ICA]T2ModelPredictions_components.nii.gz Component- and voxel-wise R2-model predictions,
+ separated by echo.
+"echo S0 [PCA|ICA] split img": echo-[echo]_desc-[PCA|ICA]S0ModelPredictions_components.nii.gz Component- and voxel-wise S0-model predictions,
+ separated by echo.
+"[PCA|ICA] component weights img": desc-[PCA|ICA]AveragingWeights_components.nii.gz Component-wise averaging weights for metric
+ calculation.
+"[PCA|ICA] component F-S0 img": desc-[PCA|ICA]S0_stat-F_statmap.nii.gz F-statistic map for each component, for the S0 model.
+"[PCA|ICA] component F-T2 img": desc-[PCA|ICA]T2_stat-F_statmap.nii.gz F-statistic map for each component, for the T2 model.
+"PCA reduced img": desc-optcomPCAReduced_bold.nii.gz Optimally combined data after dimensionality
+ reduction with PCA. This is the input to the ICA.
+"high kappa ts split img": echo-[echo]_desc-Accepted_bold.nii.gz High-Kappa time series for echo number ``echo``
+"low kappa ts split img": echo-[echo]_desc-Rejected_bold.nii.gz Low-Kappa time series for echo number ``echo``
+"denoised ts split img": echo-[echo]_desc-Denoised_bold.nii.gz Denoised time series for echo number ``echo``
+============================================================================================= =====================================================
+
+If ``tedort`` is True
+---------------------
+
+======================================================== =====================================================
+Key: Filename Content
+======================================================== =====================================================
+"ICA orthogonalized mixing tsv": desc-ICAOrth_mixing.tsv Mixing matrix with rejected components orthogonalized
+ from accepted components
+======================================================== =====================================================
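+
+To make the ``tedort`` output more concrete, here is a rough ``numpy`` sketch of the
+underlying idea (illustrative only, not ``tedana``'s exact implementation): each
+rejected component time series is replaced by the residual left after projecting out
+the accepted component time series.
+
+.. code-block:: python
+
+    import numpy as np
+
+    def orthogonalize_rejected(mixing, accepted_idx, rejected_idx):
+        """Remove variance shared with accepted components from rejected ones."""
+        acc = mixing[:, accepted_idx]  # time x accepted components
+        rej = mixing[:, rejected_idx]  # time x rejected components
+        # Least-squares fit of the accepted time series to each rejected one
+        betas, *_ = np.linalg.lstsq(acc, rej, rcond=None)
+        ortho = mixing.copy()
+        ortho[:, rejected_idx] = rej - acc @ betas  # keep only the unshared part
+        return ortho
+
+This way, variance shared between accepted and rejected components is retained in
+the data when the rejected time series are later regressed out.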
+
+If ``gscontrol`` includes 'gsr'
+-------------------------------
+
+================================================================= =====================================================
+Key: Filename Content
+================================================================= =====================================================
+"gs img": desc-globalSignal_map.nii.gz Spatial global signal
+"global signal time series tsv": desc-globalSignal_timeseries.tsv Time series of global signal from optimally combined
+ data.
+"has gs combined img": desc-optcomWithGlobalSignal_bold.nii.gz Optimally combined time series with global signal
+ retained.
+"removed gs combined img": desc-optcomNoGlobalSignal_bold.nii.gz Optimally combined time series with global signal
+ removed.
+================================================================= =====================================================
+
+If ``gscontrol`` includes 'mir'
+-------------------------------
+
+(Minimal intensity regression, which may help remove some T1 noise and
+was an option in the MEICA v2.5 code, but never fully explained or evaluated in a publication)
+
+======================================================================================= =====================================================
+Key: Filename Content
+======================================================================================= =====================================================
+"t1 like img": desc-T1likeEffect_min.nii.gz T1-like effect
+"mir denoised img": desc-optcomMIRDenoised_bold.nii.gz Denoised time series after MIR
+"ICA MIR mixing tsv": desc-ICAMIRDenoised_mixing.tsv ICA mixing matrix after MIR
+"ICA accepted mir component weights img": desc-ICAAcceptedMIRDenoised_components.nii.gz High-kappa components after MIR
+"ICA accepted mir denoised img": desc-optcomAcceptedMIRDenoised_bold.nii.gz High-kappa time series after MIR
+======================================================================================= =====================================================
+
+.. _classification-output-descriptions:
+
+**********************************
+Classification output descriptions
+**********************************
 
 TEDPCA and TEDICA use component tables to track relevant metrics, component
 classifications, and rationales behind classifications.
-The component tables and additional information are stored as tsv and json files.
-`A full description of these outputs are here`_.
+The component tables and additional information are stored as tsv and json files,
+labeled "ICA metrics" and "PCA metrics" in :ref:`standard-filename-outputs`. This section
+explains the classification codes in those files in more detail.
+:doc:`building_decision_trees` covers the full process, not just the
+descriptions of the outputted files.
 
-.. _A full description of these outputs are here: classification_output_descriptions.html
+
+TEDPCA codes
+------------
+
+In ``tedana``, PCA is used to reduce the number of dimensions (components) in the
+dataset. Without this step, the number of components would be one less than
+the number of volumes; many of those components would effectively be
+Gaussian noise, and ICA would not reliably converge. Standard methods for data
+reduction use cost functions, like MDL, KIC, and AIC, to estimate the variance
+that is just noise and remove the lowest variance components under that
+threshold.
+By default, ``tedana`` uses AIC.
+Of those three, AIC is the least aggressive and will retain the most components.
+
+``Tedana`` includes additional `kundu` and `kundu-stabilize` approaches that
+identify and remove components that don't contain T2* or S0 signal and are more
+likely to be noise. If the `--tedpca kundu` option is used, the PCA_metrics tsv
+file will include an accepted vs rejected classification column and also a
+rationale column of codes documenting why a PCA component was removed. If MDL, KIC,
+or AIC is used, then the classification column will still exist, but it will only
+include accepted components, and the rationale column will contain "n/a".
+When kundu is used, the following are brief explanations of the rationale codes:
+
+===== =============== ========================================================
+Code Classification Description
+===== =============== ========================================================
+P001 rejected Low Rho, Kappa, and variance explained
+P002 rejected Low variance explained
+P003 rejected Kappa equals fmax
+P004 rejected Rho equals fmax
+P005 rejected Cumulative variance explained above 95% (only in
+ stabilized PCA decision tree)
+P006 rejected Kappa below fmin (only in stabilized PCA decision tree)
+P007 rejected Rho below fmin (only in stabilized PCA decision tree)
+===== =============== ========================================================
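+
+As a (hypothetical) usage example, the codes above can be tallied from the
+"PCA metrics" file with ``pandas``, assuming a run with ``--tedpca kundu`` and
+the default "BIDS Derivatives" filename:
+
+.. code-block:: python
+
+    import pandas as pd
+
+    metrics = pd.read_csv("desc-PCA_metrics.tsv", sep="\t")
+    rejected = metrics[metrics["classification"] == "rejected"]
+    # Count how often each rationale code appears
+    # (a component may carry more than one code)
+    print(rejected["rationale"].value_counts())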
+
+
+ICA Classification Outputs
+--------------------------
+
+The component table is stored in ``desc-tedana_metrics.tsv`` or
+``tedana_metrics.tsv``.
+Each row is a component number.
+Each column is a metric that is calculated for each component.
+Short descriptions of each column metric are in the output log,
+``tedana_[date_time].tsv``, and the actual metric calculations are in
+:mod:`~tedana.metrics.collect`.
+The final two columns are ``classification`` and ``classification_tags``.
+``classification`` should include **accepted** or **rejected** for every
+component, and **rejected** components are removed through denoising.
+``classification_tags`` provide more information on why
+components received a specific classification.
+Each component can receive more than one tag.
+The following tags are included depending on whether ``--tree`` is "minimal" or
+"kundu", or whether ``ica_reclassify`` is run.
+
+===================== ================ ========================================
+Tag Included in Tree Explanation
+===================== ================ ========================================
+Likely BOLD minimal,kundu Accepted because likely to include some
+ BOLD signal
+Unlikely BOLD minimal,kundu Rejected because likely to include a
+ lot of non-BOLD signal
+Low variance minimal,kundu Accepted because variance is too low to
+ lose a degree-of-freedom by rejecting
+Less likely BOLD kundu Rejected based on some edge criteria
+ based on relative rankings of components
+Accept borderline kundu Accepted based on some edge criteria
+ based on relative rankings of components
+No provisional accept kundu Accepted because the kundu tree did
+ not find any components to consider
+ accepting, so the conservative "failure"
+ case is to accept everything rather than
+ rejecting everything
+manual reclassify manual_classify Classification based on user input. If
+ done after automatic selection, then
+ the preceding tag from automatic
+ selection is retained and this tag
+ notes the classification was manually
+ changed
+===================== ================ ========================================
+
+The decision tree is a list of nodes where the classification of each component
+could change. Information on which nodes changed classifications, and how, is
+in several places:
+
+- The output log includes the name of each
+  node and the count of components that changed classification during execution.
+- The same information is stored in the ``ICA decision tree`` json file
+  (see :ref:`output-filename-descriptions`) in the "output" field for each node.
+  That information is organized so that it can be used to generate a visual or
+  text-based summary of what happened when the decision tree was run on a
+  dataset.
+- The ``ICA status table`` lists the classification status of each component
+  after each node was run.
+  This is particularly useful when trying to understand how a specific component
+  ended up receiving its classification, as sketched below.
 
 
 *********************
 
@@ -41,7 +318,7 @@ You can also play around with `our demo`_.
 
 
 Report Structure
-================
+----------------
 
The image below shows a representative report, which has two sections: a) the summary view, and b) the
individual component view.
@@ -136,7 +413,7 @@ component (selected in the summary view, see below). It includes three differen
 
 Reports User Interactions
-=========================
+-------------------------
 
As previously mentioned, all summary plots in the report allow user interactions. While the Kappa/Rho
Scatter plot allows full user interaction (see the toolbar that accompanies the plot