diff --git a/docs/decoding.rst b/docs/decoding.rst
index 23b67b7ad..4c222d434 100644
--- a/docs/decoding.rst
+++ b/docs/decoding.rst
@@ -70,7 +70,7 @@ The correlation-based decoding is implemented in NiMARE's `CorrelationDecoder` c
     decoder = CorrelationDecoder(
         frequency_threshold=0.001,
         meta_estimator=mkda.MKDAChi2,
-        target_image='z_desc-specificity',
+        target_image='z_desc-association',
     )
     decoder.fit(ns_dset)
     decoding_results = decoder.transform('pain_map.nii.gz')
@@ -140,7 +140,7 @@ of the database in a confusion matrix. For each label in the ontology, studies a
 four groups: selected and label-positive (:math:`S_{s+l+}`), selected and label-negative
 (:math:`S_{s+l-}`), unselected and label-positive (:math:`S_{s-l+}`), and unselected and
 label-negative (:math:`S_{s-l-}`). Each method then compares these groups in order to evaluate
-both consistency and specificity of the relationship between the selection criteria and each label,
+both uniformity and association of the relationship between the selection criteria and each label,
 which are evaluated in terms of both statistical significance and effect size.
@@ -162,10 +162,10 @@ individual peak, and so weighting based on the number of foci would be inappropr
 This decoding method produces four outputs for each label. First, the distribution of studies in
 the sample with the label are compared to the distributions of other labels within the sample.
-This consistency analysis produces both a measure of statistical significance (i.e., a p-value)
+This uniformity analysis produces both a measure of statistical significance (i.e., a p-value)
 and a measure of effect size (i.e., the likelihood of being selected given the presence of the
 label). Next, the studies in the sample are compared to the studies in the rest of the database.
-This specificity analysis produces a p-value and an effect size measure of the posterior
+This association analysis produces a p-value and an effect size measure of the posterior
 probability of having the label given selection into the sample. A detailed algorithm description
 is presented below.
@@ -261,17 +261,17 @@ those in a larger database, but, unlike the BrainMap method, does not take foci
 this reason, the Neurosynth method would likely be more appropriate for selection criteria not
 based on regions of interest (e.g., for characterizing meta-analytic groupings from a
 meta-analytic clustering analysis). However, the Neurosynth method requires user-provided
-information that BrainMap does not. Namely, in order to estimate probabilities for the consistency
-and specificity analyses with Bayes' Theorem, the Neurosynth method requires a prior probability of
+information that BrainMap does not. Namely, in order to estimate probabilities for the uniformity
+and association analyses with Bayes' Theorem, the Neurosynth method requires a prior probability of
 a given label. Typically, a value of 0.5 is used (i.e., the estimated probability that an
 individual is undergoing a given mental process described by a label, barring any evidence from
 neuroimaging data, is predicted to be 50%). This is, admittedly, a poor prediction, which means
 that probabilities estimated based on this prior are not likely to be accurate, though they may
 still serve as useful estimates of effect size for the analysis.
 
-Like the BrainMap method, this method produces four outputs for each label. For the consistency
+Like the BrainMap method, this method produces four outputs for each label. For the uniformity
 analysis, this method produces both a p-value and a conditional probability of selection given the
-presence of the label and the prior probability of having the label. For the specificity analysis,
+presence of the label and the prior probability of having the label. For the association analysis,
 the Neurosynth method produces both a p-value and a posterior probability of presence of the label
 given selection and the prior probability of having the label. A detailed algorithm description is
 presented below.
diff --git a/examples/02_meta-analyses/01_plot_cbma.py b/examples/02_meta-analyses/01_plot_cbma.py
index 84471f302..93cb6c403 100644
--- a/examples/02_meta-analyses/01_plot_cbma.py
+++ b/examples/02_meta-analyses/01_plot_cbma.py
@@ -84,13 +84,13 @@
 cres = corr.transform(results)
 
 plot_stat_map(
-    results.get_map("z_desc-consistency"),
+    results.get_map("z_desc-uniformity"),
     draw_cross=False,
     cmap="RdBu_r",
     threshold=0.1,
 )
 plot_stat_map(
-    cres.get_map("z_desc-consistencySize_level-cluster_corr-FWE_method-montecarlo"),
+    cres.get_map("z_desc-uniformitySize_level-cluster_corr-FWE_method-montecarlo"),
     draw_cross=False,
     cmap="RdBu_r",
     threshold=0.1,
diff --git a/examples/02_meta-analyses/07_macm.py b/examples/02_meta-analyses/07_macm.py
index 822d825ac..07b4a9d92 100644
--- a/examples/02_meta-analyses/07_macm.py
+++ b/examples/02_meta-analyses/07_macm.py
@@ -65,9 +65,9 @@
 corr = FWECorrector(method="montecarlo", n_iters=10000)
 cres = corr.transform(results)
 
-# We want the "specificity" map (2-way chi-square between sel and unsel)
+# We want the "association" map (2-way chi-square between sel and unsel)
 plotting.plot_stat_map(
-    cres.get_map("z_desc-consistency_level-voxel_corr-FWE_method-montecarlo"),
+    cres.get_map("z_desc-association_level-voxel_corr-FWE_method-montecarlo"),
     threshold=3.09,
     draw_cross=False,
     cmap="RdBu_r",
diff --git a/examples/02_meta-analyses/11_plot_cbmr.py b/examples/02_meta-analyses/11_plot_cbmr.py
index 798af4cf8..e3ac5e248 100644
--- a/examples/02_meta-analyses/11_plot_cbmr.py
+++ b/examples/02_meta-analyses/11_plot_cbmr.py
@@ -357,7 +357,7 @@
 ###############################################################################
 # Now that we have done group comparison tests with the specified contrast matrix,
-# we can plot the z-score maps indicating consistency in activation regions among
+# we can plot the z-score maps indicating uniformity in activation regions among
 # all four groups.
 plot_stat_map(
diff --git a/nimare/decode/continuous.py b/nimare/decode/continuous.py
index 862cf8629..7d110e15e 100755
--- a/nimare/decode/continuous.py
+++ b/nimare/decode/continuous.py
@@ -171,7 +171,7 @@ def __init__(
         features=None,
         frequency_threshold=0.001,
         meta_estimator=None,
-        target_image="z_desc-specificity",
+        target_image="z_desc-association",
         n_cores=1,
     ):
         meta_estimator = (
diff --git a/nimare/decode/discrete.py b/nimare/decode/discrete.py
index ffb5ee86f..d624134fc 100755
--- a/nimare/decode/discrete.py
+++ b/nimare/decode/discrete.py
@@ -325,7 +325,7 @@ def brainmap_decode(
         n_selected_term - np.mean(n_selected_term)
     ).ravel()  # pylint: disable=no-member
 
-    # Two-way chi-square test for specificity of activation
+    # Two-way chi-square test for association of activation
     cells = np.array(
         [
             [n_selected_term, n_selected_noterm],  # pylint: disable=no-member
@@ -605,14 +605,14 @@ def neurosynth_decode(
         prior = p_term
 
     # Significance testing
-    # One-way chi-square test for consistency of term frequency across terms
+    # One-way chi-square test for uniformity of term frequency across terms
     chi2_fi = one_way(n_selected_term, n_term)
     p_fi = special.chdtrc(1, chi2_fi)
     sign_fi = np.sign(
         n_selected_term - np.mean(n_selected_term)
     ).ravel()  # pylint: disable=no-member
 
-    # Two-way chi-square test for specificity of activation
+    # Two-way chi-square test for association of activation
     cells = np.array(
         [
             [n_selected_term, n_selected_noterm],  # pylint: disable=no-member
diff --git a/nimare/diagnostics.py b/nimare/diagnostics.py
index 6d70e120d..31774e5f0 100644
--- a/nimare/diagnostics.py
+++ b/nimare/diagnostics.py
@@ -310,9 +310,9 @@ def _transform(self, expid, label_map, sign, result):
         cluster_masker.fit(label_map)
 
         # CBMAs have "stat" maps, while most IBMAs have "est" maps. ALESubtraction has
-        # stat_desc-group1MinusGroup2" maps, while MKDAChi2 has "z_desc-specificity" maps.
+        # "stat_desc-group1MinusGroup2" maps, while MKDAChi2 has "z_desc-association" maps.
         # Fisher's and Stouffer's only have "z" maps though.
-        target_value_keys = {"stat", "est", "stat_desc-group1MinusGroup2", "z_desc-specificity"}
+        target_value_keys = {"stat", "est", "stat_desc-group1MinusGroup2", "z_desc-association"}
         avail_value_keys = set(result.maps.keys())
         union_value_keys = list(target_value_keys & avail_value_keys)
         target_value_map = union_value_keys[0] if union_value_keys else "z"
diff --git a/nimare/meta/cbma/mkda.py b/nimare/meta/cbma/mkda.py
index 806e772de..a62e73bb5 100644
--- a/nimare/meta/cbma/mkda.py
+++ b/nimare/meta/cbma/mkda.py
@@ -302,6 +302,11 @@ class MKDAChi2(PairwiseCBMAEstimator):
 
     .. versionchanged:: 0.2.1
 
+        - Make the ``prior`` parameter default to None, which controls whether the posterior
+          probability maps ``pAgF_prior`` and ``pFgA_prior`` are calculated. This is useful
+          because those maps are difficult to interpret, and skipping them speeds up the algorithm.
+        - Rename ``consistency`` to ``uniformity`` and ``specificity`` to ``association`` to match
+          Neurosynth's terminology.
         - New parameters: ``memory`` and ``memory_level`` for memory caching.
 
     .. versionchanged:: 0.0.12
@@ -416,7 +421,7 @@ def _generate_description(self):
             "kernel. "
             f"{self.kernel_transformer._generate_description()} "
             "This analysis calculated several measures. "
-            "The first dataset was evaluated for consistency of activation via a one-way "
+            "The first dataset was evaluated for uniformity of activation via a one-way "
             "chi-square test. "
             f"The first input dataset included {self.inputs_['coordinates1'].shape[0]} foci from "
             f"{len(self.inputs_['id1'])} experiments. "
@@ -476,18 +481,18 @@ def _fit(self, dataset1, dataset2):
 
         del n_mappables
 
-        # Conditional probabilities
         pAgF = n_selected_active_voxels / n_selected
         pAgU = n_unselected_active_voxels / n_unselected
         pFgA = pAgF * pF / pA
         del pF
 
-        # Recompute conditionals with uniform prior
-        pAgF_prior = self.prior * pAgF + (1 - self.prior) * pAgU
-        pFgA_prior = pAgF * self.prior / pAgF_prior
+        if self.prior:
+            # Recompute conditionals with uniform prior
+            pAgF_prior = self.prior * pAgF + (1 - self.prior) * pAgU
+            pFgA_prior = pAgF * self.prior / pAgF_prior
 
-        # One-way chi-square test for consistency of activation
+        # One-way chi-square test for uniformity of activation
         pAgF_chi2_vals = one_way(np.squeeze(n_selected_active_voxels), n_selected)
         pAgF_p_vals = chi2.sf(pAgF_chi2_vals, 1)
         pAgF_sign = np.sign(n_selected_active_voxels - np.mean(n_selected_active_voxels))
@@ -495,7 +500,7 @@ def _fit(self, dataset1, dataset2):
 
         del pAgF_sign
 
-        # Two-way chi-square for specificity of activation
+        # Two-way chi-square for association of activation
         cells = np.squeeze(
             np.array(
                 [
@@ -523,20 +528,22 @@ def _fit(self, dataset1, dataset2):
         del pFgA_sign, pAgU
 
         maps = {
+            "z_desc-uniformity": pAgF_z,
+            "z_desc-association": pFgA_z,
+            "chi2_desc-uniformity": pAgF_chi2_vals,
+            "chi2_desc-association": pFgA_chi2_vals,
+            "p_desc-uniformity": pAgF_p_vals,
+            "p_desc-association": pFgA_p_vals,
             "prob_desc-A": pA,
             "prob_desc-AgF": pAgF,
             "prob_desc-FgA": pFgA,
-            ("prob_desc-AgF_given_pF=%0.2f" % self.prior): pAgF_prior,
-            ("prob_desc-FgA_given_pF=%0.2f" % self.prior): pFgA_prior,
-            "z_desc-consistency": pAgF_z,
-            "z_desc-specificity": pFgA_z,
-            "chi2_desc-consistency": pAgF_chi2_vals,
-            "chi2_desc-specificity": pFgA_chi2_vals,
-            "p_desc-consistency": pAgF_p_vals,
-            "p_desc-specificity": pFgA_p_vals,
         }
 
-        description = self._generate_description()
+        if self.prior:
+            maps["prob_desc-AgF_prior"] = pAgF_prior
+            maps["prob_desc-FgA_prior"] = pFgA_prior
+
+        description = self._generate_description()
         return maps, {}, description
 
     def _run_fwe_permutation(self, iter_xyz1, iter_xyz2, iter_df1, iter_df2, conn, voxel_thresh):
@@ -612,7 +619,7 @@ def _run_fwe_permutation(self, iter_xyz1, iter_xyz2, iter_df1, iter_df2, conn, v
         # pAgF = n_selected_active_voxels / n_selected
         # pAgU = n_unselected_active_voxels / n_unselected
 
-        # One-way chi-square test for consistency of activation
+        # One-way chi-square test for uniformity of activation
         pAgF_chi2_vals = one_way(np.squeeze(n_selected_active_voxels), n_selected)
 
         # Voxel-level inference
@@ -624,7 +631,7 @@ def _run_fwe_permutation(self, iter_xyz1, iter_xyz2, iter_df1, iter_df2, conn, v
             pAgF_chi2_map, voxel_thresh, conn, tail="two"
         )
 
-        # Two-way chi-square for specificity of activation
+        # Two-way chi-square for association of activation
         cells = np.squeeze(
             np.array(
                 [
@@ -769,42 +776,42 @@ def correct_fwe_montecarlo(self, result, voxel_thresh=0.001, n_iters=5000, n_cor
             the correction procedure. The following arrays are generated by this method:
 
-            - ``p_desc-consistency_level-voxel``: Voxel-level FWE-corrected p-values from the
-              consistency/forward inference analysis.
-            - ``z_desc-consistency_level-voxel``: Voxel-level FWE-corrected z-values from the
-              consistency/forward inference analysis.
-            - ``logp_desc-consistency_level-voxel``: Voxel-level FWE-corrected -log10 p-values
-              from the consistency/forward inference analysis.
-            - ``p_desc-consistencyMass_level-cluster``: Cluster-level FWE-corrected p-values
-              from the consistency/forward inference analysis, using cluster mass.
-            - ``z_desc-consistencyMass_level-cluster``: Cluster-level FWE-corrected z-values
-              from the consistency/forward inference analysis, using cluster mass.
-            - ``logp_desc-consistencyMass_level-cluster``: Cluster-level FWE-corrected -log10
-              p-values from the consistency/forward inference analysis, using cluster mass.
-            - ``p_desc-consistencySize_level-cluster``: Cluster-level FWE-corrected p-values
-              from the consistency/forward inference analysis, using cluster size.
-            - ``z_desc-consistencySize_level-cluster``: Cluster-level FWE-corrected z-values
-              from the consistency/forward inference analysis, using cluster size.
-            - ``logp_desc-consistencySize_level-cluster``: Cluster-level FWE-corrected -log10
-              p-values from the consistency/forward inference analysis, using cluster size.
-            - ``p_desc-specificity_level-voxel``: Voxel-level FWE-corrected p-values from the
-              specificity/reverse inference analysis.
-            - ``z_desc-specificity_level-voxel``: Voxel-level FWE-corrected z-values from the
-              specificity/reverse inference analysis.
-            - ``logp_desc-specificity_level-voxel``: Voxel-level FWE-corrected -log10 p-values
-              from the specificity/reverse inference analysis.
-            - ``p_desc-specificityMass_level-cluster``: Cluster-level FWE-corrected p-values
-              from the specificity/reverse inference analysis, using cluster mass.
-            - ``z_desc-specificityMass_level-cluster``: Cluster-level FWE-corrected z-values
-              from the specificity/reverse inference analysis, using cluster mass.
-            - ``logp_desc-specificityMass_level-cluster``: Cluster-level FWE-corrected -log10
-              p-values from the specificity/reverse inference analysis, using cluster mass.
-            - ``p_desc-specificitySize_level-cluster``: Cluster-level FWE-corrected p-values
-              from the specificity/reverse inference analysis, using cluster size.
-            - ``z_desc-specificitySize_level-cluster``: Cluster-level FWE-corrected z-values
-              from the specificity/reverse inference analysis, using cluster size.
-            - ``logp_desc-specificitySize_level-cluster``: Cluster-level FWE-corrected -log10
-              p-values from the specificity/reverse inference analysis, using cluster size.
+            - ``p_desc-uniformity_level-voxel``: Voxel-level FWE-corrected p-values from the
+              uniformity/forward inference analysis.
+            - ``z_desc-uniformity_level-voxel``: Voxel-level FWE-corrected z-values from the
+              uniformity/forward inference analysis.
+            - ``logp_desc-uniformity_level-voxel``: Voxel-level FWE-corrected -log10 p-values
+              from the uniformity/forward inference analysis.
+            - ``p_desc-uniformityMass_level-cluster``: Cluster-level FWE-corrected p-values
+              from the uniformity/forward inference analysis, using cluster mass.
+            - ``z_desc-uniformityMass_level-cluster``: Cluster-level FWE-corrected z-values
+              from the uniformity/forward inference analysis, using cluster mass.
+            - ``logp_desc-uniformityMass_level-cluster``: Cluster-level FWE-corrected -log10
+              p-values from the uniformity/forward inference analysis, using cluster mass.
+            - ``p_desc-uniformitySize_level-cluster``: Cluster-level FWE-corrected p-values
+              from the uniformity/forward inference analysis, using cluster size.
+            - ``z_desc-uniformitySize_level-cluster``: Cluster-level FWE-corrected z-values
+              from the uniformity/forward inference analysis, using cluster size.
+            - ``logp_desc-uniformitySize_level-cluster``: Cluster-level FWE-corrected -log10
+              p-values from the uniformity/forward inference analysis, using cluster size.
+            - ``p_desc-association_level-voxel``: Voxel-level FWE-corrected p-values from the
+              association/reverse inference analysis.
+            - ``z_desc-association_level-voxel``: Voxel-level FWE-corrected z-values from the
+              association/reverse inference analysis.
+            - ``logp_desc-association_level-voxel``: Voxel-level FWE-corrected -log10 p-values
+              from the association/reverse inference analysis.
+            - ``p_desc-associationMass_level-cluster``: Cluster-level FWE-corrected p-values
+              from the association/reverse inference analysis, using cluster mass.
+            - ``z_desc-associationMass_level-cluster``: Cluster-level FWE-corrected z-values
+              from the association/reverse inference analysis, using cluster mass.
+            - ``logp_desc-associationMass_level-cluster``: Cluster-level FWE-corrected -log10
+              p-values from the association/reverse inference analysis, using cluster mass.
+            - ``p_desc-associationSize_level-cluster``: Cluster-level FWE-corrected p-values
+              from the association/reverse inference analysis, using cluster size.
+            - ``z_desc-associationSize_level-cluster``: Cluster-level FWE-corrected z-values
+              from the association/reverse inference analysis, using cluster size.
+            - ``logp_desc-associationSize_level-cluster``: Cluster-level FWE-corrected -log10
+              p-values from the association/reverse inference analysis, using cluster size.
 
         Notes
         -----
 
@@ -844,10 +851,10 @@ def correct_fwe_montecarlo(self, result, voxel_thresh=0.001, n_iters=5000, n_cor
             np.vstack(np.where(self.masker.mask_img.get_fdata())).T,
             self.masker.mask_img.affine,
         )
-        pAgF_chi2_vals = result.get_map("chi2_desc-consistency", return_type="array")
-        pFgA_chi2_vals = result.get_map("chi2_desc-specificity", return_type="array")
-        pAgF_z_vals = result.get_map("z_desc-consistency", return_type="array")
-        pFgA_z_vals = result.get_map("z_desc-specificity", return_type="array")
+        pAgF_chi2_vals = result.get_map("chi2_desc-uniformity", return_type="array")
+        pFgA_chi2_vals = result.get_map("chi2_desc-association", return_type="array")
+        pAgF_z_vals = result.get_map("z_desc-uniformity", return_type="array")
+        pFgA_z_vals = result.get_map("z_desc-association", return_type="array")
         pAgF_sign = np.sign(pAgF_z_vals)
         pFgA_sign = np.sign(pFgA_z_vals)
@@ -962,26 +969,26 @@ def correct_fwe_montecarlo(self, result, voxel_thresh=0.001, n_iters=5000, n_cor
         pFgA_logp_csfwe_vals[np.isinf(pFgA_logp_csfwe_vals)] = -np.log10(eps)
 
         maps = {
-            # Consistency analysis
-            "p_desc-consistency_level-voxel": pAgF_p_vfwe_vals,
-            "z_desc-consistency_level-voxel": pAgF_z_vfwe_vals,
-            "logp_desc-consistency_level-voxel": pAgF_logp_vfwe_vals,
-            "p_desc-consistencyMass_level-cluster": pAgF_p_cmfwe_vals,
-            "z_desc-consistencyMass_level-cluster": pAgF_z_cmfwe_vals,
-            "logp_desc-consistencyMass_level-cluster": pAgF_logp_cmfwe_vals,
-            "p_desc-consistencySize_level-cluster": pAgF_p_csfwe_vals,
-            "z_desc-consistencySize_level-cluster": pAgF_z_csfwe_vals,
-            "logp_desc-consistencySize_level-cluster": pAgF_logp_csfwe_vals,
-            # Specificity analysis
-            "p_desc-specificity_level-voxel": pFgA_p_vfwe_vals,
-            "z_desc-specificity_level-voxel": pFgA_z_vfwe_vals,
-            "logp_desc-specificity_level-voxel": pFgA_logp_vfwe_vals,
-            "p_desc-specificityMass_level-cluster": pFgA_p_cmfwe_vals,
-            "z_desc-specificityMass_level-cluster": pFgA_z_cmfwe_vals,
-            "logp_desc-specificityMass_level-cluster": pFgA_logp_cmfwe_vals,
-            "p_desc-specificitySize_level-cluster": pFgA_p_csfwe_vals,
-            "z_desc-specificitySize_level-cluster": pFgA_z_csfwe_vals,
-            "logp_desc-specificitySize_level-cluster": pFgA_logp_csfwe_vals,
+            # Uniformity analysis
+            "p_desc-uniformity_level-voxel": pAgF_p_vfwe_vals,
+            "z_desc-uniformity_level-voxel": pAgF_z_vfwe_vals,
+            "logp_desc-uniformity_level-voxel": pAgF_logp_vfwe_vals,
+            "p_desc-uniformityMass_level-cluster": pAgF_p_cmfwe_vals,
+            "z_desc-uniformityMass_level-cluster": pAgF_z_cmfwe_vals,
+            "logp_desc-uniformityMass_level-cluster": pAgF_logp_cmfwe_vals,
+            "p_desc-uniformitySize_level-cluster": pAgF_p_csfwe_vals,
+            "z_desc-uniformitySize_level-cluster": pAgF_z_csfwe_vals,
+            "logp_desc-uniformitySize_level-cluster": pAgF_logp_csfwe_vals,
+            # Association analysis
+            "p_desc-association_level-voxel": pFgA_p_vfwe_vals,
+            "z_desc-association_level-voxel": pFgA_z_vfwe_vals,
+            "logp_desc-association_level-voxel": pFgA_logp_vfwe_vals,
+            "p_desc-associationMass_level-cluster": pFgA_p_cmfwe_vals,
+            "z_desc-associationMass_level-cluster": pFgA_z_cmfwe_vals,
+            "logp_desc-associationMass_level-cluster": pFgA_logp_cmfwe_vals,
+            "p_desc-associationSize_level-cluster": pFgA_p_csfwe_vals,
+            "z_desc-associationSize_level-cluster": pFgA_z_csfwe_vals,
+            "logp_desc-associationSize_level-cluster": pFgA_logp_csfwe_vals,
         }
 
         description = ""
@@ -1009,7 +1016,7 @@ def correct_fdr_indep(self, result, alpha=0.05):
         maps : :obj:`dict`
             Dictionary of 1D arrays corresponding to masked maps generated by
             the correction procedure. The following arrays are generated by
-            this method: 'z_desc-consistency_level-voxel' and 'z_desc-specificity_level-voxel'.
+            this method: 'z_desc-uniformity_level-voxel' and 'z_desc-association_level-voxel'.
 
         See Also
         --------
@@ -1022,10 +1029,10 @@ def correct_fdr_indep(self, result, alpha=0.05):
         >>> corrector = FDRCorrector(method='indep', alpha=0.05)
         >>> cresult = corrector.transform(result)
         """
-        pAgF_p_vals = result.get_map("p_desc-consistency", return_type="array")
-        pFgA_p_vals = result.get_map("p_desc-specificity", return_type="array")
-        pAgF_z_vals = result.get_map("z_desc-consistency", return_type="array")
-        pFgA_z_vals = result.get_map("z_desc-specificity", return_type="array")
+        pAgF_p_vals = result.get_map("p_desc-uniformity", return_type="array")
+        pFgA_p_vals = result.get_map("p_desc-association", return_type="array")
+        pAgF_z_vals = result.get_map("z_desc-uniformity", return_type="array")
+        pFgA_z_vals = result.get_map("z_desc-association", return_type="array")
         pAgF_sign = np.sign(pAgF_z_vals)
         pFgA_sign = np.sign(pFgA_z_vals)
         pAgF_p_FDR = fdr(pAgF_p_vals, q=alpha, method="bh")
@@ -1035,8 +1042,8 @@ def correct_fdr_indep(self, result, alpha=0.05):
         pFgA_z_FDR = p_to_z(pFgA_p_FDR, tail="two") * pFgA_sign
 
         maps = {
-            "z_desc-consistency_level-voxel": pAgF_z_FDR,
-            "z_desc-specificity_level-voxel": pFgA_z_FDR,
+            "z_desc-uniformity_level-voxel": pAgF_z_FDR,
+            "z_desc-association_level-voxel": pFgA_z_FDR,
         }
 
         description = ""
diff --git a/nimare/tests/test_diagnostics.py b/nimare/tests/test_diagnostics.py
index 7db86f40f..87d2c82a7 100644
--- a/nimare/tests/test_diagnostics.py
+++ b/nimare/tests/test_diagnostics.py
@@ -15,7 +15,7 @@
         (cbma.ALE, "cbma", "onesample", "z", 1.65),
         (cbma.MKDADensity, "cbma", "onesample", "z", 1.65),
         (cbma.KDA, "cbma", "onesample", "z", 1.65),
-        (cbma.MKDAChi2, "cbma", "twosample", "z_desc-consistency", 1.65),
+        (cbma.MKDAChi2, "cbma", "twosample", "z_desc-uniformity", 1.65),
         (ibma.Fishers, "ibma", "onesample", "z", 0.1),
         (ibma.Stouffers, "ibma", "onesample", "z", 0.1),
         (ibma.WeightedLeastSquares, "ibma", "onesample", "z", 0.1),
@@ -110,7 +110,7 @@ def test_jackknife_with_custom_masker_smoke(testdata_ibma):
         (cbma.ALE, "cbma", "onesample", "z"),
         (cbma.MKDADensity, "cbma", "onesample", "z"),
         (cbma.KDA, "cbma", "onesample", "z"),
-        (cbma.MKDAChi2, "cbma", "twosample", "z_desc-consistency"),
+        (cbma.MKDAChi2, "cbma", "twosample", "z_desc-uniformity"),
         (ibma.Stouffers, "ibma", "onesample", "z"),
     ],
 )
diff --git a/nimare/workflows/base.py b/nimare/workflows/base.py
index 39a6025fa..e0b80d1a5 100644
--- a/nimare/workflows/base.py
+++ b/nimare/workflows/base.py
@@ -152,7 +152,7 @@ def _transform(self, result):
 
         if issubclass(type(result.estimator), PairwiseCBMAEstimator):
             modalities = (
-                ["_desc-associationMass", "_corr-"]
+                ["_desc-associationMass", "_corr-"]
                 if corr_method == "montecarlo"
                 else ["_desc-", "_corr-"]
            )
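
For review context (not part of the patch): a minimal sketch of how the renamed map keys surface to users. This is a hypothetical example, not the library's documented usage. It assumes the `nidm_pain_dset.json` test file shipped in `nimare/resources/`, an arbitrary half/half split of that dataset (only there to give `MKDAChi2` two samples to compare), and that `MKDAChi2.__init__` exposes the `prior` keyword described in the versionchanged note above.

```python
import os

from nimare.correct import FWECorrector
from nimare.dataset import Dataset
from nimare.meta.cbma.mkda import MKDAChi2
from nimare.utils import get_resource_path

# Load the small test dataset bundled with NiMARE and split it in half.
# The split is arbitrary; it just produces two samples for the chi-square tests.
dset = Dataset(os.path.join(get_resource_path(), "nidm_pain_dset.json"))
dset1 = dset.slice(dset.ids[: len(dset.ids) // 2])
dset2 = dset.slice(dset.ids[len(dset.ids) // 2 :])

# prior=None (the new default) skips the prob_desc-AgF_prior and
# prob_desc-FgA_prior maps; pass a float such as 0.5 to compute them.
meta = MKDAChi2(prior=None)
results = meta.fit(dset1, dset2)

# Renamed unthresholded maps (old keys: z_desc-consistency / z_desc-specificity).
z_uniformity = results.get_map("z_desc-uniformity")  # one-way test, forward inference
z_association = results.get_map("z_desc-association")  # two-way test, reverse inference

# Corrected maps carry the same desc- entities, e.g., the voxel-level FWE map.
# n_iters is kept tiny here only to make the sketch quick to run.
corr = FWECorrector(method="montecarlo", n_iters=100, n_cores=1)
cres = corr.transform(results)
z_assoc_fwe = cres.get_map("z_desc-association_level-voxel_corr-FWE_method-montecarlo")
```

Under the new scheme, `desc-uniformity` always tags the one-way (forward inference) test and `desc-association` the two-way (reverse inference) test, so downstream consumers such as `CorrelationDecoder` and the workflow code can key off the entity value alone.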