From ede360e847e586e0f6604f94db5cc0ff087c16f2 Mon Sep 17 00:00:00 2001
From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com>
Date: Mon, 10 Jun 2024 18:50:59 +0000
Subject: [PATCH] Deployed 24e9eb5 with MkDocs version: 1.6.0

---
 .nojekyll                                     |    0
 404.html                                      |  647 ++
 api/eval/binary_metrics/index.html            | 2034 +++++
 api/eval/continuous_metrics/index.html        | 1451 ++++
 api/eval/pseudo_metrics/index.html            |  977 +++
 api/model/BayesPRSModel/index.html            | 3303 ++++++++
 api/model/LDPredInf/index.html                | 1437 ++++
 api/model/VIPRS/index.html                    | 5932 +++++++++++++++
 api/model/VIPRSMix/index.html                 | 2727 +++++++
 .../gridsearch/HyperparameterGrid/index.html  | 2048 +++++
 .../HyperparameterSearch/index.html           | 3478 +++++++++
 api/model/gridsearch/VIPRSBMA/index.html      | 1239 +++
 api/model/gridsearch/VIPRSGrid/index.html     | 3043 ++++++++
 .../gridsearch/VIPRSGridSearch/index.html     | 1357 ++++
 api/overview/index.html                       |  866 +++
 api/plot/diagnostics/index.html               |  876 +++
 api/utils/OptimizeResult/index.html           | 1154 +++
 api/utils/compute_utils/index.html            | 1675 ++++
 api/utils/data_utils/index.html               |  865 +++
 api/utils/exceptions/index.html               |  772 ++
 assets/_mkdocstrings.css                      |  119 +
 assets/images/favicon.png                     | Bin 0 -> 1870 bytes
 assets/javascripts/bundle.ad660dcc.min.js     |   29 +
 assets/javascripts/bundle.ad660dcc.min.js.map |    7 +
 assets/javascripts/lunr/min/lunr.ar.min.js    |    1 +
 assets/javascripts/lunr/min/lunr.da.min.js    |   18 +
 assets/javascripts/lunr/min/lunr.de.min.js    |   18 +
 assets/javascripts/lunr/min/lunr.du.min.js    |   18 +
 assets/javascripts/lunr/min/lunr.el.min.js    |    1 +
 assets/javascripts/lunr/min/lunr.es.min.js    |   18 +
 assets/javascripts/lunr/min/lunr.fi.min.js    |   18 +
 assets/javascripts/lunr/min/lunr.fr.min.js    |   18 +
 assets/javascripts/lunr/min/lunr.he.min.js    |    1 +
 assets/javascripts/lunr/min/lunr.hi.min.js    |    1 +
 assets/javascripts/lunr/min/lunr.hu.min.js    |   18 +
 assets/javascripts/lunr/min/lunr.hy.min.js    |    1 +
 assets/javascripts/lunr/min/lunr.it.min.js    |   18 +
 assets/javascripts/lunr/min/lunr.ja.min.js    |    1 +
 assets/javascripts/lunr/min/lunr.jp.min.js    |    1 +
 assets/javascripts/lunr/min/lunr.kn.min.js    |    1 +
 assets/javascripts/lunr/min/lunr.ko.min.js    |    1 +
 assets/javascripts/lunr/min/lunr.multi.min.js |    1 +
 assets/javascripts/lunr/min/lunr.nl.min.js    |   18 +
 assets/javascripts/lunr/min/lunr.no.min.js    |   18 +
 assets/javascripts/lunr/min/lunr.pt.min.js    |   18 +
 assets/javascripts/lunr/min/lunr.ro.min.js    |   18 +
 assets/javascripts/lunr/min/lunr.ru.min.js    |   18 +
 assets/javascripts/lunr/min/lunr.sa.min.js    |    1 +
 .../lunr/min/lunr.stemmer.support.min.js      |    1 +
 assets/javascripts/lunr/min/lunr.sv.min.js    |   18 +
 assets/javascripts/lunr/min/lunr.ta.min.js    |    1 +
 assets/javascripts/lunr/min/lunr.te.min.js    |    1 +
 assets/javascripts/lunr/min/lunr.th.min.js    |    1 +
 assets/javascripts/lunr/min/lunr.tr.min.js    |   18 +
 assets/javascripts/lunr/min/lunr.vi.min.js    |    1 +
 assets/javascripts/lunr/min/lunr.zh.min.js    |    1 +
 assets/javascripts/lunr/tinyseg.js            |  206 +
 assets/javascripts/lunr/wordcut.js            | 6708 +++++++++++++++++
 .../workers/search.b8dbb3d2.min.js            |   42 +
 .../workers/search.b8dbb3d2.min.js.map        |    7 +
 assets/stylesheets/main.6543a935.min.css      |    1 +
 assets/stylesheets/main.6543a935.min.css.map  |    1 +
 assets/stylesheets/palette.06af60db.min.css   |    1 +
 .../stylesheets/palette.06af60db.min.css.map  |    1 +
 citation/index.html                           |  743 ++
 commandline/overview/index.html               |  818 ++
 commandline/viprs_evaluate/index.html         |  834 ++
 commandline/viprs_fit/index.html              |  888 +++
 commandline/viprs_score/index.html            |  832 ++
 download_ld/index.html                        |  730 ++
 faq/index.html                                |  730 ++
 getting_started/index.html                    |  829 ++
 index.html                                    |  855 +++
 installation/index.html                       |  985 +++
 objects.inv                                   | Bin 0 -> 1913 bytes
 search/search_index.json                      |    1 +
 sitemap.xml                                   |    3 +
 sitemap.xml.gz                                | Bin 0 -> 127 bytes
 tutorials/overview/index.html                 |  730 ++
 79 files changed, 52268 insertions(+)
 create mode 100644 .nojekyll
 create mode 100644 404.html
 create mode 100644 api/eval/binary_metrics/index.html
 create mode 100644 api/eval/continuous_metrics/index.html
 create mode 100644 api/eval/pseudo_metrics/index.html
 create mode 100644 api/model/BayesPRSModel/index.html
 create mode 100644 api/model/LDPredInf/index.html
 create mode 100644 api/model/VIPRS/index.html
 create mode 100644 api/model/VIPRSMix/index.html
 create mode 100644 api/model/gridsearch/HyperparameterGrid/index.html
 create mode 100644 api/model/gridsearch/HyperparameterSearch/index.html
 create mode 100644 api/model/gridsearch/VIPRSBMA/index.html
 create mode 100644 api/model/gridsearch/VIPRSGrid/index.html
 create mode 100644 api/model/gridsearch/VIPRSGridSearch/index.html
 create mode 100644 api/overview/index.html
 create mode 100644 api/plot/diagnostics/index.html
 create mode 100644 api/utils/OptimizeResult/index.html
 create mode 100644 api/utils/compute_utils/index.html
 create mode 100644 api/utils/data_utils/index.html
 create mode 100644 api/utils/exceptions/index.html
 create mode 100644 assets/_mkdocstrings.css
 create mode 100644 assets/images/favicon.png
 create mode 100644 assets/javascripts/bundle.ad660dcc.min.js
 create mode 100644 assets/javascripts/bundle.ad660dcc.min.js.map
 create mode 100644 assets/javascripts/lunr/min/lunr.ar.min.js
 create mode 100644 assets/javascripts/lunr/min/lunr.da.min.js
 create mode 100644 assets/javascripts/lunr/min/lunr.de.min.js
 create mode 100644 assets/javascripts/lunr/min/lunr.du.min.js
 create mode 100644 assets/javascripts/lunr/min/lunr.el.min.js
 create mode 100644 assets/javascripts/lunr/min/lunr.es.min.js
 create mode 100644 assets/javascripts/lunr/min/lunr.fi.min.js
 create mode 100644 assets/javascripts/lunr/min/lunr.fr.min.js
 create mode 100644 assets/javascripts/lunr/min/lunr.he.min.js
 create mode 100644 assets/javascripts/lunr/min/lunr.hi.min.js
 create mode 100644 assets/javascripts/lunr/min/lunr.hu.min.js
 create mode 100644 assets/javascripts/lunr/min/lunr.hy.min.js
 create mode 100644 assets/javascripts/lunr/min/lunr.it.min.js
 create mode 100644 assets/javascripts/lunr/min/lunr.ja.min.js
 create mode 100644 assets/javascripts/lunr/min/lunr.jp.min.js
 create mode 100644 assets/javascripts/lunr/min/lunr.kn.min.js
 create mode 100644 assets/javascripts/lunr/min/lunr.ko.min.js
 create mode 100644 assets/javascripts/lunr/min/lunr.multi.min.js
 create mode 100644 assets/javascripts/lunr/min/lunr.nl.min.js
 create mode 100644 assets/javascripts/lunr/min/lunr.no.min.js
 create mode 100644 assets/javascripts/lunr/min/lunr.pt.min.js
 create mode 100644 assets/javascripts/lunr/min/lunr.ro.min.js
 create mode 100644 assets/javascripts/lunr/min/lunr.ru.min.js
 create mode 100644 assets/javascripts/lunr/min/lunr.sa.min.js
 create mode 100644 assets/javascripts/lunr/min/lunr.stemmer.support.min.js
 create mode 100644 assets/javascripts/lunr/min/lunr.sv.min.js
 create mode 100644 assets/javascripts/lunr/min/lunr.ta.min.js
 create mode 100644 assets/javascripts/lunr/min/lunr.te.min.js
 create mode 100644 assets/javascripts/lunr/min/lunr.th.min.js
 create mode 100644 assets/javascripts/lunr/min/lunr.tr.min.js
 create mode 100644 assets/javascripts/lunr/min/lunr.vi.min.js
 create mode 100644 assets/javascripts/lunr/min/lunr.zh.min.js
 create mode 100644 assets/javascripts/lunr/tinyseg.js
 create mode 100644 assets/javascripts/lunr/wordcut.js
 create mode 100644 assets/javascripts/workers/search.b8dbb3d2.min.js
 create mode 100644 assets/javascripts/workers/search.b8dbb3d2.min.js.map
 create mode 100644 assets/stylesheets/main.6543a935.min.css
 create mode 100644 assets/stylesheets/main.6543a935.min.css.map
 create mode 100644 assets/stylesheets/palette.06af60db.min.css
 create mode 100644 assets/stylesheets/palette.06af60db.min.css.map
 create mode 100644 citation/index.html
 create mode 100644 commandline/overview/index.html
 create mode 100644 commandline/viprs_evaluate/index.html
 create mode 100644 commandline/viprs_fit/index.html
 create mode 100644 commandline/viprs_score/index.html
 create mode 100644 download_ld/index.html
 create mode 100644 faq/index.html
 create mode 100644 getting_started/index.html
 create mode 100644 index.html
 create mode 100644 installation/index.html
 create mode 100644 objects.inv
 create mode 100644 search/search_index.json
 create mode 100644 sitemap.xml
 create mode 100644 sitemap.xml.gz
 create mode 100644 tutorials/overview/index.html

diff --git a/.nojekyll b/.nojekyll
new file mode 100644
index 0000000..e69de29
diff --git a/404.html b/404.html
new file mode 100644
index 0000000..e1ee58b
--- /dev/null
+++ b/404.html
@@ -0,0 +1,647 @@
404 - Not found

\ No newline at end of file
diff --git a/api/eval/binary_metrics/index.html b/api/eval/binary_metrics/index.html
new file mode 100644
index 0000000..481fbd6
--- /dev/null
+++ b/api/eval/binary_metrics/index.html
@@ -0,0 +1,2034 @@
Binary metrics
avg_precision(true_val, pred_val)

Compute the average precision between the PRS predictions and a binary phenotype.

Parameters:

| Name     | Description                                                             | Default  |
|----------|-------------------------------------------------------------------------|----------|
| true_val | The response value or phenotype (a binary numpy vector with 0s and 1s)  | required |
| pred_val | The predicted value or PRS (a numpy vector)                             | required |

Source code in viprs/eval/binary_metrics.py
def avg_precision(true_val, pred_val):
+    """
+    Compute the average precision between the PRS predictions and a binary phenotype.
+
+    :param true_val: The response value or phenotype (a binary numpy vector with 0s and 1s)
+    :param pred_val: The predicted value or PRS (a numpy vector)
+    """
+    return average_precision_score(true_val, pred_val)
cox_snell_r2(true_val, pred_val, covariates=None)

Compute the Cox-Snell pseudo-R^2 between the PRS predictions and a binary phenotype.
If covariates are provided, we compute the incremental pseudo-R^2 by conditioning
on the covariates.

Parameters:

| Name       | Description                                                                                            | Default  |
|------------|--------------------------------------------------------------------------------------------------------|----------|
| true_val   | The response value or phenotype (a binary numpy vector with 0s and 1s)                                 | required |
| pred_val   | The predicted value or PRS (a numpy vector)                                                             | required |
| covariates | A pandas table of covariates where the rows are ordered the same way as the predictions and response.   | None     |

Source code in viprs/eval/binary_metrics.py
def cox_snell_r2(true_val, pred_val, covariates=None):
+    """
+    Compute the Cox-Snell pseudo-R^2 between the PRS predictions and a binary phenotype.
+    If covariates are provided, we compute the incremental pseudo-R^2 by conditioning
+    on the covariates.
+
+    :param true_val: The response value or phenotype (a binary numpy vector with 0s and 1s)
+    :param pred_val: The predicted value or PRS (a numpy vector)
+    :param covariates: A pandas table of covariates where the rows are ordered
+    the same way as the predictions and response.
+    """
+
+    if covariates is None:
+        covariates = pd.DataFrame(np.ones((true_val.shape[0], 1)), columns=['const'])
+    else:
+        covariates = sm.add_constant(covariates)
+
+    null_result = sm.Logit(true_val, covariates).fit(disp=0)
+    full_result = sm.Logit(true_val, covariates.assign(pred_val=pred_val)).fit(disp=0)
+    n = true_val.shape[0]
+
+    return 1. - np.exp(-2 * (full_result.llf - null_result.llf) / n)
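As a sanity check, the Cox & Snell quantity can be reproduced directly from two fitted logistic regressions. Below is a minimal sketch with simulated data; the variable names (`y`, `prs`, `age`) are illustrative and not part of the viprs API:

```python
import numpy as np
import pandas as pd
import statsmodels.api as sm

rng = np.random.default_rng(0)
n = 2000

# Simulate a covariate, a PRS, and a binary phenotype that depends on both:
age = rng.normal(50., 10., size=n)
prs = rng.normal(size=n)
y = (rng.normal(size=n) + 0.5 * prs + 0.02 * age > 1.).astype(int)

# Null model: covariates only; full model: covariates + PRS.
X_null = sm.add_constant(pd.DataFrame({'age': age}))
null_fit = sm.Logit(y, X_null).fit(disp=0)
full_fit = sm.Logit(y, X_null.assign(pred_val=prs)).fit(disp=0)

# Cox & Snell pseudo-R^2: 1 - exp(-2 * (llf_full - llf_null) / n)
cs_r2 = 1. - np.exp(-2 * (full_fit.llf - null_fit.llf) / n)
print(cs_r2)
```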
f1(true_val, pred_val)

Compute the F1 score between the PRS predictions and a binary phenotype.

Parameters:

| Name     | Description                                                             | Default  |
|----------|-------------------------------------------------------------------------|----------|
| true_val | The response value or phenotype (a binary numpy vector with 0s and 1s)  | required |
| pred_val | The predicted value or PRS (a numpy vector)                             | required |

Source code in viprs/eval/binary_metrics.py
def f1(true_val, pred_val):
+    """
+    Compute the F1 score between the PRS predictions and a binary phenotype.
+
+    :param true_val: The response value or phenotype (a binary numpy vector with 0s and 1s)
+    :param pred_val: The predicted value or PRS (a numpy vector)
+    """
+    return f1_score(true_val, pred_val)
liability_logit_r2(true_val, pred_val, covariates=None, return_all_r2=False)

Compute the R^2 between the PRS predictions and a binary phenotype on the liability
scale using the logit likelihood as outlined in Lee et al. (2012) Gene. Epi.
https://pubmed.ncbi.nlm.nih.gov/22714935/

The R^2 is defined as:

R2_{logit} = Var(pred) / (Var(pred) + pi^2 / 3)

where Var(pred) is the variance of the predicted liability.

If covariates are provided, we compute the incremental pseudo-R^2 by conditioning
on the covariates.

Parameters:

| Name          | Description                                                                                            | Default  |
|---------------|----------------------------------------------------------------------------------------------------------|----------|
| true_val      | The response value or phenotype (a binary numpy vector with 0s and 1s)                                 | required |
| pred_val      | The predicted value or PRS (a numpy vector)                                                             | required |
| covariates    | A pandas table of covariates where the rows are ordered the same way as the predictions and response.   | None     |
| return_all_r2 | If True, return the null, full and incremental R2 values.                                               | False    |

Source code in viprs/eval/binary_metrics.py
def liability_logit_r2(true_val, pred_val, covariates=None, return_all_r2=False):
+    """
+    Compute the R^2 between the PRS predictions and a binary phenotype on the liability
+    scale using the logit likelihood as outlined in Lee et al. (2012) Gene. Epi.
+    https://pubmed.ncbi.nlm.nih.gov/22714935/
+
+    The R^2 is defined as:
+    R2_{logit} = Var(pred) / (Var(pred) + pi^2 / 3)
+
+    Where Var(pred) is the variance of the predicted liability.
+
+    If covariates are provided, we compute the incremental pseudo-R^2 by conditioning
+    on the covariates.
+
+    :param true_val: The response value or phenotype (a binary numpy vector with 0s and 1s)
+    :param pred_val: The predicted value or PRS (a numpy vector)
+    :param covariates: A pandas table of covariates where the rows are ordered
+    the same way as the predictions and response.
+    :param return_all_r2: If True, return the null, full and incremental R2 values.
+    """
+
+    if covariates is None:
+        covariates = pd.DataFrame(np.ones((true_val.shape[0], 1)), columns=['const'])
+    else:
+        covariates = sm.add_constant(covariates)
+
+    # Fit logistic regression (Logit) models: the pi^2/3 term below is the
+    # variance of the logistic distribution, matching the logit likelihood.
+    null_result = sm.Logit(true_val, covariates).fit(disp=0)
+    full_result = sm.Logit(true_val, covariates.assign(pred_val=pred_val)).fit(disp=0)
+
+    null_var = np.var(null_result.predict())
+    null_r2 = null_var / (null_var + (np.pi**2 / 3))
+
+    full_var = np.var(full_result.predict())
+    full_r2 = full_var / (full_var + (np.pi**2 / 3))
+
+    if return_all_r2:
+        return {
+            'Null_R2': null_r2,
+            'Full_R2': full_r2,
+            'Incremental_R2': full_r2 - null_r2
+        }
+    else:
+        return full_r2 - null_r2
+
liability_probit_r2(true_val, pred_val, covariates=None, return_all_r2=False)

Compute the R^2 between the PRS predictions and a binary phenotype on the liability
scale using the probit likelihood as outlined in Lee et al. (2012) Gene. Epi.
https://pubmed.ncbi.nlm.nih.gov/22714935/

The R^2 is defined as:

R2_{probit} = Var(pred) / (Var(pred) + 1)

where Var(pred) is the variance of the predicted liability.

If covariates are provided, we compute the incremental pseudo-R^2 by conditioning
on the covariates.

Parameters:

| Name          | Description                                                                                            | Default  |
|---------------|----------------------------------------------------------------------------------------------------------|----------|
| true_val      | The response value or phenotype (a binary numpy vector with 0s and 1s)                                 | required |
| pred_val      | The predicted value or PRS (a numpy vector)                                                             | required |
| covariates    | A pandas table of covariates where the rows are ordered the same way as the predictions and response.   | None     |
| return_all_r2 | If True, return the null, full and incremental R2 values.                                               | False    |

Source code in viprs/eval/binary_metrics.py
def liability_probit_r2(true_val, pred_val, covariates=None, return_all_r2=False):
+    """
+    Compute the R^2 between the PRS predictions and a binary phenotype on the liability
+    scale using the probit likelihood as outlined in Lee et al. (2012) Gene. Epi.
+    https://pubmed.ncbi.nlm.nih.gov/22714935/
+
+    The R^2 is defined as:
+    R2_{probit} = Var(pred) / (Var(pred) + 1)
+
+    Where Var(pred) is the variance of the predicted liability.
+
+    If covariates are provided, we compute the incremental pseudo-R^2 by conditioning
+    on the covariates.
+
+    :param true_val: The response value or phenotype (a binary numpy vector with 0s and 1s)
+    :param pred_val: The predicted value or PRS (a numpy vector)
+    :param covariates: A pandas table of covariates where the rows are ordered
+    the same way as the predictions and response.
+    :param return_all_r2: If True, return the null, full and incremental R2 values.
+    """
+
+    if covariates is None:
+        covariates = pd.DataFrame(np.ones((true_val.shape[0], 1)), columns=['const'])
+    else:
+        covariates = sm.add_constant(covariates)
+
+    null_result = sm.Probit(true_val, covariates).fit(disp=0)
+    full_result = sm.Probit(true_val, covariates.assign(pred_val=pred_val)).fit(disp=0)
+
+    null_var = np.var(null_result.predict())
+    null_r2 = null_var / (null_var + 1.)
+
+    full_var = np.var(full_result.predict())
+    full_r2 = full_var / (full_var + 1.)
+
+    if return_all_r2:
+        return {
+            'Null_R2': null_r2,
+            'Full_R2': full_r2,
+            'Incremental_R2': full_r2 - null_r2
+        }
+    else:
+        return full_r2 - null_r2
+
liability_r2(true_val, pred_val, covariates=None, return_all_r2=False)

Compute the coefficient of determination (R^2) on the liability scale
according to Lee et al. (2012) Gene. Epi.
https://pubmed.ncbi.nlm.nih.gov/22714935/

The liability R^2 is defined as:

R2_{liability} = R2_{observed} * K * (1 - K) / z^2

where R2_{observed} is the R^2 on the observed scale, K is the sample prevalence,
and z is the "height of the normal density at the quantile for K".

If covariates are provided, we compute the incremental pseudo-R^2 by conditioning
on the covariates.

Parameters:

| Name          | Description                                                                                            | Default  |
|---------------|----------------------------------------------------------------------------------------------------------|----------|
| true_val      | The response value or phenotype (a binary numpy vector with 0s and 1s)                                 | required |
| pred_val      | The predicted value or PRS (a numpy vector)                                                             | required |
| covariates    | A pandas table of covariates where the rows are ordered the same way as the predictions and response.   | None     |
| return_all_r2 | If True, return the null, full and incremental R2 values.                                               | False    |

Source code in viprs/eval/binary_metrics.py
def liability_r2(true_val, pred_val, covariates=None, return_all_r2=False):
+    """
+    Compute the coefficient of determination (R^2) on the liability scale
+    according to Lee et al. (2012) Gene. Epi.
+    https://pubmed.ncbi.nlm.nih.gov/22714935/
+
+    The R^2 liability is defined as:
+    R_{liability}^2 = R2_{observed}*K*(1 - K)/(z^2)
+
+    where R_{observed}^2 is the R^2 on the observed scale and K is the sample prevalence
+    and z is the "height of the normal density at the quantile for K".
+
+    If covariates are provided, we compute the incremental pseudo-R^2 by conditioning
+    on the covariates.
+
+    :param true_val: The response value or phenotype (a binary numpy vector with 0s and 1s)
+    :param pred_val: The predicted value or PRS (a numpy vector)
+    :param covariates: A pandas table of covariates where the rows are ordered
+    the same way as the predictions and response.
+    :param return_all_r2: If True, return the null, full and incremental R2 values.
+    """
+
+    # First, obtain the incremental R2 on the observed scale:
+    r2_obs = incremental_r2(true_val, pred_val, covariates, return_all_r2=return_all_r2)
+
+    # Second, compute the prevalence and the standard normal quantile of the prevalence:
+
+    k = np.mean(true_val)
+    z2 = norm.pdf(norm.ppf(1.-k))**2
+    mult_factor = k*(1. - k) / z2
+
+    if return_all_r2:
+        return {
+            'Null_R2': r2_obs['Null_R2']*mult_factor,
+            'Full_R2': r2_obs['Full_R2']*mult_factor,
+            'Incremental_R2': r2_obs['Incremental_R2']*mult_factor
+        }
+    else:
+        return r2_obs * mult_factor
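To make the transformation concrete, here is a small standalone sketch (not part of the package) applying the Lee et al. correction factor to an observed-scale R^2 for a trait with 1% prevalence; the numbers are illustrative:

```python
import numpy as np
from scipy.stats import norm

r2_observed = 0.05   # incremental R^2 on the observed (0/1) scale
k = 0.01             # sample prevalence

# Height of the standard normal density at the prevalence quantile:
z = norm.pdf(norm.ppf(1. - k))

# Lee et al. (2012) correction: R2_liability = R2_observed * K * (1 - K) / z^2
r2_liability = r2_observed * k * (1. - k) / z**2
print(r2_liability)
```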
mcfadden_r2(true_val, pred_val, covariates=None)

Compute the McFadden pseudo-R^2 between the PRS predictions and a binary phenotype.
If covariates are provided, we compute the incremental pseudo-R^2 by conditioning
on the covariates.

Parameters:

| Name       | Description                                                                                            | Default  |
|------------|--------------------------------------------------------------------------------------------------------|----------|
| true_val   | The response value or phenotype (a binary numpy vector with 0s and 1s)                                 | required |
| pred_val   | The predicted value or PRS (a numpy vector)                                                             | required |
| covariates | A pandas table of covariates where the rows are ordered the same way as the predictions and response.   | None     |

Source code in viprs/eval/binary_metrics.py
def mcfadden_r2(true_val, pred_val, covariates=None):
+    """
+    Compute the McFadden pseudo-R^2 between the PRS predictions and a binary phenotype.
+    If covariates are provided, we compute the incremental pseudo-R^2 by conditioning
+    on the covariates.
+
+
+    :param true_val: The response value or phenotype (a binary numpy vector with 0s and 1s)
+    :param pred_val: The predicted value or PRS (a numpy vector)
+    :param covariates: A pandas table of covariates where the rows are ordered
+    the same way as the predictions and response.
+    """
+
+    if covariates is None:
+        covariates = pd.DataFrame(np.ones((true_val.shape[0], 1)), columns=['const'])
+    else:
+        covariates = sm.add_constant(covariates)
+
+    null_result = sm.Logit(true_val, covariates).fit(disp=0)
+    full_result = sm.Logit(true_val, covariates.assign(pred_val=pred_val)).fit(disp=0)
+
+    return 1. - (full_result.llf / null_result.llf)
nagelkerke_r2(true_val, pred_val, covariates=None)

Compute the Nagelkerke pseudo-R^2 between the PRS predictions and a binary phenotype.
If covariates are provided, we compute the incremental pseudo-R^2 by conditioning
on the covariates.

Parameters:

| Name       | Description                                                                                            | Default  |
|------------|--------------------------------------------------------------------------------------------------------|----------|
| true_val   | The response value or phenotype (a binary numpy vector with 0s and 1s)                                 | required |
| pred_val   | The predicted value or PRS (a numpy vector)                                                             | required |
| covariates | A pandas table of covariates where the rows are ordered the same way as the predictions and response.   | None     |

Source code in viprs/eval/binary_metrics.py
def nagelkerke_r2(true_val, pred_val, covariates=None):
+    """
+    Compute the Nagelkerke pseudo-R^2 between the PRS predictions and a binary phenotype.
+    If covariates are provided, we compute the incremental pseudo-R^2 by conditioning
+    on the covariates.
+
+    :param true_val: The response value or phenotype (a binary numpy vector with 0s and 1s)
+    :param pred_val: The predicted value or PRS (a numpy vector)
+    :param covariates: A pandas table of covariates where the rows are ordered
+    the same way as the predictions and response.
+    """
+
+    if covariates is None:
+        covariates = pd.DataFrame(np.ones((true_val.shape[0], 1)), columns=['const'])
+    else:
+        covariates = sm.add_constant(covariates)
+
+    null_result = sm.Logit(true_val, covariates).fit(disp=0)
+    full_result = sm.Logit(true_val, covariates.assign(pred_val=pred_val)).fit(disp=0)
+    n = true_val.shape[0]
+
+    # First compute the Cox & Snell R2:
+    cox_snell = 1. - np.exp(-2 * (full_result.llf - null_result.llf) / n)
+
+    # Then scale it by the maximum possible R2:
+    return cox_snell / (1. - np.exp(2 * null_result.llf / n))
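For reference, the scaling in the final line can be written out explicitly. The denominator is the largest value the Cox & Snell R^2 can attain given the null model log-likelihood, so the Nagelkerke R^2 is normalized to the [0, 1] range:

```latex
R^2_{\text{Nagelkerke}} \;=\; \frac{R^2_{\text{Cox--Snell}}}{1 - \exp\left(\tfrac{2}{n}\,\ell_0\right)},
\qquad
R^2_{\text{Cox--Snell}} \;=\; 1 - \exp\left(-\tfrac{2}{n}\,(\ell_1 - \ell_0)\right)
```

where ℓ₀ and ℓ₁ are the log-likelihoods of the null (covariates-only) and full (covariates + PRS) models, and n is the sample size.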
pr_auc(true_val, pred_val)

Compute the area under the Precision-Recall curve for a model
that maps from the PRS predictions to the binary phenotype.

Parameters:

| Name     | Description                                                             | Default  |
|----------|-------------------------------------------------------------------------|----------|
| true_val | The response value or phenotype (a binary numpy vector with 0s and 1s)  | required |
| pred_val | The predicted value or PRS (a numpy vector)                             | required |

Source code in viprs/eval/binary_metrics.py
def pr_auc(true_val, pred_val):
+    """
+    Compute the area under the Precision-Recall curve for a model
+    that maps from the PRS predictions to the binary phenotype.
+
+    :param true_val: The response value or phenotype (a binary numpy vector with 0s and 1s)
+    :param pred_val: The predicted value or PRS (a numpy vector)
+    """
+    precision, recall, thresholds = precision_recall_curve(true_val, pred_val)
+    return auc(recall, precision)
roc_auc(true_val, pred_val)

Compute the area under the ROC (AUROC) for a model
that maps from the PRS predictions to the binary phenotype.

Parameters:

| Name     | Description                                                             | Default  |
|----------|-------------------------------------------------------------------------|----------|
| true_val | The response value or phenotype (a binary numpy vector with 0s and 1s)  | required |
| pred_val | The predicted value or PRS (a numpy vector)                             | required |

Source code in viprs/eval/binary_metrics.py
def roc_auc(true_val, pred_val):
+    """
+    Compute the area under the ROC (AUROC) for a model
+     that maps from the PRS predictions to the binary phenotype.
+
+    :param true_val: The response value or phenotype (a numpy binary vector with 0s and 1s)
+    :param pred_val: The predicted value or PRS (a numpy vector)
+    """
+    return roc_auc_score(true_val, pred_val)
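Putting the functions on this page together, a typical evaluation script might look like the following sketch. It assumes the documented module path (viprs.eval.binary_metrics); the simulated data and covariate names are illustrative only:

```python
import numpy as np
import pandas as pd
from viprs.eval.binary_metrics import (
    roc_auc, pr_auc, avg_precision, nagelkerke_r2, liability_r2
)

rng = np.random.default_rng(1)
n = 5000

prs = rng.normal(size=n)                            # polygenic score
y = (rng.normal(size=n) + prs > 1.5).astype(int)    # binary phenotype
covariates = pd.DataFrame({'sex': rng.integers(0, 2, size=n)})

print('AUROC:', roc_auc(y, prs))
print('PR-AUC:', pr_auc(y, prs))
print('Average precision:', avg_precision(y, prs))
print('Nagelkerke R2:', nagelkerke_r2(y, prs, covariates=covariates))
print('Liability R2:', liability_r2(y, prs, covariates=covariates))
```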
\ No newline at end of file
diff --git a/api/eval/continuous_metrics/index.html b/api/eval/continuous_metrics/index.html
new file mode 100644
index 0000000..2a8acbb
--- /dev/null
+++ b/api/eval/continuous_metrics/index.html
@@ -0,0 +1,1451 @@
Continuous metrics
incremental_r2(true_val, pred_val, covariates=None, return_all_r2=False)

Compute the incremental prediction R^2 (proportion of phenotypic variance explained by the PRS).
This metric is computed by taking the R^2 of a model with covariates+PRS and subtracting from it
the R^2 of a model with the covariates alone.

Parameters:

| Name          | Description                                                                                            | Default  |
|---------------|----------------------------------------------------------------------------------------------------------|----------|
| true_val      | The response value or phenotype (a numpy vector)                                                        | required |
| pred_val      | The predicted value or PRS (a numpy vector)                                                             | required |
| covariates    | A pandas table of covariates where the rows are ordered the same way as the predictions and response.   | None     |
| return_all_r2 | If True, return the R^2 values for the null and full models as well.                                    | False    |

Source code in viprs/eval/continuous_metrics.py
def incremental_r2(true_val, pred_val, covariates=None, return_all_r2=False):
+    """
+    Compute the incremental prediction R^2 (proportion of phenotypic variance explained by the PRS).
+    This metric is computed by taking the R^2 of a model with covariates+PRS and subtracting from it
+    the R^2 of a model with the covariates alone.
+
+    :param true_val: The response value or phenotype (a numpy vector)
+    :param pred_val: The predicted value or PRS (a numpy vector)
+    :param covariates: A pandas table of covariates where the rows are ordered
+    the same way as the predictions and response.
+    :param return_all_r2: If True, return the R^2 values for the null and full models as well.
+    """
+
+    if covariates is None:
+        covariates = pd.DataFrame(np.ones((true_val.shape[0], 1)), columns=['const'])
+    else:
+        covariates = sm.add_constant(covariates)
+
+    null_result = sm.OLS(true_val, covariates).fit(disp=0)
+    full_result = sm.OLS(true_val, covariates.assign(pred_val=pred_val)).fit(disp=0)
+
+    if return_all_r2:
+        return {
+            'Null_R2': null_result.rsquared,
+            'Full_R2': full_result.rsquared,
+            'Incremental_R2': full_result.rsquared - null_result.rsquared
+        }
+    else:
+        return full_result.rsquared - null_result.rsquared
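For instance, a hedged usage sketch with simulated data (the column names and effect sizes are illustrative, not part of the package):

```python
import numpy as np
import pandas as pd
from viprs.eval.continuous_metrics import incremental_r2

rng = np.random.default_rng(2)
n = 1000

covariates = pd.DataFrame({'age': rng.normal(50., 10., size=n)})
prs = rng.normal(size=n)
y = 0.3 * prs + 0.01 * covariates['age'].values + rng.normal(size=n)

# Proportion of variance explained by the PRS over and above the covariates:
print(incremental_r2(y, prs, covariates=covariates))

# Or request the null/full decomposition:
print(incremental_r2(y, prs, covariates=covariates, return_all_r2=True))
```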
mse(true_val, pred_val)

Compute the mean squared error (MSE) between
the predictions or PRS pred_val and the phenotype true_val.

Parameters:

| Name     | Description                                       | Default  |
|----------|----------------------------------------------------|----------|
| true_val | The response value or phenotype (a numpy vector)  | required |
| pred_val | The predicted value or PRS (a numpy vector)        | required |

Source code in viprs/eval/continuous_metrics.py
def mse(true_val, pred_val):
+    """
+    Compute the mean squared error (MSE) between
+    the predictions or PRS `pred_val` and the phenotype `true_val`
+
+    :param true_val: The response value or phenotype (a numpy vector)
+    :param pred_val: The predicted value or PRS (a numpy vector)
+    """
+
+    return np.mean((pred_val - true_val)**2)
partial_correlation(true_val, pred_val, covariates)

Compute the partial correlation between the phenotype true_val and the PRS pred_val
by conditioning on a set of covariates. This metric is computed by first residualizing the
phenotype and the PRS on a set of covariates and then computing the correlation coefficient
between the residuals.

Parameters:

| Name       | Description                                                                                            | Default  |
|------------|--------------------------------------------------------------------------------------------------------|----------|
| true_val   | The response value or phenotype (a numpy vector)                                                        | required |
| pred_val   | The predicted value or PRS (a numpy vector)                                                             | required |
| covariates | A pandas table of covariates where the rows are ordered the same way as the predictions and response.   | required |

Source code in viprs/eval/continuous_metrics.py
def partial_correlation(true_val, pred_val, covariates):
+    """
+    Compute the partial correlation between the phenotype `true_val` and the PRS `pred_val`
+    by conditioning on a set of covariates. This metric is computed by first residualizing the
+    phenotype and the PRS on a set of covariates and then computing the correlation coefficient
+    between the residuals.
+
+    :param true_val: The response value or phenotype (a numpy vector)
+    :param pred_val: The predicted value or PRS (a numpy vector)
+    :param covariates: A pandas table of covariates where the rows are ordered
+    the same way as the predictions and response.
+    """
+
+    true_response = sm.OLS(true_val, sm.add_constant(covariates)).fit(disp=0)
+    pred_response = sm.OLS(pred_val, sm.add_constant(covariates)).fit(disp=0)
+
+    return np.corrcoef(true_response.resid, pred_response.resid)[0, 1]
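The same quantity can be computed by hand, which makes the residualization step explicit. A minimal sketch with simulated data (the covariate name pc1 is illustrative):

```python
import numpy as np
import pandas as pd
import statsmodels.api as sm

rng = np.random.default_rng(3)
n = 1000

covariates = pd.DataFrame({'pc1': rng.normal(size=n)})
prs = rng.normal(size=n) + covariates['pc1'].values
y = 0.4 * prs + covariates['pc1'].values + rng.normal(size=n)

X = sm.add_constant(covariates)

# Residualize the phenotype and the PRS on the covariates:
y_resid = sm.OLS(y, X).fit().resid
prs_resid = sm.OLS(prs, X).fit().resid

# The partial correlation is the correlation of the residuals:
print(np.corrcoef(y_resid, prs_resid)[0, 1])
```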
pearson_r(true_val, pred_val)

Compute the Pearson correlation coefficient between
the predictions or PRS pred_val and the phenotype true_val.

Parameters:

| Name     | Description                                       | Default  |
|----------|----------------------------------------------------|----------|
| true_val | The response value or phenotype (a numpy vector)  | required |
| pred_val | The predicted value or PRS (a numpy vector)        | required |

Source code in viprs/eval/continuous_metrics.py
def pearson_r(true_val, pred_val):
+    """
+    Compute the pearson correlation coefficient between
+    the predictions or PRS `pred_val` and the phenotype `true_val`
+
+    :param true_val: The response value or phenotype (a numpy vector)
+    :param pred_val: The predicted value or PRS (a numpy vector)
+    """
+    return np.corrcoef(true_val, pred_val)[0, 1]
r2(true_val, pred_val)

Compute the R^2 (proportion of variance explained) between
the predictions or PRS pred_val and the phenotype true_val.

Parameters:

| Name     | Description                                       | Default  |
|----------|----------------------------------------------------|----------|
| true_val | The response value or phenotype (a numpy vector)  | required |
| pred_val | The predicted value or PRS (a numpy vector)        | required |

Source code in viprs/eval/continuous_metrics.py
def r2(true_val, pred_val):
+    """
+    Compute the R^2 (proportion of variance explained) between
+    the predictions or PRS `pred_val` and the phenotype `true_val`
+
+    :param true_val: The response value or phenotype (a numpy vector)
+    :param pred_val: The predicted value or PRS (a numpy vector)
+    """
+    _, _, r_val, _, _ = stats.linregress(pred_val, true_val)
+    return r_val ** 2
r2_stats(r2_val, n)

Compute the confidence interval and p-value for a given R-squared (proportion of variance
explained) value.

This function and the formulas therein are based on the following paper
by Momin et al. 2023: https://doi.org/10.1016/j.ajhg.2023.01.004 as well as
the implementation in the R package PRSmix:
https://github.com/buutrg/PRSmix/blob/main/R/get_PRS_acc.R#L63

Parameters:

| Name   | Description                                                     | Default  |
|--------|------------------------------------------------------------------|----------|
| r2_val | The R^2 value to compute the confidence interval/p-value for.   | required |
| n      | The sample size used to compute the R^2 value                   | required |

Returns:

A dictionary with the R^2 value, the lower and upper values of the confidence interval,
the p-value, and the standard error of the R^2 metric.

Source code in viprs/eval/continuous_metrics.py
def r2_stats(r2_val, n):
+    """
+    Compute the confidence interval and p-value for a given R-squared (proportion of variance
+     explained) value.
+
+    This function and the formulas therein are based on the following paper
+    by Momin et al. 2023: https://doi.org/10.1016/j.ajhg.2023.01.004 as well as
+    the implementation in the R package `PRSmix`:
+    https://github.com/buutrg/PRSmix/blob/main/R/get_PRS_acc.R#L63
+
+    :param r2_val: The R^2 value to compute the confidence interval/p-value for.
+    :param n: The sample size used to compute the R^2 value
+
+    :return: A dictionary with the R^2 value, the lower and upper values of the confidence interval,
+    the p-value, and the standard error of the R^2 metric.
+
+    """
+
+    assert 0. < r2_val < 1., "R^2 value must be between 0 and 1."
+
+    # Compute the variance of the R^2 value:
+    r2_var = (4. * r2_val * (1. - r2_val) ** 2 * (n - 2) ** 2) / ((n ** 2 - 1) * (n + 3))
+
+    # Compute the standard errors for the R^2 value
+    # as well as the lower and upper values for
+    # the confidence interval:
+    r2_se = np.sqrt(r2_var)
+    lower_r2 = r2_val - 1.97 * r2_se
+    upper_r2 = r2_val + 1.97 * r2_se
+
+    # Compute the p-value assuming a Chi-squared distribution with 1 degree of freedom:
+    pval = stats.chi2.sf((r2_val / r2_se) ** 2, df=1)
+
+    return {
+        'R2': r2_val,
+        'Lower_R2': lower_r2,
+        'Upper_R2': upper_r2,
+        'P_Value': pval,
+        'SE': r2_se,
+    }
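A short usage sketch (assuming the documented module path; the R^2 value and sample size are arbitrary):

```python
from viprs.eval.continuous_metrics import r2_stats

# Standard error, confidence interval and p-value for an R^2 of 0.1
# estimated in a held-out sample of 10,000 individuals:
stats_dict = r2_stats(0.1, 10000)
print(stats_dict['SE'], stats_dict['Lower_R2'], stats_dict['Upper_R2'], stats_dict['P_Value'])
```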
\ No newline at end of file
diff --git a/api/eval/pseudo_metrics/index.html b/api/eval/pseudo_metrics/index.html
new file mode 100644
index 0000000..2072147
--- /dev/null
+++ b/api/eval/pseudo_metrics/index.html
@@ -0,0 +1,977 @@
Pseudo metrics
pseudo_pearson_r(test_gdl, prs_beta_table)

Perform pseudo-validation of the inferred effect sizes by comparing them to
standardized marginal betas from an independent validation set. Here, we follow the pseudo-validation
procedures outlined in Mak et al. (2017) and Yang and Zhou (2020), where
the correlation between the PRS and the phenotype in an independent validation
cohort can be approximated with:

Corr(PRS, y) ~= r'b / sqrt(b'Sb)

where r is the standardized marginal beta from a validation set,
b is the posterior mean for the effect size of each variant, and S is the LD matrix.

Parameters:

| Name           | Description                                                                               | Default  |
|----------------|--------------------------------------------------------------------------------------------|----------|
| test_gdl       | An instance of GWADataLoader with the summary statistics table initialized.               | required |
| prs_beta_table | A pandas DataFrame with the PRS effect sizes. Must contain the columns: CHR, SNP, A1, A2, BETA. | required |

Source code in viprs/eval/pseudo_metrics.py
def pseudo_pearson_r(test_gdl, prs_beta_table):
+    """
+    Perform pseudo-validation of the inferred effect sizes by comparing them to
+    standardized marginal betas from an independent validation set. Here, we follow the pseudo-validation
+    procedures outlined in Mak et al. (2017) and Yang and Zhou (2020), where
+    the correlation between the PRS and the phenotype in an independent validation
+    cohort can be approximated with:
+
+    Corr(PRS, y) ~= r'b / sqrt(b'Sb)
+
+    Where `r` is the standardized marginal beta from a validation set,
+    `b` is the posterior mean for the effect size of each variant and `S` is the LD matrix.
+
+    :param test_gdl: An instance of `GWADataLoader` with the summary statistics table initialized.
+    :param prs_beta_table: A pandas DataFrame with the PRS effect sizes. Must contain
+    the columns: CHR, SNP, A1, A2, BETA.
+    """
+
+    std_beta, prs_beta, q = _match_variant_stats(test_gdl, prs_beta_table)
+
+    rb = np.sum((prs_beta.T * std_beta).T, axis=0)
+    bsb = np.sum(prs_beta * q, axis=0)
+
+    return rb / np.sqrt(bsb)
pseudo_r2(test_gdl, prs_beta_table)

Compute the R-Squared metric (proportion of variance explained) for a given
PRS using standardized marginal betas from an independent test set.
Here, we follow the pseudo-validation procedures outlined in Mak et al. (2017) and
Yang and Zhou (2020), where the proportion of phenotypic variance explained by the PRS
in an independent validation cohort can be approximated with:

R2(PRS, y) ~= 2*r'b - b'Sb

where r is the standardized marginal beta from a validation/test set,
b is the posterior mean for the effect size of each variant, and S is the LD matrix.

Parameters:

| Name           | Description                                                                               | Default  |
|----------------|--------------------------------------------------------------------------------------------|----------|
| test_gdl       | An instance of GWADataLoader with the summary statistics table initialized.               | required |
| prs_beta_table | A pandas DataFrame with the PRS effect sizes. Must contain the columns: CHR, SNP, A1, A2, BETA. | required |

Source code in viprs/eval/pseudo_metrics.py
def pseudo_r2(test_gdl, prs_beta_table):
+    """
+    Compute the R-Squared metric (proportion of variance explained) for a given
+    PRS using standardized marginal betas from an independent test set.
+    Here, we follow the pseudo-validation procedures outlined in Mak et al. (2017) and
+    Yang and Zhou (2020), where the proportion of phenotypic variance explained by the PRS
+    in an independent validation cohort can be approximated with:
+
+    R2(PRS, y) ~= 2*r'b - b'Sb
+
+    Where `r` is the standardized marginal beta from a validation/test set,
+    `b` is the posterior mean for the effect size of each variant and `S` is the LD matrix.
+
+    :param test_gdl: An instance of `GWADataLoader` with the summary statistics table initialized.
+    :param prs_beta_table: A pandas DataFrame with the PRS effect sizes. Must contain
+    the columns: CHR, SNP, A1, A2, BETA.
+    """
+
+    std_beta, prs_beta, q = _match_variant_stats(test_gdl, prs_beta_table)
+
+    rb = np.sum((prs_beta.T * std_beta).T, axis=0)
+    bsb = np.sum(prs_beta*q, axis=0)
+
+    return 2*rb - bsb
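Both pseudo-metrics reduce to simple vector operations once the standardized marginal betas r, the PRS weights b, and the LD-weighted product q = S·b are aligned. A minimal numpy sketch of the two formulas (toy arrays standing in for the package's internal _match_variant_stats machinery; identity LD is assumed for simplicity):

```python
import numpy as np

# Toy aligned statistics for 5 variants:
r = np.array([0.02, -0.01, 0.03, 0.00, 0.01])    # standardized marginal betas (validation set)
b = np.array([0.015, -0.005, 0.02, 0.001, 0.0])  # posterior mean effect sizes
q = b.copy()                                     # q = S @ b; identity LD assumed here

rb = r @ b    # r'b
bsb = b @ q   # b'Sb

pseudo_corr = rb / np.sqrt(bsb)   # Corr(PRS, y) ~= r'b / sqrt(b'Sb)
pseudo_r2 = 2 * rb - bsb          # R2(PRS, y) ~= 2*r'b - b'Sb
print(pseudo_corr, pseudo_r2)
```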
\ No newline at end of file
diff --git a/api/model/BayesPRSModel/index.html b/api/model/BayesPRSModel/index.html
new file mode 100644
index 0000000..3d420f3
--- /dev/null
+++ b/api/model/BayesPRSModel/index.html
@@ -0,0 +1,3303 @@
BayesPRSModel

class BayesPRSModel

A base class for Bayesian PRS models. This class defines the basic structure and methods
that are common to most Bayesian PRS models. Specifically, this class provides methods and interfaces
for initialization, harmonization, prediction, and fitting of Bayesian PRS models.

The class is generic and is designed to be inherited and extended by
specific Bayesian PRS models, such as LDPred and VIPRS.

Attributes:

| Name           | Description                                                                                             |
|----------------|----------------------------------------------------------------------------------------------------------|
| gdl            | A GWADataLoader object containing harmonized GWAS summary statistics and Linkage-Disequilibrium (LD) matrices. |
| Nj             | A dictionary where keys are chromosomes and values are the sample sizes per variant.                    |
| shapes         | A dictionary where keys are chromosomes and values are the shapes of the variant arrays (e.g. the number of variants per chromosome). |
| _sample_size   | The average per-SNP sample size.                                                                         |
| pip            | The posterior inclusion probability.                                                                     |
| post_mean_beta | The posterior mean for the effect sizes.                                                                 |
| post_var_beta  | The posterior variance for the effect sizes.                                                             |

Source code in viprs/model/BayesPRSModel.py
class BayesPRSModel:
+    """
+    A base class for Bayesian PRS models. This class defines the basic structure and methods
+    that are common to most Bayesian PRS models. Specifically, this class provides methods and interfaces
+    for initialization, harmonization, prediction, and fitting of Bayesian PRS models.
+
+    The class is generic and is designed to be inherited and extended by
+    specific Bayesian PRS models, such as `LDPred` and `VIPRS`.
+
+    :ivar gdl: A GWADataLoader object containing harmonized GWAS summary statistics and
+    Linkage-Disequilibrium (LD) matrices.
+    :ivar Nj: A dictionary where keys are chromosomes and values are the sample sizes per variant.
+    :ivar shapes: A dictionary where keys are chromosomes and values are the shapes of the variant arrays
+    (e.g. the number of variants per chromosome).
+    :ivar _sample_size: The average per-SNP sample size.
+    :ivar pip: The posterior inclusion probability.
+    :ivar post_mean_beta: The posterior mean for the effect sizes.
+    :ivar post_var_beta: The posterior variance for the effect sizes.
+    """
+
+    def __init__(self, gdl):
+        """
+        Initialize the Bayesian PRS model.
+        :param gdl: An instance of `GWADataLoader`.
+        """
+
+        self.gdl = gdl
+
+        # Sample size per SNP:
+        try:
+            self.Nj = {c: ss.n_per_snp.astype(float) for c, ss in gdl.sumstats_table.items()}
+        except AttributeError:
+            # If not provided, use the overall sample size:
+            self.Nj = {c: np.repeat(gdl.n, c_size).astype(float) for c, c_size in gdl.shapes.items()}
+
+        self.shapes = self.gdl.shapes.copy()
+
+        # Determine the overall sample size:
+        self._sample_size = dict_mean(self.Nj)
+
+        # Inferred model parameters:
+        self.pip = None  # Posterior inclusion probability
+        self.post_mean_beta = None  # The posterior mean for the effect sizes
+        self.post_var_beta = None  # The posterior variance for the effect sizes
+
+    @property
+    def chromosomes(self):
+        """
+        :return: The list of chromosomes that are included in the BayesPRSModel
+        """
+        return sorted(list(self.shapes.keys()))
+
+    @property
+    def m(self) -> int:
+        """
+
+        !!! seealso "See Also"
+            * [n_snps][viprs.model.BayesPRSModel.BayesPRSModel.n_snps]
+
+        :return: The number of variants in the model.
+        """
+        return self.gdl.m
+
+    @property
+    def n(self) -> int:
+        """
+        :return: The number of samples in the model. If not available, average the per-SNP
+        sample sizes.
+        """
+        return self._sample_size
+
+    @property
+    def n_snps(self) -> int:
+        """
+        !!! seealso "See Also"
+            * [m][viprs.model.BayesPRSModel.BayesPRSModel.m]
+
+        :return: The number of SNPs in the model.
+        """
+        return self.m
+
+    def fit(self, *args, **kwargs):
+        """
+        A generic method to fit the Bayesian PRS model. This method should be implemented by the
+        specific Bayesian PRS model.
+        :raises NotImplementedError: If the method is not implemented in the child class.
+        """
+        raise NotImplementedError
+
+    def get_proportion_causal(self):
+        """
+        A generic method to get an estimate of the proportion of causal variants.
+        :raises NotImplementedError: If the method is not implemented in the child class.
+        """
+        raise NotImplementedError
+
+    def get_heritability(self):
+        """
+        A generic method to get an estimate of the heritability, or proportion of variance explained by SNPs.
+        :raises NotImplementedError: If the method is not implemented in the child class.
+        """
+        raise NotImplementedError
+
+    def get_pip(self):
+        """
+        :return: The posterior inclusion probability for each variant in the model.
+        """
+        return self.pip
+
+    def get_posterior_mean_beta(self):
+        """
+        :return: The posterior mean of the effect sizes (BETA) for each variant in the model.
+        """
+        return self.post_mean_beta
+
+    def get_posterior_variance_beta(self):
+        """
+        :return: The posterior variance of the effect sizes (BETA) for each variant in the model.
+        """
+        return self.post_var_beta
+
+    def predict(self, test_gdl=None):
+        """
+        Given the inferred effect sizes, predict the phenotype for the training samples in
+        the GWADataLoader object or new test samples. If `test_gdl` is not provided, genotypes
+        from training samples will be used (if available).
+
+        :param test_gdl: A GWADataLoader object containing genotype data for new test samples.
+        :raises ValueError: If the posterior means for BETA are not set. AssertionError if the GWADataLoader object
+        does not contain genotype data.
+        """
+
+        if self.post_mean_beta is None:
+            raise ValueError("The posterior means for BETA are not set. Call `.fit()` first.")
+
+        if test_gdl is None:
+            assert self.gdl.genotype is not None, "The GWADataLoader object must contain genotype data."
+            test_gdl = self.gdl
+            post_mean_beta = self.post_mean_beta
+        else:
+            _, post_mean_beta, _ = self.harmonize_data(gdl=test_gdl)
+
+        return test_gdl.predict(post_mean_beta)
+
+    def harmonize_data(self, gdl=None, parameter_table=None):
+        """
+        Harmonize the inferred effect sizes with a new GWADataLoader object. This method is useful
+        when the user wants to predict on new samples or when the effect sizes are inferred from a
+        different set of samples. The method aligns the effect sizes with the SNP table in the
+        GWADataLoader object.
+
+        :param gdl: An instance of `GWADataLoader` object.
+        :param parameter_table: A `pandas` DataFrame of variant effect sizes.
+
+        :return: A tuple of the harmonized posterior inclusion probability, posterior mean for the effect sizes,
+        and posterior variance for the effect sizes.
+
+        """
+
+        if gdl is None and parameter_table is None:
+            return
+
+        if gdl is None:
+            gdl = self.gdl
+
+        if parameter_table is None:
+            parameter_table = self.to_table(per_chromosome=True)
+        else:
+            parameter_table = {c: parameter_table.loc[parameter_table['CHR'] == c, ]
+                               for c in parameter_table['CHR'].unique()}
+
+        snp_tables = gdl.to_snp_table(col_subset=['SNP', 'A1', 'A2'],
+                                      per_chromosome=True)
+
+        pip = {}
+        post_mean_beta = {}
+        post_var_beta = {}
+
+        common_chroms = sorted(list(set(snp_tables.keys()).intersection(set(parameter_table.keys()))))
+
+        for c in common_chroms:
+
+            try:
+                post_mean_cols = expand_column_names('BETA', self.post_mean_beta[c].shape)
+                if isinstance(post_mean_cols, str):
+                    post_mean_cols = [post_mean_cols]
+
+                pip_cols = expand_column_names('PIP', self.post_mean_beta[c].shape)
+                if isinstance(pip_cols, str):
+                    pip_cols = [pip_cols]
+
+                post_var_cols = expand_column_names('VAR_BETA', self.post_mean_beta[c].shape)
+                if isinstance(post_var_cols, str):
+                    post_var_cols = [post_var_cols]
+
+            except (TypeError, KeyError):
+                pip_cols = [col for col in parameter_table[c].columns if 'PIP' in col]
+                post_var_cols = [col for col in parameter_table[c].columns if 'VAR_BETA' in col]
+                post_mean_cols = [col for col in parameter_table[c].columns
+                                  if 'BETA' in col and col not in post_var_cols]
+
+            # Merge the effect table with the GDL SNP table:
+            c_df = merge_snp_tables(snp_tables[c], parameter_table[c], how='left',
+                                    signed_statistics=post_mean_cols)
+
+            if len(c_df) < len(snp_tables[c]):
+                raise ValueError("The parameter table could not be aligned with the reference SNP table. This may be due to "
+                                 "conflicts/errors in use of reference vs. alternative alleles.")
+
+            # Obtain the values for the posterior mean:
+            c_df[post_mean_cols] = c_df[post_mean_cols].fillna(0.)
+            post_mean_beta[c] = c_df[post_mean_cols].values
+
+            # Obtain the values for the posterior inclusion probability:
+            if len(set(pip_cols).intersection(set(c_df.columns))) > 0:
+                c_df[pip_cols] = c_df[pip_cols].fillna(0.)
+                pip[c] = c_df[pip_cols].values
+
+            # Obtain the values for the posterior variance:
+            if len(set(post_var_cols).intersection(set(c_df.columns))) > 0:
+                c_df[post_var_cols] = c_df[post_var_cols].fillna(0.)
+                post_var_beta[c] = c_df[post_var_cols].values
+
+        if len(pip) < 1:
+            pip = None
+
+        if len(post_var_beta) < 1:
+            post_var_beta = None
+
+        return pip, post_mean_beta, post_var_beta
+
+    def to_table(self, col_subset=('CHR', 'SNP', 'POS', 'A1', 'A2'), per_chromosome=False):
+        """
+        Output the posterior estimates for the effect sizes to a pandas dataframe.
+        :param col_subset: The subset of columns to include in the tables (in addition to the effect sizes).
+        :param per_chromosome: If True, return a separate table for each chromosome.
+
+        :return: A pandas Dataframe with the posterior estimates for the effect sizes.
+        """
+
+        if self.post_mean_beta is None:
+            raise Exception("The posterior means for BETA are not set. Call `.fit()` first.")
+
+        tables = self.gdl.to_snp_table(col_subset=col_subset, per_chromosome=True)
+
+        for c in self.chromosomes:
+
+            tables[c][expand_column_names('BETA', self.post_mean_beta[c].shape)] = self.post_mean_beta[c]
+
+            if self.pip is not None:
+                tables[c][expand_column_names('PIP', self.pip[c].shape)] = self.pip[c]
+
+            if self.post_var_beta is not None:
+                tables[c][expand_column_names('VAR_BETA', self.post_var_beta[c].shape)] = self.post_var_beta[c]
+
+        if per_chromosome:
+            return tables
+        else:
+            return pd.concat([tables[c] for c in self.chromosomes])
+
+    def pseudo_validate(self, test_gdl, metric='pearson_correlation'):
+        """
+        Evaluate the prediction accuracy of the inferred PRS using external GWAS summary statistics.
+
+        :param test_gdl: A `GWADataLoader` object with the external GWAS summary statistics and LD matrix information.
+        :param metric: The metric to use for evaluation. Options: 'r2' or 'pearson_correlation'.
+
+        :return: The pseudo-validation metric.
+        """
+
+        from ..eval.pseudo_metrics import pseudo_r2, pseudo_pearson_r
+
+        metric = metric.lower()
+
+        assert self.post_mean_beta is not None, "The posterior means for BETA are not set. Call `.fit()` first."
+
+        if metric in ('pearson_correlation', 'corr', 'r'):
+            return pseudo_pearson_r(test_gdl, self.to_table(per_chromosome=False))
+        elif metric == 'r2':
+            return pseudo_r2(test_gdl, self.to_table(per_chromosome=False))
+        else:
+            raise KeyError(f"Pseudo validation metric ({metric}) not recognized. "
+                           f"Options are: 'r2' or 'pearson_correlation'.")
+
+    def set_model_parameters(self, parameter_table):
+        """
+        Parses a pandas dataframe with model parameters and assigns them
+        to the corresponding class attributes.
+
+        For example:
+            * Columns with `BETA` will be assigned to `self.post_mean_beta`.
+            * Columns with `PIP` will be assigned to `self.pip`.
+            * Columns with `VAR_BETA` will be assigned to `self.post_var_beta`.
+
+        :param parameter_table: A pandas table or dataframe.
+        """
+
+        self.pip, self.post_mean_beta, self.post_var_beta = self.harmonize_data(parameter_table=parameter_table)
+
+    def read_inferred_parameters(self, f_names, sep=r"\s+"):
+        """
+        Read a file with the inferred parameters.
+        :param f_names: A path (or list of paths) to the file with the effect sizes.
+        :param sep: The delimiter for the file(s).
+        """
+
+        if isinstance(f_names, str):
+            f_names = [f_names]
+
+        param_table = []
+
+        for f_name in f_names:
+            param_table.append(pd.read_csv(f_name, sep=sep))
+
+        if len(param_table) > 0:
+            param_table = pd.concat(param_table)
+            self.set_model_parameters(param_table)
+        else:
+            raise FileNotFoundError
+
+    def write_inferred_parameters(self, f_name, per_chromosome=False, sep="\t"):
+        """
+        A convenience method to write the inferred posterior for the effect sizes to file.
+
+        TODO:
+            * Support outputting scoring files compatible with PGS catalog format:
+            https://www.pgscatalog.org/downloads/#dl_scoring_files
+
+        :param f_name: The filename (or directory) where to write the effect sizes
+        :param per_chromosome: If True, write a file for each chromosome separately.
+        :param sep: The delimiter for the file (tab by default).
+        """
+
+        tables = self.to_table(per_chromosome=per_chromosome)
+
+        if '.fit' not in f_name:
+            ext = '.fit'
+        else:
+            ext = ''
+
+        if per_chromosome:
+            # Write one file per chromosome to the directory `f_name`:
+            for c, tab in tables.items():
+                tab.to_csv(osp.join(f_name, f'chr_{c}.fit'), sep=sep, index=False)
+        else:
+            tables.to_csv(f_name + ext, sep=sep, index=False)
+
+
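For orientation, here is a minimal end-to-end sketch of the methods above. It assumes the VIPRS subclass documented elsewhere in these docs (since BayesPRSModel.fit is abstract), and the magenpy loader arguments and file paths are illustrative placeholders rather than part of this API:

    import magenpy as mgp
    from viprs.model.VIPRS import VIPRS

    # Placeholder paths; any GWADataLoader with summary statistics + LD works here:
    gdl = mgp.GWADataLoader(ld_store_files="ld/chr_22/",
                            sumstats_files="sumstats_chr22.txt",
                            sumstats_format="magenpy")

    model = VIPRS(gdl).fit()

    # Posterior estimates (BETA, PIP, VAR_BETA) as one dataframe:
    post_df = model.to_table(per_chromosome=False)

    # Round-trip the inferred parameters through disk:
    model.write_inferred_parameters("chr22_model")    # writes chr22_model.fit
    model.read_inferred_parameters("chr22_model.fit")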
chromosomes (property)

Returns:
    The list of chromosomes that are included in the BayesPRSModel.
m: int (property)

See Also:
    * n_snps

Returns:
    int: The number of variants in the model.
n: int (property)

Returns:
    int: The number of samples in the model. If a single GWAS sample size is not
    available, this is computed as the average of the per-SNP sample sizes.
n_snps: int (property)

See Also:
    * m

Returns:
    int: The number of SNPs in the model.
__init__(gdl)

Initialize the Bayesian PRS model.

Parameters:
    gdl: An instance of GWADataLoader. (required)

Source code in viprs/model/BayesPRSModel.py
def __init__(self, gdl):
+    """
+    Initialize the Bayesian PRS model.
+    :param gdl: An instance of `GWADataLoader`.
+    """
+
+    self.gdl = gdl
+
+    # Sample size per SNP:
+    try:
+        self.Nj = {c: ss.n_per_snp.astype(float) for c, ss in gdl.sumstats_table.items()}
+    except AttributeError:
+        # If not provided, use the overall sample size:
+        self.Nj = {c: np.repeat(gdl.n, c_size).astype(float) for c, c_size in gdl.shapes.items()}
+
+    self.shapes = self.gdl.shapes.copy()
+
+    # Determine the overall sample size:
+    self._sample_size = dict_mean(self.Nj)
+
+    # Inferred model parameters:
+    self.pip = None  # Posterior inclusion probability
+    self.post_mean_beta = None  # The posterior mean for the effect sizes
+    self.post_var_beta = None  # The posterior variance for the effect sizes
+
fit(*args, **kwargs)

A generic method to fit the Bayesian PRS model. This method should be implemented by the
specific Bayesian PRS model.

Raises:
    NotImplementedError: If the method is not implemented in the child class.

Source code in viprs/model/BayesPRSModel.py
def fit(self, *args, **kwargs):
+    """
+    A genetic method to fit the Bayesian PRS model. This method should be implemented by the
+    specific Bayesian PRS model.
+    :raises NotImplementedError: If the method is not implemented in the child class.
+    """
+    raise NotImplementedError
get_heritability()

A generic method to get an estimate of the heritability, or proportion of variance explained by SNPs.

Raises:
    NotImplementedError: If the method is not implemented in the child class.

Source code in viprs/model/BayesPRSModel.py
def get_heritability(self):
+    """
+    A generic method to get an estimate of the heritability, or proportion of variance explained by SNPs.
+    :raises NotImplementedError: If the method is not implemented in the child class.
+    """
+    raise NotImplementedError
get_pip()

Returns:
    The posterior inclusion probability for each variant in the model.

Source code in viprs/model/BayesPRSModel.py
def get_pip(self):
+    """
+    :return: The posterior inclusion probability for each variant in the model.
+    """
+    return self.pip
get_posterior_mean_beta()

Returns:
    The posterior mean of the effect sizes (BETA) for each variant in the model.

Source code in viprs/model/BayesPRSModel.py
def get_posterior_mean_beta(self):
+    """
+    :return: The posterior mean of the effect sizes (BETA) for each variant in the model.
+    """
+    return self.post_mean_beta
get_posterior_variance_beta()

Returns:
    The posterior variance of the effect sizes (BETA) for each variant in the model.

Source code in viprs/model/BayesPRSModel.py
def get_posterior_variance_beta(self):
+    """
+    :return: The posterior variance of the effect sizes (BETA) for each variant in the model.
+    """
+    return self.post_var_beta
get_proportion_causal()

A generic method to get an estimate of the proportion of causal variants.

Raises:
    NotImplementedError: If the method is not implemented in the child class.

Source code in viprs/model/BayesPRSModel.py
def get_proportion_causal(self):
+    """
+    A generic method to get an estimate of the proportion of causal variants.
+    :raises NotImplementedError: If the method is not implemented in the child class.
+    """
+    raise NotImplementedError
harmonize_data(gdl=None, parameter_table=None)

Harmonize the inferred effect sizes with a new GWADataLoader object. This method is useful
when the user wants to predict on new samples or when the effect sizes are inferred from a
different set of samples. The method aligns the effect sizes with the SNP table in the
GWADataLoader object.

Parameters:
    gdl: An instance of GWADataLoader. (default: None)
    parameter_table: A pandas DataFrame of variant effect sizes. (default: None)

Returns:
    A tuple of the harmonized posterior inclusion probability, posterior mean for the
    effect sizes, and posterior variance for the effect sizes.

Source code in viprs/model/BayesPRSModel.py
def harmonize_data(self, gdl=None, parameter_table=None):
+    """
+    Harmonize the inferred effect sizes with a new GWADataLoader object. This method is useful
+    when the user wants to predict on new samples or when the effect sizes are inferred from a
+    different set of samples. The method aligns the effect sizes with the SNP table in the
+    GWADataLoader object.
+
+    :param gdl: An instance of `GWADataLoader` object.
+    :param parameter_table: A `pandas` DataFrame of variant effect sizes.
+
+    :return: A tuple of the harmonized posterior inclusion probability, posterior mean for the effect sizes,
+    and posterior variance for the effect sizes.
+
+    """
+
+    if gdl is None and parameter_table is None:
+        return
+
+    if gdl is None:
+        gdl = self.gdl
+
+    if parameter_table is None:
+        parameter_table = self.to_table(per_chromosome=True)
+    else:
+        parameter_table = {c: parameter_table.loc[parameter_table['CHR'] == c, ]
+                           for c in parameter_table['CHR'].unique()}
+
+    snp_tables = gdl.to_snp_table(col_subset=['SNP', 'A1', 'A2'],
+                                  per_chromosome=True)
+
+    pip = {}
+    post_mean_beta = {}
+    post_var_beta = {}
+
+    common_chroms = sorted(list(set(snp_tables.keys()).intersection(set(parameter_table.keys()))))
+
+    for c in common_chroms:
+
+        try:
+            post_mean_cols = expand_column_names('BETA', self.post_mean_beta[c].shape)
+            if isinstance(post_mean_cols, str):
+                post_mean_cols = [post_mean_cols]
+
+            pip_cols = expand_column_names('PIP', self.post_mean_beta[c].shape)
+            if isinstance(pip_cols, str):
+                pip_cols = [pip_cols]
+
+            post_var_cols = expand_column_names('VAR_BETA', self.post_mean_beta[c].shape)
+            if isinstance(post_var_cols, str):
+                post_var_cols = [post_var_cols]
+
+        except (TypeError, KeyError):
+            pip_cols = [col for col in parameter_table[c].columns if 'PIP' in col]
+            post_var_cols = [col for col in parameter_table[c].columns if 'VAR_BETA' in col]
+            post_mean_cols = [col for col in parameter_table[c].columns
+                              if 'BETA' in col and col not in post_var_cols]
+
+        # Merge the effect table with the GDL SNP table:
+        c_df = merge_snp_tables(snp_tables[c], parameter_table[c], how='left',
+                                signed_statistics=post_mean_cols)
+
+        if len(c_df) < len(snp_tables[c]):
+            raise ValueError("The parameter table could not aligned with the reference SNP table. This may due to "
+                             "conflicts/errors in use of reference vs. alternative alleles.")
+
+        # Obtain the values for the posterior mean:
+        c_df[post_mean_cols] = c_df[post_mean_cols].fillna(0.)
+        post_mean_beta[c] = c_df[post_mean_cols].values
+
+        # Obtain the values for the posterior inclusion probability:
+        if len(set(pip_cols).intersection(set(c_df.columns))) > 0:
+            c_df[pip_cols] = c_df[pip_cols].fillna(0.)
+            pip[c] = c_df[pip_cols].values
+
+        # Obtain the values for the posterior variance:
+        if len(set(post_var_cols).intersection(set(c_df.columns))) > 0:
+            c_df[post_var_cols] = c_df[post_var_cols].fillna(0.)
+            post_var_beta[c] = c_df[post_var_cols].values
+
+    if len(pip) < 1:
+        pip = None
+
+    if len(post_var_beta) < 1:
+        post_var_beta = None
+
+    return pip, post_mean_beta, post_var_beta
predict(test_gdl=None)

Given the inferred effect sizes, predict the phenotype for the training samples in
the GWADataLoader object or new test samples. If test_gdl is not provided, genotypes
from training samples will be used (if available).

Parameters:
    test_gdl: A GWADataLoader object containing genotype data for new test samples. (default: None)

Raises:
    ValueError: If the posterior means for BETA are not set.
    AssertionError: If the GWADataLoader object does not contain genotype data.

Source code in viprs/model/BayesPRSModel.py
def predict(self, test_gdl=None):
+    """
+    Given the inferred effect sizes, predict the phenotype for the training samples in
+    the GWADataLoader object or new test samples. If `test_gdl` is not provided, genotypes
+    from training samples will be used (if available).
+
+    :param test_gdl: A GWADataLoader object containing genotype data for new test samples.
+    :raises ValueError: If the posterior means for BETA are not set. AssertionError if the GWADataLoader object
+    does not contain genotype data.
+    """
+
+    if self.post_mean_beta is None:
+        raise ValueError("The posterior means for BETA are not set. Call `.fit()` first.")
+
+    if test_gdl is None:
+        assert self.gdl.genotype is not None, "The GWADataLoader object must contain genotype data."
+        test_gdl = self.gdl
+        post_mean_beta = self.post_mean_beta
+    else:
+        _, post_mean_beta, _ = self.harmonize_data(gdl=test_gdl)
+
+    return test_gdl.predict(post_mean_beta)
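As a rough usage sketch (continuing the hypothetical fitted `model` from the earlier example; the test-cohort path is a placeholder):

    # Training samples (requires genotypes in the model's own GWADataLoader):
    train_prs = model.predict()

    # New samples: effect sizes are first re-aligned to the test SNP table via
    # harmonize_data(), then used to score the test genotypes:
    test_gdl = mgp.GWADataLoader(bed_files="test_cohort/chr_22")  # placeholder
    test_prs = model.predict(test_gdl=test_gdl)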
pseudo_validate(test_gdl, metric='pearson_correlation')

Evaluate the prediction accuracy of the inferred PRS using external GWAS summary statistics.

Parameters:
    test_gdl: A GWADataLoader object with the external GWAS summary statistics and LD matrix information. (required)
    metric: The metric to use for evaluation. Options: 'r2' or 'pearson_correlation'. (default: 'pearson_correlation')

Returns:
    The pseudo-validation metric.

Source code in viprs/model/BayesPRSModel.py
def pseudo_validate(self, test_gdl, metric='pearson_correlation'):
+    """
+    Evaluate the prediction accuracy of the inferred PRS using external GWAS summary statistics.
+
+    :param test_gdl: A `GWADataLoader` object with the external GWAS summary statistics and LD matrix information.
+    :param metric: The metric to use for evaluation. Options: 'r2' or 'pearson_correlation'.
+
+    :return: The pseudo-validation metric.
+    """
+
+    from ..eval.pseudo_metrics import pseudo_r2, pseudo_pearson_r
+
+    metric = metric.lower()
+
+    assert self.post_mean_beta is not None, "The posterior means for BETA are not set. Call `.fit()` first."
+
+    if metric in ('pearson_correlation', 'corr', 'r'):
+        return pseudo_pearson_r(test_gdl, self.to_table(per_chromosome=False))
+    elif metric == 'r2':
+        return pseudo_r2(test_gdl, self.to_table(per_chromosome=False))
+    else:
+        raise KeyError(f"Pseudo validation metric ({metric}) not recognized. "
+                       f"Options are: 'r2' or 'pearson_correlation'.")
read_inferred_parameters(f_names, sep='\s+')

Read a file with the inferred parameters.

Parameters:
    f_names: A path (or list of paths) to the file with the effect sizes. (required)
    sep: The delimiter for the file(s). (default: '\s+')

Source code in viprs/model/BayesPRSModel.py
def read_inferred_parameters(self, f_names, sep=r"\s+"):
+    """
+    Read a file with the inferred parameters.
+    :param f_names: A path (or list of paths) to the file with the effect sizes.
+    :param sep: The delimiter for the file(s).
+    """
+
+    if isinstance(f_names, str):
+        f_names = [f_names]
+
+    param_table = []
+
+    for f_name in f_names:
+        param_table.append(pd.read_csv(f_name, sep=sep))
+
+    if len(param_table) > 0:
+        param_table = pd.concat(param_table)
+        self.set_model_parameters(param_table)
+    else:
+        raise FileNotFoundError
set_model_parameters(parameter_table)

Parses a pandas dataframe with model parameters and assigns them to the corresponding class attributes. For example:

* Columns with `BETA` will be assigned to `self.post_mean_beta`.
* Columns with `PIP` will be assigned to `self.pip`.
* Columns with `VAR_BETA` will be assigned to `self.post_var_beta`.

Parameters:
    parameter_table: A pandas table or dataframe. (required)

Source code in viprs/model/BayesPRSModel.py
def set_model_parameters(self, parameter_table):
+    """
+    Parses a pandas dataframe with model parameters and assigns them
+    to the corresponding class attributes.
+
+    For example:
+        * Columns with `BETA` will be assigned to `self.post_mean_beta`.
+        * Columns with `PIP` will be assigned to `self.pip`.
+        * Columns with `VAR_BETA` will be assigned to `self.post_var_beta`.
+
+    :param parameter_table: A pandas table or dataframe.
+    """
+
+    self.pip, self.post_mean_beta, self.post_var_beta = self.harmonize_data(parameter_table=parameter_table)
to_table(col_subset=('CHR', 'SNP', 'POS', 'A1', 'A2'), per_chromosome=False)

Output the posterior estimates for the effect sizes to a pandas dataframe.

Parameters:
    col_subset: The subset of columns to include in the tables (in addition to the effect sizes). (default: ('CHR', 'SNP', 'POS', 'A1', 'A2'))
    per_chromosome: If True, return a separate table for each chromosome. (default: False)

Returns:
    A pandas DataFrame with the posterior estimates for the effect sizes.

Source code in viprs/model/BayesPRSModel.py
def to_table(self, col_subset=('CHR', 'SNP', 'POS', 'A1', 'A2'), per_chromosome=False):
+    """
+    Output the posterior estimates for the effect sizes to a pandas dataframe.
+    :param col_subset: The subset of columns to include in the tables (in addition to the effect sizes).
+    :param per_chromosome: If True, return a separate table for each chromosome.
+
+    :return: A pandas Dataframe with the posterior estimates for the effect sizes.
+    """
+
+    if self.post_mean_beta is None:
+        raise Exception("The posterior means for BETA are not set. Call `.fit()` first.")
+
+    tables = self.gdl.to_snp_table(col_subset=col_subset, per_chromosome=True)
+
+    for c in self.chromosomes:
+
+        tables[c][expand_column_names('BETA', self.post_mean_beta[c].shape)] = self.post_mean_beta[c]
+
+        if self.pip is not None:
+            tables[c][expand_column_names('PIP', self.pip[c].shape)] = self.pip[c]
+
+        if self.post_var_beta is not None:
+            tables[c][expand_column_names('VAR_BETA', self.post_var_beta[c].shape)] = self.post_var_beta[c]
+
+    if per_chromosome:
+        return tables
+    else:
+        return pd.concat([tables[c] for c in self.chromosomes])
write_inferred_parameters(f_name, per_chromosome=False, sep='\t')

A convenience method to write the inferred posterior for the effect sizes to file.

TODO:
    * Support outputting scoring files compatible with the PGS Catalog format:
      https://www.pgscatalog.org/downloads/#dl_scoring_files

Parameters:
    f_name: The filename (or directory) where to write the effect sizes. (required)
    per_chromosome: If True, write a file for each chromosome separately. (default: False)
    sep: The delimiter for the file (tab by default). (default: '\t')

Source code in viprs/model/BayesPRSModel.py
def write_inferred_parameters(self, f_name, per_chromosome=False, sep="\t"):
+    """
+    A convenience method to write the inferred posterior for the effect sizes to file.
+
+    TODO:
+        * Support outputting scoring files compatible with PGS catalog format:
+        https://www.pgscatalog.org/downloads/#dl_scoring_files
+
+    :param f_name: The filename (or directory) where to write the effect sizes
+    :param per_chromosome: If True, write a file for each chromosome separately.
+    :param sep: The delimiter for the file (tab by default).
+    """
+
+    tables = self.to_table(per_chromosome=per_chromosome)
+
+    if '.fit' not in f_name:
+        ext = '.fit'
+    else:
+        ext = ''
+
+    if per_chromosome:
+        # Write one file per chromosome to the directory `f_name`:
+        for c, tab in tables.items():
+            tab.to_csv(osp.join(f_name, f'chr_{c}.fit'), sep=sep, index=False)
+    else:
+        tables.to_csv(f_name + ext, sep=sep, index=False)
\ No newline at end of file
diff --git a/api/model/LDPredInf/index.html b/api/model/LDPredInf/index.html
new file mode 100644
index 0000000..b074cab
--- /dev/null
+++ b/api/model/LDPredInf/index.html
@@ -0,0 +1,1437 @@
LDPredInf

class LDPredInf

Bases: BayesPRSModel

A wrapper class implementing the LDPred-inf model.
The LDPred-inf model is a Bayesian model that uses summary statistics
from GWAS to estimate the posterior mean effect sizes of the SNPs. It is equivalent
to performing ridge regression, with the penalty proportional to the inverse of
the per-SNP heritability.

Refer to the following references for details about the LDPred-inf model:
* Vilhjálmsson et al. AJHG. 2015
* Privé et al. Bioinformatics. 2020

Attributes:
    gdl: An instance of GWADataLoader.
    h2: The heritability for the trait (can also be chromosome-specific).

Source code in viprs/model/LDPredInf.py
class LDPredInf(BayesPRSModel):
+    """
+    A wrapper class implementing the LDPred-inf model.
+    The LDPred-inf model is a Bayesian model that uses summary statistics
+    from GWAS to estimate the posterior mean effect sizes of the SNPs. It is equivalent
+    to performing ridge regression, with the penalty proportional to the inverse of
+    the per-SNP heritability.
+
+    Refer to the following references for details about the LDPred-inf model:
+    * Vilhjálmsson et al. AJHG. 2015
+    * Privé et al. Bioinformatics. 2020
+
+    :ivar gdl: An instance of `GWADataLoader`
+    :ivar h2: The heritability for the trait (can also be chromosome-specific)
+
+    """
+
+    def __init__(self,
+                 gdl,
+                 h2=None):
+        """
+        Initialize the LDPred-inf model.
+        :param gdl: An instance of GWADataLoader
+        :param h2: The heritability for the trait (can also be chromosome-specific)
+        """
+        super().__init__(gdl)
+
+        if h2 is None:
+            from magenpy.stats.h2.ldsc import simple_ldsc
+            self.h2 = simple_ldsc(self.gdl)
+        else:
+            self.h2 = h2
+
+    def get_heritability(self):
+        """
+        :return: The heritability estimate for the trait of interest.
+        """
+        return self.h2
+
+    def fit(self, solver='minres', **solver_kwargs):
+        """
+        Fit the summary statistics-based ridge regression,
+        following the specifications of the LDPred-inf model.
+
+        !!! warning
+            Not tested yet.
+
+        Here, we use `lsqr` or `minres` solvers to solve the system of equations:
+
+        (D + lam*I)BETA = BETA_HAT
+
+        where D is the LD matrix, BETA is the ridge regression
+        estimate that we wish to obtain, and BETA_HAT contains the
+        marginal effect sizes estimated from GWAS.
+
+        In this case, lam = M / (N*h2), where M is the number of SNPs,
+        N is the number of samples, and h2 is the heritability
+        of the trait.
+
+        https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.lsqr.html
+        https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.minres.html
+
+        :param solver: The solver for the system of linear equations. Options: `minres` or `lsqr`
+        :param solver_kwargs: keyword arguments for the solver.
+        """
+
+        assert solver in ('lsqr', 'minres')
+
+        import numpy as np
+        from scipy.sparse.linalg import lsqr, minres
+        from scipy.sparse import identity, block_diag
+
+        if solver == 'lsqr':
+            solve = lsqr
+        else:
+            solve = minres
+
+        # Lambda, the regularization parameter for the
+        # ridge regression estimator. For LDPred-inf model,
+        # we set this to 'M / (N*h2)', where M is the number of SNPs,
+        # N is the number of samples and h2 is the heritability
+        # of the trait.
+        lam = self.n_snps / (self.n * self.h2)
+
+        chroms = self.gdl.chromosomes
+
+        # Extract the LD matrices for all the chromosomes represented and
+        # concatenate them into one block diagonal matrix:
+        ld_mats = []
+        for c in chroms:
+            self.gdl.ld[c].load(dtype=np.float32)
+            ld_mats.append(self.gdl.ld[c].csr_matrix)
+
+        ld = block_diag(ld_mats, format='csr')
+
+        # Extract the marginal GWAS effect sizes:
+        marginal_beta = np.concatenate([self.gdl.sumstats_table[c].marginal_beta
+                                        for c in chroms])
+
+        # Estimate the BETAs under the ridge penalty:
+        res = solve(ld + lam * identity(ld.shape[0]), marginal_beta, **solver_kwargs)
+
+        # Extract the estimates and populate them in `post_mean_beta`
+        start = 0
+        self.post_mean_beta = {}
+
+        for c in chroms:
+            self.post_mean_beta[c] = res[0][start:start + self.shapes[c]]
+            start += self.shapes[c]
+
+        return self
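A brief usage sketch, assuming `gdl` is a GWADataLoader with summary statistics and LD matrices attached (the heritability value is illustrative):

    from viprs.model.LDPredInf import LDPredInf

    # If h2 is omitted, it is estimated internally via magenpy's simple_ldsc:
    ldpred_inf = LDPredInf(gdl, h2=0.3)
    ldpred_inf.fit(solver='minres')

    effect_sizes = ldpred_inf.to_table(per_chromosome=False)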
__init__(gdl, h2=None)

Initialize the LDPred-inf model.

Parameters:
    gdl: An instance of GWADataLoader. (required)
    h2: The heritability for the trait (can also be chromosome-specific). (default: None)

Source code in viprs/model/LDPredInf.py
def __init__(self,
+             gdl,
+             h2=None):
+    """
+    Initialize the LDPred-inf model.
+    :param gdl: An instance of GWADataLoader
+    :param h2: The heritability for the trait (can also be chromosome-specific)
+    """
+    super().__init__(gdl)
+
+    if h2 is None:
+        from magenpy.stats.h2.ldsc import simple_ldsc
+        self.h2 = simple_ldsc(self.gdl)
+    else:
+        self.h2 = h2
+
fit(solver='minres', **solver_kwargs)

Fit the summary statistics-based ridge regression,
following the specifications of the LDPred-inf model.

!!! warning
    Not tested yet.

Here, we use the `lsqr` or `minres` solvers to solve the system of equations:

    (D + lam*I) BETA = BETA_HAT

where D is the LD matrix, BETA is the ridge regression estimate that we wish to
obtain, and BETA_HAT contains the marginal effect sizes estimated from GWAS.

In this case, lam = M / (N*h2), where M is the number of SNPs, N is the number
of samples, and h2 is the heritability of the trait.

https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.lsqr.html
https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.minres.html

Parameters:
    solver: The solver for the system of linear equations. Options: 'minres' or 'lsqr'. (default: 'minres')
    solver_kwargs: Keyword arguments for the solver. (default: {})

Source code in viprs/model/LDPredInf.py
def fit(self, solver='minres', **solver_kwargs):
+    """
+    Fit the summary statistics-based ridge regression,
+    following the specifications of the LDPred-inf model.
+
+    !!! warning
+        Not tested yet.
+
+    Here, we use `lsqr` or `minres` solvers to solve the system of equations:
+
+    (D + lam*I)BETA = BETA_HAT
+
+    where D is the LD matrix, BETA is the ridge regression
+    estimate that we wish to obtain, and BETA_HAT contains the
+    marginal effect sizes estimated from GWAS.
+
+    In this case, lam = M / (N*h2), where M is the number of SNPs,
+    N is the number of samples, and h2 is the heritability
+    of the trait.
+
+    https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.lsqr.html
+    https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.minres.html
+
+    :param solver: The solver for the system of linear equations. Options: `minres` or `lsqr`
+    :param solver_kwargs: keyword arguments for the solver.
+    """
+
+    assert solver in ('lsqr', 'minres')
+
+    import numpy as np
+    from scipy.sparse.linalg import lsqr, minres
+    from scipy.sparse import identity, block_diag
+
+    if solver == 'lsqr':
+        solve = lsqr
+    else:
+        solve = minres
+
+    # Lambda, the regularization parameter for the
+    # ridge regression estimator. For LDPred-inf model,
+    # we set this to 'M / (N*h2)', where M is the number of SNPs,
+    # N is the number of samples and h2 is the heritability
+    # of the trait.
+    lam = self.n_snps / (self.n * self.h2)
+
+    chroms = self.gdl.chromosomes
+
+    # Extract the LD matrices for all the chromosomes represented and
+    # concatenate them into one block diagonal matrix:
+    ld_mats = []
+    for c in chroms:
+        self.gdl.ld[c].load(dtype=np.float32)
+        ld_mats.append(self.gdl.ld[c].csr_matrix)
+
+    ld = block_diag(ld_mats, format='csr')
+
+    # Extract the marginal GWAS effect sizes:
+    marginal_beta = np.concatenate([self.gdl.sumstats_table[c].marginal_beta
+                                    for c in chroms])
+
+    # Estimate the BETAs under the ridge penalty:
+    res = solve(ld + lam * identity(ld.shape[0]), marginal_beta, **solver_kwargs)
+
+    # Extract the estimates and populate them in `post_mean_beta`
+    start = 0
+    self.post_mean_beta = {}
+
+    for c in chroms:
+        self.post_mean_beta[c] = res[0][start:start + self.shapes[c]]
+        start += self.shapes[c]
+
+    return self
+
get_heritability()

Returns:
    The heritability estimate for the trait of interest.

Source code in viprs/model/LDPredInf.py
def get_heritability(self):
+    """
+    :return: The heritability estimate for the trait of interest.
+    """
+    return self.h2
+
\ No newline at end of file
diff --git a/api/model/VIPRS/index.html b/api/model/VIPRS/index.html
new file mode 100644
index 0000000..20801c5
--- /dev/null
+++ b/api/model/VIPRS/index.html
@@ -0,0 +1,5932 @@
VIPRS

class VIPRS

Bases: BayesPRSModel

The base class for performing Variational Inference of Polygenic Risk Scores (VIPRS).

This class implements the Variational EM algorithm for estimating the posterior distribution
of the effect sizes using GWAS summary statistics. The model assumes a spike-and-slab mixture
prior on the effect size distribution, with the spike component representing the null effects
and the slab component representing the non-null effects.

Details for the algorithm can be found in the Supplementary Material of the following paper:

> Zabad S, Gravel S, Li Y. Fast and accurate Bayesian polygenic risk modeling with variational inference.
Am J Hum Genet. 2023 May 4;110(5):741-761. doi: 10.1016/j.ajhg.2023.03.009.
Epub 2023 Apr 7. PMID: 37030289; PMCID: PMC10183379.

Attributes:
    gdl: An instance of GWADataLoader containing harmonized GWAS summary statistics and LD matrices.
    var_gamma: A dictionary of the variational gamma parameter, denoting the probability that the variant comes from the slab component.
    var_mu: A dictionary of the variational mu parameter, denoting the mean of the effect size for each variant.
    var_tau: A dictionary of the variational tau parameter, denoting the precision of the effect size for each variant.
    eta: A dictionary of the posterior mean of the effect size, E[B] = gamma*mu.
    zeta: A dictionary of the expectation of B^2 under the posterior, E[B^2] = gamma*(mu^2 + 1./tau).
    eta_diff: A dictionary of the difference between the etas in two consecutive iterations.
    q: A dictionary of the q-factor, which keeps track of the multiplication of eta with the LD matrix.
    ld_data: A dictionary of the `data` arrays of the sparse LD matrices.
    ld_indptr: A dictionary of the `indptr` arrays of the sparse LD matrices.
    ld_left_bound: A dictionary of the left boundaries of the LD matrices.
    std_beta: A dictionary of the standardized marginal effect sizes from GWAS.
    Nj: A dictionary of the sample size per SNP from the GWAS study.
    threads: The number of threads to use when fitting the model.
    fix_params: A dictionary of hyperparameters with their fixed values.
    float_precision: The precision of the floating point variables. Options are: 'float32' or 'float64'.
    order: The order of the arrays in memory. Options are: 'C' or 'F'.
    low_memory: A boolean flag to indicate whether to use low memory mode.
    dequantize_on_the_fly: A boolean flag to indicate whether to dequantize the LD matrix on the fly.
    use_cpp: A boolean flag to indicate whether to use the C++ backend.
    use_blas: A boolean flag to indicate whether to use BLAS for linear algebra operations.
    optim_result: An instance of OptimizeResult tracking the progress of the optimization algorithm.
    verbose: Verbosity of the information printed to standard output. Can be boolean or an integer.
    history: A dictionary to store the history of the optimization procedure (e.g. the objective as a function of iteration number).
    tracked_theta: A list of hyperparameters to track throughout the optimization procedure. Useful for debugging/model checking.
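A short construction-and-fit sketch (the hyperparameter values are illustrative; `gdl` is a GWADataLoader with harmonized summary statistics and LD matrices, as in the earlier examples):

    from viprs.model.VIPRS import VIPRS

    v = VIPRS(gdl,
              fix_params={'pi': 0.01},                          # fix a hyperparameter
              tracked_theta=['heritability', 'sigma_epsilon'],  # record per iteration
              float_precision='float32',
              threads=4)
    v.fit()

    print(v.get_heritability(), v.get_proportion_causal())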
Source code in viprs/model/VIPRS.py
class VIPRS(BayesPRSModel):
+    """
+    The base class for performing Variational Inference of Polygenic Risk Scores (VIPRS).
+
+    This class implements the Variational EM algorithm for estimating the posterior distribution
+    of the effect sizes using GWAS summary statistics. The model assumes a spike-and-slab mixture
+    prior on the effect size distribution, with the spike component representing the null effects
+    and the slab component representing the non-null effects.
+
+    Details for the algorithm can be found in the Supplementary Material of the following paper:
+
+    > Zabad S, Gravel S, Li Y. Fast and accurate Bayesian polygenic risk modeling with variational inference.
+    Am J Hum Genet. 2023 May 4;110(5):741-761. doi: 10.1016/j.ajhg.2023.03.009.
+    Epub 2023 Apr 7. PMID: 37030289; PMCID: PMC10183379.
+
+    :ivar gdl: An instance of GWADataLoader containing harmonized GWAS summary statistics and LD matrices.
+    :ivar var_gamma: A dictionary of the variational gamma parameter, denoting the probability that the
+    variant comes from the slab component.
+    :ivar var_mu: A dictionary of the variational mu parameter, denoting the mean of the
+    effect size for each variant.
+    :ivar var_tau: A dictionary of the variational tau parameter, denoting the precision of
+    the effect size for each variant.
+    :ivar eta: A dictionary of the posterior mean of the effect size, E[B] = gamma*mu.
+    :ivar zeta: A dictionary of the expectation of B^2 under the posterior, E[B^2] = gamma*(mu^2 + 1./tau).
+    :ivar eta_diff: A dictionary of the difference between the etas in two consecutive iterations.
+    :ivar q: A dictionary of the q-factor, which keeps track of the multiplication of eta with the LD matrix.
+    :ivar ld_data: A dictionary of the `data` arrays of the sparse LD matrices.
+    :ivar ld_indptr: A dictionary of the `indptr` arrays of the sparse LD matrices.
+    :ivar ld_left_bound: A dictionary of the left boundaries of the LD matrices.
+    :ivar std_beta: A dictionary of the standardized marginal effect sizes from GWAS.
+    :ivar Nj: A dictionary of the sample size per SNP from the GWAS study.
+    :ivar threads: The number of threads to use when fitting the model.
+    :ivar fix_params: A dictionary of hyperparameters with their fixed values.
+    :ivar float_precision: The precision of the floating point variables. Options are: 'float32' or 'float64'.
+    :ivar order: The order of the arrays in memory. Options are: 'C' or 'F'.
+    :ivar low_memory: A boolean flag to indicate whether to use low memory mode.
+    :ivar dequantize_on_the_fly: A boolean flag to indicate whether to dequantize the LD matrix on the fly.
+    :ivar use_cpp: A boolean flag to indicate whether to use the C++ backend.
+    :ivar use_blas: A boolean flag to indicate whether to use BLAS for linear algebra operations.
+    :ivar optim_result: An instance of OptimizeResult tracking the progress of the optimization algorithm.
+    :ivar verbose: Verbosity of the information printed to standard output. Can be boolean or an integer.
+    :ivar history: A dictionary to store the history of the optimization procedure (e.g. the objective as a function
+    of iteration number).
+    :ivar tracked_theta: A list of hyperparameters to track throughout the optimization procedure. Useful for
+    debugging/model checking.
+
+    """
+
+    def __init__(self,
+                 gdl,
+                 fix_params=None,
+                 tracked_theta=None,
+                 verbose=True,
+                 float_precision='float32',
+                 order='F',
+                 low_memory=True,
+                 use_blas=True,
+                 use_cpp=True,
+                 dequantize_on_the_fly=False,
+                 threads=1):
+
+        """
+
+        Initialize the VIPRS model.
+
+        .. note::
+            The initialization of the model involves loading the LD matrix to memory.
+
+        :param gdl: An instance of GWADataLoader containing harmonized GWAS summary statistics and LD matrices.
+        :param fix_params: A dictionary of hyperparameters with their fixed values.
+        :param tracked_theta: A list of hyperparameters to track throughout the optimization procedure. Useful
+        for debugging/model checking. Currently, we allow the user to track the following:
+
+            * The proportion of causal variants (`pi`).
+            * The heritability ('heritability').
+            * The residual variance (`sigma_epsilon`).
+
+        :param verbose: Verbosity of the information printed to standard output. Can be boolean or an integer.
+        Provide a number greater than 1 for more detailed output.
+        :param float_precision: The precision of the floating point variables. Options are: 'float32' or 'float64'.
+        :param order: The order of the arrays in memory. Options are: 'C' or 'F'.
+        :param low_memory: A boolean flag to indicate whether to use low memory mode.
+        :param use_blas: A boolean flag to indicate whether to use BLAS for linear algebra operations.
+        :param use_cpp: A boolean flag to indicate whether to use the C++ backend.
+        :param dequantize_on_the_fly: A boolean flag to indicate whether to dequantize the LD matrix on the fly.
+        :param threads: The number of threads to use when fitting the model.
+        """
+
+        super().__init__(gdl)
+
+        # ------------------- Sanity checks -------------------
+
+        assert gdl.ld is not None, "The LD matrices must be initialized in the GWADataLoader object."
+        assert gdl.sumstats_table is not None, ("The summary statistics must be "
+                                                "initialized in the GWADataLoader object.")
+
+        if dequantize_on_the_fly and not use_cpp:
+            raise Exception("Dequantization on the fly is only supported when using the C++ backend.")
+
+        # ------------------- Initialize the model -------------------
+
+        # Variational parameters:
+        self.var_gamma = {}
+        self.var_mu = {}
+        self.var_tau = {}
+
+        # Cache this quantity:
+        self._log_var_tau = {}
+
+        # Properties of proposed variational distribution:
+        self.eta = {}  # The posterior mean, E[B] = \gamma*\mu_beta
+        self.zeta = {}  # The expectation of B^2 under the posterior, E[B^2] = \gamma*(\mu_beta^2 + 1./\tau_beta)
+
+        # The difference between the etas in two consecutive iterations (can be used for checking convergence,
+        # or implementing optimized updates in the E-Step).
+        self.eta_diff = {}
+
+        # q-factor (keeps track of LD-related terms)
+        self.q = {}
+
+        # ---------- Model hyperparameters ----------
+
+        self.sigma_epsilon = None
+        self.tau_beta = None
+        self.pi = None
+        self._sigma_g = None  # A proxy for the additive genotypic variance
+
+        # ---------- Inputs to the model: ----------
+
+        # NOTE: Here, we typecast the inputs to the model to the specified float precision.
+        # This also needs to be done in the initialization methods.
+
+        # LD-related quantities:
+
+        self.ld_data = {}
+        self.ld_indptr = {}
+        self.ld_left_bound = {}
+
+        for c, ld_mat in self.gdl.get_ld_matrices().items():
+            # Load the data for the LD matrix:
+            if dequantize_on_the_fly and np.issubdtype(ld_mat.stored_dtype, np.integer):
+                # Cannot dequantize float16 on the fly due to lack of canonical representation
+                # for this data type:
+                dtype = ld_mat.stored_dtype
+            else:
+
+                if dequantize_on_the_fly:
+                    logging.warning("Dequantization on the fly is only supported for "
+                                    "integer data types. Ignoring this flag.")
+
+                dtype = float_precision
+                dequantize_on_the_fly = False
+
+            self.ld_data[c], self.ld_indptr[c], self.ld_left_bound[c] = ld_mat.load_data(
+                return_symmetric=not low_memory,
+                dtype=dtype
+            )
+
+        # Standardized betas:
+        self.std_beta = {c: ss.get_snp_pseudo_corr().astype(float_precision)
+                         for c, ss in self.gdl.sumstats_table.items()}
+
+        # Make sure that the data type for the sample size-per-SNP has the correct format:
+
+        self.Nj = {c: nj.astype(float_precision, order=order)
+                   for c, nj in self.Nj.items()}
+
+        # ---------- General properties: ----------
+
+        self.threads = threads
+        self.fix_params = fix_params or {}
+
+        self.float_precision = float_precision
+        self.float_resolution = np.finfo(self.float_precision).resolution
+        self.order = order
+        self.low_memory = low_memory
+
+        self.dequantize_on_the_fly = dequantize_on_the_fly
+
+        if self.dequantize_on_the_fly:
+            info = np.iinfo(self.ld_data[c].dtype)
+            self.dequantize_scale = 2. / (info.max - (info.min + 1))
+        else:
+            self.dequantize_scale = 1.
+
+        self.use_cpp = use_cpp
+        self.use_blas = use_blas
+
+        self.optim_result = OptimizeResult()
+        self.verbose = verbose
+        self.history = {}
+        self.tracked_theta = tracked_theta or []
+
+    def initialize(self, theta_0=None, param_0=None):
+        """
+        A convenience method to initialize all the objects associated with the model.
+        :param theta_0: A dictionary of initial values for the hyperparameters theta
+        :param param_0: A dictionary of initial values for the variational parameters
+        """
+
+        logging.debug("> Initializing model parameters")
+
+        self.initialize_theta(theta_0)
+        self.initialize_variational_parameters(param_0)
+        self.init_optim_meta()
+
+    def init_optim_meta(self):
+        """
+        Initialize the various quantities/objects to keep track of the optimization process.
+        This method initializes the "history" object (which keeps track of the objective + other
+        hyperparameters requested by the user), in addition to the OptimizeResult objects.
+        """
+
+        self.history = {
+            'ELBO': [],
+        }
+
+        for tt in self.tracked_theta:
+            self.history[tt] = []
+
+        self.optim_result.reset()
+
+    def initialize_theta(self, theta_0=None):
+        """
+        Initialize the global hyperparameters of the model.
+        :param theta_0: A dictionary of initial values for the hyperparameters theta
+        """
+
+        if theta_0 is not None and self.fix_params is not None:
+            theta_0.update(self.fix_params)
+        elif self.fix_params is not None:
+            theta_0 = self.fix_params
+        elif theta_0 is None:
+            theta_0 = {}
+
+        # ----------------------------------------------
+        # (1) If 'pi' is not set, initialize from a uniform
+        if 'pi' not in theta_0:
+            self.pi = np.random.uniform(low=max(0.005, 1. / self.n_snps), high=.1)
+        else:
+            self.pi = theta_0['pi']
+
+        # ----------------------------------------------
+        # (2) Initialize sigma_epsilon and tau_beta
+        # Assuming that the genotype and phenotype are normalized,
+        # these two quantities are conceptually linked.
+        # The initialization routine here assumes that:
+        # Var(y) = h2 + sigma_epsilon
+        # Where, by assumption, Var(y) = 1,
+        # And h2 ~= pi*M/tau_beta
+
+        if 'sigma_epsilon' not in theta_0:
+            if 'tau_beta' not in theta_0:
+
+                # If neither tau_beta nor sigma_epsilon are given,
+                # then initialize using the SNP heritability estimate
+
+                try:
+                    naive_h2g = np.clip(simple_ldsc(self.gdl), a_min=1e-3, a_max=1. - 1e-3)
+                except Exception:
+                    naive_h2g = np.random.uniform(low=.001, high=.999)
+
+                self.sigma_epsilon = 1. - naive_h2g
+                self.tau_beta = self.pi * self.n_snps / naive_h2g
+            else:
+
+                # If tau_beta is given, use it to initialize sigma_epsilon
+
+                self.tau_beta = theta_0['tau_beta']
+                self.sigma_epsilon = np.clip(1. - (self.pi * self.n_snps / self.tau_beta),
+                                             a_min=self.float_resolution,
+                                             a_max=1. - self.float_resolution)
+        else:
+
+            # If sigma_epsilon is given, use it in the initialization
+
+            self.sigma_epsilon = theta_0['sigma_epsilon']
+
+            if 'tau_beta' in theta_0:
+                self.tau_beta = theta_0['tau_beta']
+            else:
+                self.tau_beta = (self.pi * self.n_snps) / (1. - self.sigma_epsilon)
+
+        # Cast all the hyperparameters to conform to the precision set by the user:
+        self.sigma_epsilon = np.dtype(self.float_precision).type(self.sigma_epsilon)
+        self.tau_beta = np.dtype(self.float_precision).type(self.tau_beta)
+        self.pi = np.dtype(self.float_precision).type(self.pi)
+        self._sigma_g = np.dtype(self.float_precision).type(0.)
+
+    def initialize_variational_parameters(self, param_0=None):
+        """
+        Initialize the variational parameters of the model.
+        :param param_0: A dictionary of initial values for the variational parameters
+        """
+
+        param_0 = param_0 or {}
+
+        self.var_mu = {}
+        self.var_tau = {}
+        self.var_gamma = {}
+
+        for c, shapes in self.shapes.items():
+
+            # Initialize the variational parameters according to the derived update equations,
+            # ignoring correlations between SNPs.
+            if 'tau' in param_0:
+                self.var_tau[c] = param_0['tau'][c]
+            else:
+                self.var_tau[c] = (self.Nj[c] / self.sigma_epsilon) + self.tau_beta
+
+            self.var_tau[c] = self.var_tau[c].astype(self.float_precision, order=self.order, copy=False)
+
+            if 'mu' in param_0:
+                self.var_mu[c] = param_0['mu'][c].astype(self.float_precision, order=self.order)
+            else:
+                self.var_mu[c] = np.zeros(shapes, dtype=self.float_precision, order=self.order)
+
+            if 'gamma' in param_0:
+                self.var_gamma[c] = param_0['gamma'][c].astype(self.float_precision, order=self.order)
+            else:
+                pi = self.get_pi(c)
+                if isinstance(self.pi, dict):
+                    self.var_gamma[c] = pi.astype(self.float_precision, order=self.order)
+                else:
+                    self.var_gamma[c] = pi*np.ones(shapes, dtype=self.float_precision, order=self.order)
+
+        self.eta = self.compute_eta()
+        self.zeta = self.compute_zeta()
+        self.eta_diff = {c: np.zeros_like(eta, dtype=self.float_precision) for c, eta in self.eta.items()}
+        self.q = {c: np.zeros_like(eta, dtype=self.float_precision) for c, eta in self.eta.items()}
+        self._log_var_tau = {c: np.log(self.var_tau[c]) for c in self.var_tau}
+
+    def e_step(self):
+        """
+        Run the E-Step of the Variational EM algorithm.
+        Here, we update the variational parameters for each variant using coordinate
+        ascent optimization techniques. The update equations are outlined in
+        the Supplementary Material of the following paper:
+
+        > Zabad S, Gravel S, Li Y. Fast and accurate Bayesian polygenic risk modeling with variational inference.
+        Am J Hum Genet. 2023 May 4;110(5):741-761. doi: 10.1016/j.ajhg.2023.03.009.
+        Epub 2023 Apr 7. PMID: 37030289; PMCID: PMC10183379.
+        """
+
+        for c, c_size in self.shapes.items():
+
+            # Get the priors:
+            tau_beta = self.get_tau_beta(c)
+            pi = self.get_pi(c)
+
+            # Updates for tau variational parameters:
+            self.var_tau[c] = (self.Nj[c] / self.sigma_epsilon) + tau_beta
+            np.log(self.var_tau[c], out=self._log_var_tau[c])
+
+            # Compute some quantities that are needed for the per-SNP updates:
+            mu_mult = self.Nj[c]/(self.var_tau[c]*self.sigma_epsilon)
+            u_logs = np.log(pi) - np.log(1. - pi) + .5*(np.log(tau_beta) - self._log_var_tau[c])
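+            # In the E-Step kernels below, these quantities are combined per
+            # the update equations in the paper referenced above:
+            # mu_j = mu_mult_j * (std_beta_j - q_j), where q_j collects the LD
+            # contributions from other variants, and
+            # gamma_j = sigmoid(u_logs_j + 0.5*var_tau_j*mu_j^2).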
+
+            if self.use_cpp:
+                cpp_e_step(self.ld_left_bound[c],
+                           self.ld_indptr[c],
+                           self.ld_data[c],
+                           self.std_beta[c],
+                           self.var_gamma[c],
+                           self.var_mu[c],
+                           self.eta[c],
+                           self.q[c],
+                           self.eta_diff[c],
+                           u_logs,
+                           0.5*self.var_tau[c],
+                           mu_mult,
+                           self.dequantize_scale,
+                           self.threads,
+                           self.use_blas,
+                           self.low_memory)
+            else:
+
+                e_step(self.ld_left_bound[c],
+                       self.ld_indptr[c],
+                       self.ld_data[c],
+                       self.std_beta[c],
+                       self.var_gamma[c],
+                       self.var_mu[c],
+                       self.eta[c],
+                       self.q[c],
+                       self.eta_diff[c],
+                       u_logs,
+                       0.5*self.var_tau[c],
+                       mu_mult,
+                       self.threads,
+                       self.use_blas,
+                       self.low_memory)
+
+        self.zeta = self.compute_zeta()
+
+    def update_pi(self):
+        """
+        Update the prior probability of a variant being causal, or the proportion of causal variants, `pi`.
+        """
+
+        if 'pi' not in self.fix_params:
+
+            # Get the average of the gammas:
+            self.pi = dict_mean(self.var_gamma, axis=0)
+
+    def update_tau_beta(self):
+        """
+        Update the prior precision (inverse variance) for the effect size, `tau_beta`.
+        """
+
+        if 'tau_beta' not in self.fix_params:
+
+            # tau_beta estimate:
+            self.tau_beta = (self.pi * self.m / dict_sum(self.zeta, axis=0)).astype(self.float_precision)
+
+    def _update_sigma_g(self):
+        """
+        Update the additive genotypic variance, `sigma_g`. This quantity is equivalent to
+        B'RB, where B is the posterior mean for the effect sizes and R is the LD matrix.
+        This quantity is used in the update of the residual variance, `sigma_epsilon` and
+        in computing the pseudo-heritability.
+        """
+
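+        # zeta supplies the diagonal terms E[beta_j^2], while q_j * eta_j
+        # accumulates the off-diagonal LD contributions, so the total is
+        # the posterior expectation of B'RB.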
+        self._sigma_g = np.sum([
+            np.sum(self.zeta[c] + np.multiply(self.q[c], self.eta[c]), axis=0)
+            for c in self.shapes.keys()
+        ], axis=0)
+
+    def update_sigma_epsilon(self):
+        """
+        Update the global residual variance parameter, `sigma_epsilon`.
+        """
+
+        if 'sigma_epsilon' not in self.fix_params:
+
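+            # Derivation sketch: sigma_epsilon = E[||y - X*beta||^2]/N with
+            # standardized genotypes/phenotypes, where y'y/N = 1 and X'y/N is
+            # the standardized beta (giving the -2*std_beta'eta cross-term),
+            # while E[beta'X'X*beta]/N = sigma_g.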
+            sig_eps = 0.
+
+            for c, _ in self.shapes.items():
+                sig_eps -= 2.*self.std_beta[c].dot(self.eta[c])
+
+            self.sigma_epsilon = 1. + sig_eps + self._sigma_g
+
+    def m_step(self):
+        """
+        Run the M-Step of the Variational EM algorithm.
+        Here, we update the hyperparameters of the model, by simply calling
+        the update functions for each hyperparameter separately.
+
+        """
+
+        self.update_pi()
+        self.update_tau_beta()
+        self._update_sigma_g()
+        self.update_sigma_epsilon()
+
+    def objective(self):
+        """
+        The optimization objective for the variational inference problem. The objective
+        for the VIPRS method is the Evidence Lower-Bound (ELBO) in this case.
+
+        !!! seealso "See Also"
+            * [elbo][viprs.model.VIPRS.VIPRS.elbo]
+
+
+        """
+        return self.elbo()
+
+    def elbo(self, sum_axis=None):
+        """
+        Compute the variational objective, the Evidence Lower-BOund (ELBO),
+        from GWAS summary statistics and the reference LD data. This implementation assumes
+        that the product of the LD matrix with the current estimate of the effect sizes
+        is already computed and stored in the `q` dictionary. If this is not the case,
+        we recommend computing q first and then calling this method.
+
+        :param sum_axis: The axis along which to sum the ELBO. If None, the ELBO is returned as a scalar.
+        """
+
+        # Concatenate the dictionary items for easy computation:
+        var_gamma = np.clip(dict_concat(self.var_gamma),
+                            a_min=self.float_resolution,
+                            a_max=1. - self.float_resolution)
+        # The gamma for the null component
+        null_gamma = np.clip(1. - dict_concat(self.compute_pip()),
+                             a_min=self.float_resolution,
+                             a_max=1. - self.float_resolution)
+        log_var_tau = dict_concat(self._log_var_tau)
+
+        if isinstance(self.pi, dict):
+            pi = dict_concat(self.pi)
+            null_pi = dict_concat(self.get_null_pi())
+        else:
+            pi = self.pi
+            null_pi = self.get_null_pi()
+
+        if isinstance(self.tau_beta, dict):
+            tau_beta = dict_concat(self.tau_beta)
+        else:
+            tau_beta = self.tau_beta
+
+        zeta = dict_concat(self.zeta)
+
+        # Initialize the ELBO:
+        elbo = 0.
+
+        # -----------------------------------------------
+        # (1) Compute the log of the joint density:
+
+        #
+        # (1.1) The following terms are an expansion of ||Y - X\beta||^2
+        #
+        # -N/2log(2pi*sigma_epsilon)
+        elbo -= np.log(2 * np.pi * self.sigma_epsilon)
+
+        # -Y'Y/(2*sigma_epsilon), where we assume Y'Y = N
+        # + (1./sigma_epsilon)*\beta*(XY), where we assume XY = N\hat{\beta}
+        if 'sigma_epsilon' not in self.fix_params:
+            # If sigma_epsilon was updated in the M-Step, then this expression would
+            # simply evaluate to 1. and there's no point in re-computing it again:
+            elbo -= 1.
+        else:
+
+            eta = dict_concat(self.eta)
+            std_beta = dict_concat(self.std_beta)
+
+            elbo -= (1. / self.sigma_epsilon) * (1. - 2.*std_beta.dot(eta) + self._sigma_g)
+
+        elbo *= 0.5*self.n
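+        # Note: only the Gaussian log-likelihood terms accumulated above are
+        # scaled by N/2; the KL-divergence terms added below are not.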
+
+        elbo -= np.multiply(var_gamma, np.log(var_gamma) - np.log(pi)).sum(axis=sum_axis)
+        elbo -= np.multiply(null_gamma, np.log(null_gamma) - np.log(null_pi)).sum(axis=sum_axis)
+
+        elbo += .5 * np.multiply(var_gamma, 1. - log_var_tau + np.log(tau_beta)).sum(axis=sum_axis)
+        elbo -= .5*(tau_beta*zeta).sum(axis=0)
+
+        try:
+            if len(elbo) == 1:
+                return elbo[0]
+            else:
+                return elbo
+        except TypeError:
+            return elbo
+
+    def get_sigma_epsilon(self):
+        """
+        :return: The value of the residual variance, `sigma_epsilon`.
+        """
+        return self.sigma_epsilon
+
+    def get_tau_beta(self, chrom=None):
+        """
+        :param chrom: Get the value of `tau_beta` for a given chromosome.
+
+        :return: The value of the prior precision on the effect size(s), `tau_beta`
+        """
+        if chrom is None:
+            return self.tau_beta
+        else:
+            if isinstance(self.tau_beta, dict):
+                return self.tau_beta[chrom]
+            else:
+                return self.tau_beta
+
+    def get_pi(self, chrom=None):
+        """
+        :param chrom: Get the value of `pi` for a given chromosome.
+
+        :return: The value of the prior probability of a variant being causal, `pi`.
+        """
+
+        if chrom is None:
+            return self.pi
+        else:
+            if isinstance(self.pi, dict):
+                return self.pi[chrom]
+            else:
+                return self.pi
+
+    def get_null_pi(self, chrom=None):
+        """
+        :param chrom: If provided, get the mixing proportion for the null component on a given chromosome.
+
+        :return: The value of the prior probability of a variant being null, `1 - pi`.
+        """
+
+        pi = self.get_pi(chrom=chrom)
+
+        if isinstance(pi, dict):
+            return {c: 1. - c_pi for c, c_pi in pi.items()}
+        else:
+            return 1. - pi
+
+    def get_proportion_causal(self):
+        """
+        :return: The proportion of causal variants in the model.
+        """
+        if isinstance(self.pi, dict):
+            return dict_mean(self.pi, axis=0)
+        else:
+            return self.pi
+
+    def get_average_effect_size_variance(self):
+        """
+        :return: The average per-SNP variance for the prior mixture components
+        """
+        if isinstance(self.pi, dict):
+            pi = dict_concat(self.pi, axis=0)
+        else:
+            pi = self.pi
+
+        if isinstance(self.tau_beta, dict):
+            tau_beta = dict_concat(self.tau_beta, axis=0)
+        else:
+            tau_beta = self.tau_beta
+
+        return np.sum(pi / tau_beta, axis=0)
+
+    def get_heritability(self):
+        """
+        :return: An estimate of the SNP heritability, or proportion of variance explained by SNPs.
+        """
+
+        return self._sigma_g / (self._sigma_g + self.sigma_epsilon)
+
+    def to_theta_table(self):
+        """
+        :return: A `pandas` DataFrame containing information about the estimated hyperparameters of the model.
+        """
+
+        theta_table = [
+            {'Parameter': 'Residual_variance', 'Value': self.sigma_epsilon},
+            {'Parameter': 'Heritability', 'Value': self.get_heritability()},
+            {'Parameter': 'Proportion_causal', 'Value': self.get_proportion_causal()},
+            {'Parameter': 'Average_effect_variance', 'Value': self.get_average_effect_size_variance()}
+        ]
+
+        if isinstance(self.tau_beta, dict):
+            taus = dict_mean(self.tau_beta, axis=0)
+        else:
+            taus = self.tau_beta
+
+        try:
+            taus = list(taus)
+            for i in range(len(taus)):
+                theta_table.append({'Parameter': f'tau_beta_{i+1}', 'Value': taus[i]})
+        except TypeError:
+            theta_table.append({'Parameter': 'tau_beta', 'Value': taus})
+
+        import pandas as pd
+
+        return pd.DataFrame(theta_table)
+
+    def write_inferred_theta(self, f_name, sep="\t"):
+        """
+        A convenience method to write the inferred (and fixed) hyperparameters of the model to file.
+        :param f_name: The file name
+        :param sep: The separator for the hyperparameter file.
+        """
+
+        # Write the table to file:
+        self.to_theta_table().to_csv(f_name, sep=sep, index=False)
+
+    def update_theta_history(self):
+        """
+        A convenience method to update the history of the hyperparameters of the model,
+        if the user requested that they should be tracked.
+        """
+
+        for tt in self.tracked_theta:
+            if tt == 'pi':
+                self.history['pi'].append(self.get_proportion_causal())
+            elif tt == 'pis':
+                self.history['pis'].append(self.pi)
+            elif tt == 'heritability':
+                self.history['heritability'].append(self.get_heritability())
+            elif tt == 'sigma_epsilon':
+                self.history['sigma_epsilon'].append(self.sigma_epsilon)
+            elif tt == 'tau_beta':
+                self.history['tau_beta'].append(self.tau_beta)
+            elif tt == 'sigma_g':
+                self.history['sigma_g'].append(self._sigma_g)
+
+    def compute_pip(self):
+        """
+        :return: The posterior inclusion probability
+        """
+        return self.var_gamma.copy()
+
+    def compute_eta(self):
+        """
+        :return: The mean for the effect size under the variational posterior.
+        """
+        return {c: v*self.var_mu[c] for c, v in self.var_gamma.items()}
+
+    def compute_zeta(self):
+        """
+        :return: The expectation of the squared effect size under the variational posterior.
+        """
+        return {c: np.multiply(v, self.var_mu[c]**2 + 1./self.var_tau[c])
+                for c, v in self.var_gamma.items()}
+
+    def update_posterior_moments(self):
+        """
+        A convenience method to update the dictionaries containing the posterior moments,
+        including the PIP and posterior mean and variance for the effect size.
+        """
+
+        self.pip = {c: pip.copy() for c, pip in self.compute_pip().items()}
+        self.post_mean_beta = {c: eta.copy() for c, eta in self.eta.items()}
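+        # Var[beta_j] = E[beta_j^2] - E[beta_j]^2 = zeta_j - eta_j^2: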
+        self.post_var_beta = {c: zeta - self.eta[c]**2 for c, zeta in self.zeta.items()}
+
+    def fit(self,
+            max_iter=1000,
+            theta_0=None,
+            param_0=None,
+            continued=False,
+            min_iter=3,
+            f_abs_tol=1e-6,
+            x_abs_tol=1e-7,
+            drop_r_tol=0.01,
+            patience=5):
+        """
+        A convenience method to fit the model using the Variational EM algorithm.
+
+        :param max_iter: Maximum number of iterations. 
+        :param theta_0: A dictionary of values to initialize the hyperparameters
+        :param param_0: A dictionary of values to initialize the variational parameters
+        :param continued: If true, continue the model fitting for more iterations from current parameters
+        instead of starting over.
+        :param min_iter: The minimum number of iterations to run before checking for convergence.
+        :param f_abs_tol: The absolute tolerance threshold for the objective (ELBO).
+        :param x_abs_tol: The absolute tolerance threshold for the variational parameters.
+        :param drop_r_tol: The relative tolerance for the drop in the ELBO to be considered as a red flag. It usually
+        happens around convergence that the objective fluctuates due to numerical errors. This is a way to
+        differentiate such random fluctuations from actual drops in the objective.
+        :param patience: The maximum number of times the objective is allowed to drop before termination.
+        """
+
+        if not continued:
+            self.initialize(theta_0, param_0)
+            start_idx = 1
+        else:
+            start_idx = len(self.history['ELBO']) + 1
+            # Update OptimizeResult object to enable continuation of the optimization:
+            self.optim_result.update(self.history['ELBO'][-1], increment=False)
+
+        if int(self.verbose) > 1:
+            logging.debug("> Performing model fit...")
+            if self.threads > 1:
+                logging.debug(f"> Using up to {self.threads} threads.")
+
+        # If the model is fit over a single chromosome, append this information to the
+        # tqdm progress bar:
+        if len(self.shapes) == 1:
+            chrom, num_snps = list(self.shapes.items())[0]
+            desc = f"Chromosome {chrom} ({num_snps} variants)"
+        else:
+            desc = None
+
+        # Progress bar:
+        pbar = tqdm(range(start_idx, start_idx + max_iter),
+                    disable=not self.verbose,
+                    desc=desc)
+
+        for i in pbar:
+
+            if self.optim_result.stop_iteration:
+                pbar.set_postfix({'Final ELBO': f"{self.optim_result.objective:.4f}"})
+                pbar.n = i - 1
+                pbar.total = i - 1
+                pbar.refresh()
+                pbar.close()
+                break
+
+            self.update_theta_history()
+
+            self.e_step()
+            self.m_step()
+
+            self.history['ELBO'].append(self.elbo())
+            pbar.set_postfix({'ELBO': f"{self.history['ELBO'][-1]:.4f}"})
+
+            if i > 1:
+
+                curr_elbo = self.history['ELBO'][-1]
+                prev_elbo = self.history['ELBO'][-2]
+
+                # Check for convergence in the objective + parameters:
+                if (i > min_iter) and np.isclose(prev_elbo, curr_elbo, atol=f_abs_tol, rtol=0.):
+                    self.optim_result.update(curr_elbo,
+                                             stop_iteration=True,
+                                             success=True,
+                                             message='Objective (ELBO) converged successfully.')
+                elif (i > min_iter) and max([np.max(np.abs(diff)) for diff in self.eta_diff.values()]) < x_abs_tol:
+                    self.optim_result.update(curr_elbo,
+                                             stop_iteration=True,
+                                             success=True,
+                                             message='Variational parameters converged successfully.')
+
+                # Check to see if the objective drops due to numerical instabilities:
+                elif curr_elbo < prev_elbo and not np.isclose(curr_elbo, prev_elbo, atol=0., rtol=drop_r_tol):
+                    patience -= 1
+
+                    if patience == 0:
+                        self.optim_result.update(curr_elbo,
+                                                 stop_iteration=True,
+                                                 success=False,
+                                                 message='Optimization is halted due to numerical instabilities.')
+                    else:
+                        self.optim_result.update(curr_elbo)
+
+                    # Continue so as not to update the posterior moments in this case
+                    continue
+
+                # Check if the model parameters behave in unexpected/pathological ways:
+                elif np.isnan(curr_elbo):
+                    raise OptimizationDivergence(f"Stopping at iteration {i}: "
+                                                 f"The optimization algorithm is not converging!\n"
+                                                 f"The objective (ELBO) is NaN.")
+                elif self.sigma_epsilon <= 0.:
+                    raise OptimizationDivergence(f"Stopping at iteration {i}: "
+                                                 f"The optimization algorithm is not converging!\n"
+                                                 f"The residual variance estimate is negative.")
+                elif self.sigma_epsilon >= 1.:
+                    if self.threads > 1 and self.low_memory:
+                        # If the model is running in low memory mode with multi-threading, it may
+                        # run into numerical instabilities. In this case, we reduce the number of threads to
+                        # stabilize the update and synchronize the parameters.
+                        logging.warning("Reducing the number of threads for better parameter synchronization.")
+                        self.threads -= 1
+                    else:
+                        raise OptimizationDivergence(f"Stopping at iteration {i}: "
+                                                     f"The optimization algorithm is not converging!\n"
+                                                     f"The residual variance estimate exceeded 1.")
+                elif self.get_heritability() >= 1.:
+                    raise OptimizationDivergence(f"Stopping at iteration {i}: "
+                                                 f"The optimization algorithm is not converging!\n"
+                                                 f"Value of estimated heritability exceeded 1.")
+                else:
+                    self.optim_result.update(curr_elbo)
+
+        # Update the posterior moments:
+        self.update_posterior_moments()
+
+        # Inspect the optim result:
+        if not self.optim_result.stop_iteration:
+            self.optim_result.update(self.history['ELBO'][-1],
+                                     stop_iteration=True,
+                                     success=False,
+                                     message="Maximum iterations reached without convergence.\n"
+                                             "You may need to run the model for more iterations.")
+
+        # Inform the user about potential issues:
+        if not self.optim_result.success:
+            logging.warning("\t" + self.optim_result.message)
+
+        logging.debug(f"> Final ELBO: {self.history['ELBO'][-1]:.6f}")
+        logging.debug(f"> Estimated heritability: {self.get_heritability():.6f}")
+        logging.debug(f"> Estimated proportion of causal variants: {self.get_proportion_causal():.6f}")
+
+        return self
+
+
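For orientation, here is a minimal usage sketch of the class defined above. This example is not part of the source; the GWADataLoader construction, file paths, and summary-statistics format are illustrative assumptions based on the magenpy package that viprs builds on, and will differ per dataset:

    import magenpy as mgp
    import viprs as vp

    # Load harmonized GWAS summary statistics and LD matrices
    # (the paths below are hypothetical placeholders):
    gdl = mgp.GWADataLoader(ld_store_files="ld/chr_22/",
                            sumstats_files="sumstats/chr_22.txt",
                            sumstats_format="magenpy")

    model = vp.VIPRS(gdl)      # initialization loads the LD matrix into memory
    model.fit(max_iter=1000)   # run the Variational EM algorithm

    print(model.get_heritability())          # pseudo-heritability estimate
    print(model.get_proportion_causal())     # estimated proportion of causal variants
    model.write_inferred_theta("theta.tsv")  # save hyperparameters to file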

+ __init__(gdl, fix_params=None, tracked_theta=None, verbose=True, float_precision='float32', order='F', low_memory=True, use_blas=True, use_cpp=True, dequantize_on_the_fly=False, threads=1)


Initialize the VIPRS model.

+

.. note::
    The initialization of the model involves loading the LD matrix to memory.


Parameters:

    gdl (required): An instance of GWADataLoader containing harmonized GWAS summary statistics and LD matrices.
    fix_params (default: None): A dictionary of hyperparameters with their fixed values.
    tracked_theta (default: None): A list of hyperparameters to track throughout the optimization procedure. Useful for debugging/model checking. Currently, the user may track: the proportion of causal variants (pi), the heritability ('heritability'), and the residual variance (sigma_epsilon).
    verbose (default: True): Verbosity of the information printed to standard output. Can be a boolean or an integer; provide a number greater than 1 for more detailed output.
    float_precision (default: 'float32'): The precision of the floating point variables. Options are: 'float32' or 'float64'.
    order (default: 'F'): The order of the arrays in memory. Options are: 'C' or 'F'.
    low_memory (default: True): A boolean flag to indicate whether to use low-memory mode.
    use_blas (default: True): A boolean flag to indicate whether to use BLAS for linear algebra operations.
    use_cpp (default: True): A boolean flag to indicate whether to use the C++ backend.
    dequantize_on_the_fly (default: False): A boolean flag to indicate whether to dequantize the LD matrix on the fly.
    threads (default: 1): The number of threads to use when fitting the model.
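For example, a short sketch of fixing one hyperparameter while the others are inferred (assuming `gdl` is an already-constructed GWADataLoader):

    from viprs.model.VIPRS import VIPRS

    # 'pi' is excluded from the M-Step updates; sigma_epsilon and tau_beta
    # are still updated during model fit:
    model = VIPRS(gdl, fix_params={'pi': 0.01})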
Source code in viprs/model/VIPRS.py:
def __init__(self,
+             gdl,
+             fix_params=None,
+             tracked_theta=None,
+             verbose=True,
+             float_precision='float32',
+             order='F',
+             low_memory=True,
+             use_blas=True,
+             use_cpp=True,
+             dequantize_on_the_fly=False,
+             threads=1):
+
+    """
+
+    Initialize the VIPRS model.
+
+    .. note::
+        The initialization of the model involves loading the LD matrix to memory.
+
+    :param gdl: An instance of GWADataLoader containing harmonized GWAS summary statistics and LD matrices.
+    :param fix_params: A dictionary of hyperparameters with their fixed values.
+    :param tracked_theta: A list of hyperparameters to track throughout the optimization procedure. Useful
+    for debugging/model checking. Currently, we allow the user to track the following:
+
+        * The proportion of causal variants (`pi`).
+        * The heritability ('heritability').
+        * The residual variance (`sigma_epsilon`).
+
+    :param verbose: Verbosity of the information printed to standard output. Can be boolean or an integer.
+    Provide a number greater than 1 for more detailed output.
+    :param float_precision: The precision of the floating point variables. Options are: 'float32' or 'float64'.
+    :param order: The order of the arrays in memory. Options are: 'C' or 'F'.
+    :param low_memory: A boolean flag to indicate whether to use low memory mode.
+    :param use_blas: A boolean flag to indicate whether to use BLAS for linear algebra operations.
+    :param use_cpp: A boolean flag to indicate whether to use the C++ backend.
+    :param dequantize_on_the_fly: A boolean flag to indicate whether to dequantize the LD matrix on the fly.
+    :param threads: The number of threads to use when fitting the model.
+    """
+
+    super().__init__(gdl)
+
+    # ------------------- Sanity checks -------------------
+
+    assert gdl.ld is not None, "The LD matrices must be initialized in the GWADataLoader object."
+    assert gdl.sumstats_table is not None, ("The summary statistics must be "
+                                            "initialized in the GWADataLoader object.")
+
+    if dequantize_on_the_fly and not use_cpp:
+        raise ValueError("Dequantization on the fly is only supported when using the C++ backend.")
+
+    # ------------------- Initialize the model -------------------
+
+    # Variational parameters:
+    self.var_gamma = {}
+    self.var_mu = {}
+    self.var_tau = {}
+
+    # Cache this quantity:
+    self._log_var_tau = {}
+
+    # Properties of proposed variational distribution:
+    self.eta = {}  # The posterior mean, E[B] = \gamma*\mu_beta
+    self.zeta = {}  # The expectation of B^2 under the posterior, E[B^2] = \gamma*(\mu_beta^2 + 1./\tau_beta)
+
+    # The difference between the etas in two consecutive iterations (can be used for checking convergence,
+    # or implementing optimized updates in the E-Step).
+    self.eta_diff = {}
+
+    # q-factor (keeps track of LD-related terms)
+    self.q = {}
+
+    # ---------- Model hyperparameters ----------
+
+    self.sigma_epsilon = None
+    self.tau_beta = None
+    self.pi = None
+    self._sigma_g = None  # A proxy for the additive genotypic variance
+
+    # ---------- Inputs to the model: ----------
+
+    # NOTE: Here, we typecast the inputs to the model to the specified float precision.
+    # This also needs to be done in the initialization methods.
+
+    # LD-related quantities:
+
+    self.ld_data = {}
+    self.ld_indptr = {}
+    self.ld_left_bound = {}
+
+    for c, ld_mat in self.gdl.get_ld_matrices().items():
+        # Load the data for the LD matrix:
+        if dequantize_on_the_fly and np.issubdtype(ld_mat.stored_dtype, np.integer):
+            # Cannot dequantize float16 on the fly due to lack of canonical representation
+            # for this data type:
+            dtype = ld_mat.stored_dtype
+        else:
+
+            if dequantize_on_the_fly:
+                logging.warning("Dequantization on the fly is only supported for "
+                                "integer data types. Ignoring this flag.")
+
+            dtype = float_precision
+            dequantize_on_the_fly = False
+
+        self.ld_data[c], self.ld_indptr[c], self.ld_left_bound[c] = ld_mat.load_data(
+            return_symmetric=not low_memory,
+            dtype=dtype
+        )
+
+    # Standardized betas:
+    self.std_beta = {c: ss.get_snp_pseudo_corr().astype(float_precision)
+                     for c, ss in self.gdl.sumstats_table.items()}
+
+    # Make sure that the data type for the sample size-per-SNP has the correct format:
+
+    self.Nj = {c: nj.astype(float_precision, order=order)
+               for c, nj in self.Nj.items()}
+
+    # ---------- General properties: ----------
+
+    self.threads = threads
+    self.fix_params = fix_params or {}
+
+    self.float_precision = float_precision
+    self.float_resolution = np.finfo(self.float_precision).resolution
+    self.order = order
+    self.low_memory = low_memory
+
+    self.dequantize_on_the_fly = dequantize_on_the_fly
+
+    if self.dequantize_on_the_fly:
+        info = np.iinfo(self.ld_data[c].dtype)
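+        # e.g., for int8-quantized LD data this gives 2/254 = 1/127, which
+        # maps stored integers in [-127, 127] back to correlations in [-1, 1]: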
+        self.dequantize_scale = 2. / (info.max - (info.min + 1))
+    else:
+        self.dequantize_scale = 1.
+
+    self.use_cpp = use_cpp
+    self.use_blas = use_blas
+
+    self.optim_result = OptimizeResult()
+    self.verbose = verbose
+    self.history = {}
+    self.tracked_theta = tracked_theta or []
+

+ compute_eta()


Returns:

    The mean for the effect size under the variational posterior.

+ compute_pip()


Returns:

    The posterior inclusion probability.

+ compute_zeta()


Returns:

    The expectation of the squared effect size under the variational posterior.

+ e_step()


Run the E-Step of the Variational EM algorithm. Here, we update the variational parameters for each variant using coordinate ascent optimization techniques. The update equations are outlined in the Supplementary Material of the following paper:

+
+

Zabad S, Gravel S, Li Y. Fast and accurate Bayesian polygenic risk modeling with variational inference. Am J Hum Genet. 2023 May 4;110(5):741-761. doi: 10.1016/j.ajhg.2023.03.009. Epub 2023 Apr 7. PMID: 37030289; PMCID: PMC10183379.


+ elbo(sum_axis=None)


Compute the variational objective, the Evidence Lower-BOund (ELBO), from GWAS summary statistics and the reference LD data. This implementation assumes that the product of the LD matrix with the current estimate of the effect sizes is already computed and stored in the q dictionary. If this is not the case, we recommend computing q first and then calling this method.


Parameters:

    sum_axis (default: None): The axis along which to sum the ELBO. If None, the ELBO is returned as a scalar.

+ fit(max_iter=1000, theta_0=None, param_0=None, continued=False, min_iter=3, f_abs_tol=1e-06, x_abs_tol=1e-07, drop_r_tol=0.01, patience=5)


A convenience method to fit the model using the Variational EM algorithm.


Parameters:

    max_iter (default: 1000): Maximum number of iterations.
    theta_0 (default: None): A dictionary of values to initialize the hyperparameters.
    param_0 (default: None): A dictionary of values to initialize the variational parameters.
    continued (default: False): If True, continue the model fitting for more iterations from the current parameters instead of starting over.
    min_iter (default: 3): The minimum number of iterations to run before checking for convergence.
    f_abs_tol (default: 1e-06): The absolute tolerance threshold for the objective (ELBO).
    x_abs_tol (default: 1e-07): The absolute tolerance threshold for the variational parameters.
    drop_r_tol (default: 0.01): The relative tolerance for a drop in the ELBO to be considered a red flag. Around convergence, the objective often fluctuates due to numerical errors; this threshold differentiates such random fluctuations from genuine drops in the objective.
    patience (default: 5): The maximum number of times the objective is allowed to drop before termination.
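For example, a brief sketch of resuming an unconverged fit (assuming `model` is a VIPRS instance as above); `continued=True` keeps the current parameter state instead of re-initializing:

    model.fit(max_iter=500)
    # Resume from the current parameters rather than starting over:
    model.fit(max_iter=500, continued=True)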

+ get_average_effect_size_variance()


Returns:

    The average per-SNP variance for the prior mixture components.

+ get_heritability()


Returns:

    An estimate of the SNP heritability, or proportion of variance explained by SNPs.

+ get_null_pi(chrom=None)


Parameters:

    chrom (default: None): If provided, get the mixing proportion for the null component on a given chromosome.

Returns:

    The value of the prior probability of a variant being null, 1 - pi.

+ get_pi(chrom=None)


Parameters:

    chrom (default: None): Get the value of pi for a given chromosome.

Returns:

    The value of the prior probability of a variant being causal, pi.

+ get_proportion_causal()


Returns:

    The proportion of causal variants in the model.

+ get_sigma_epsilon()


Returns:

    The value of the residual variance, sigma_epsilon.

+ get_tau_beta(chrom=None)


Parameters:

    chrom (default: None): Get the value of tau_beta for a given chromosome.

Returns:

    The value of the prior precision on the effect size(s), tau_beta.

+ init_optim_meta()


Initialize the various quantities/objects that keep track of the optimization process. This method initializes the "history" object (which tracks the objective and any other hyperparameters requested by the user), in addition to the OptimizeResult object.

Source code in viprs/model/VIPRS.py:
def init_optim_meta(self):
+    """
+    Initialize the various quantities/objects to keep track of the optimization process.
+     This method initializes the "history" object (which keeps track of the objective + other
+     hyperparameters requested by the user), in addition to the OptimizeResult objects.
+    """
+
+    self.history = {
+        'ELBO': [],
+    }
+
+    for tt in self.tracked_theta:
+        self.history[tt] = []
+
+    self.optim_result.reset()
+

+ initialize(theta_0=None, param_0=None)


A convenience method to initialize all the objects associated with the model.


Parameters:

    theta_0 (default: None): A dictionary of initial values for the hyperparameters theta.
    param_0 (default: None): A dictionary of initial values for the variational parameters.
Source code in viprs/model/VIPRS.py:
def initialize(self, theta_0=None, param_0=None):
+    """
+    A convenience method to initialize all the objects associated with the model.
+    :param theta_0: A dictionary of initial values for the hyperparameters theta
+    :param param_0: A dictionary of initial values for the variational parameters
+    """
+
+    logging.debug("> Initializing model parameters")
+
+    self.initialize_theta(theta_0)
+    self.initialize_variational_parameters(param_0)
+    self.init_optim_meta()
+

+ initialize_theta(theta_0=None)


Initialize the global hyperparameters of the model.


Parameters:

    theta_0 (default: None): A dictionary of initial values for the hyperparameters theta.
Source code in viprs/model/VIPRS.py:
def initialize_theta(self, theta_0=None):
+    """
+    Initialize the global hyperparameters of the model.
+    :param theta_0: A dictionary of initial values for the hyperparameters theta
+    """
+
+    if theta_0 is not None and self.fix_params is not None:
+        theta_0.update(self.fix_params)
+    elif self.fix_params is not None:
+        theta_0 = self.fix_params
+    elif theta_0 is None:
+        theta_0 = {}
+
+    # ----------------------------------------------
+    # (1) If 'pi' is not set, initialize from a uniform
+    if 'pi' not in theta_0:
+        self.pi = np.random.uniform(low=max(0.005, 1. / self.n_snps), high=.1)
+    else:
+        self.pi = theta_0['pi']
+
+    # ----------------------------------------------
+    # (2) Initialize sigma_epsilon and tau_beta
+    # Assuming that the genotype and phenotype are normalized,
+    # these two quantities are conceptually linked.
+    # The initialization routine here assumes that:
+    # Var(y) = h2 + sigma_epsilon
+    # Where, by assumption, Var(y) = 1,
+    # And h2 ~= pi*M/tau_beta
+
+    if 'sigma_epsilon' not in theta_0:
+        if 'tau_beta' not in theta_0:
+
+            # If neither tau_beta nor sigma_epsilon are given,
+            # then initialize using the SNP heritability estimate
+
+            try:
+                naive_h2g = np.clip(simple_ldsc(self.gdl), a_min=1e-3, a_max=1. - 1e-3)
+            except Exception:
+                # If the LDSC estimate fails, fall back to a random initialization:
+                naive_h2g = np.random.uniform(low=.001, high=.999)
+
+            self.sigma_epsilon = 1. - naive_h2g
+            self.tau_beta = self.pi * self.n_snps / naive_h2g
+        else:
+
+            # If tau_beta is given, use it to initialize sigma_epsilon
+
+            self.tau_beta = theta_0['tau_beta']
+            self.sigma_epsilon = np.clip(1. - (self.pi * self.n_snps / self.tau_beta),
+                                         a_min=self.float_resolution,
+                                         a_max=1. - self.float_resolution)
+    else:
+
+        # If sigma_epsilon is given, use it in the initialization
+
+        self.sigma_epsilon = theta_0['sigma_epsilon']
+
+        if 'tau_beta' in theta_0:
+            self.tau_beta = theta_0['tau_beta']
+        else:
+            self.tau_beta = (self.pi * self.n_snps) / (1. - self.sigma_epsilon)
+
+    # Cast all the hyperparameters to conform to the precision set by the user:
+    self.sigma_epsilon = np.dtype(self.float_precision).type(self.sigma_epsilon)
+    self.tau_beta = np.dtype(self.float_precision).type(self.tau_beta)
+    self.pi = np.dtype(self.float_precision).type(self.pi)
+    self._sigma_g = np.dtype(self.float_precision).type(0.)
+

+ initialize_variational_parameters(param_0=None)


Initialize the variational parameters of the model.


Parameters:

    param_0 (default: None): A dictionary of initial values for the variational parameters.
+ +
+ Source code in viprs/model/VIPRS.py +
def initialize_variational_parameters(self, param_0=None):
+    """
+    Initialize the variational parameters of the model.
+    :param param_0: A dictionary of initial values for the variational parameters
+    """
+
+    param_0 = param_0 or {}
+
+    self.var_mu = {}
+    self.var_tau = {}
+    self.var_gamma = {}
+
+    for c, shapes in self.shapes.items():
+
+        # Initialize the variational parameters according to the derived update equations,
+        # ignoring correlations between SNPs.
+        if 'tau' in param_0:
+            self.var_tau[c] = param_0['tau'][c]
+        else:
+            self.var_tau[c] = (self.Nj[c] / self.sigma_epsilon) + self.tau_beta
+
+        self.var_tau[c] = self.var_tau[c].astype(self.float_precision, order=self.order, copy=False)
+
+        if 'mu' in param_0:
+            self.var_mu[c] = param_0['mu'][c].astype(self.float_precision, order=self.order)
+        else:
+            self.var_mu[c] = np.zeros(shapes, dtype=self.float_precision, order=self.order)
+
+        if 'gamma' in param_0:
+            self.var_gamma[c] = param_0['gamma'][c].astype(self.float_precision, order=self.order)
+        else:
+            pi = self.get_pi(c)
+            if isinstance(self.pi, dict):
+                self.var_gamma[c] = pi.astype(self.float_precision, order=self.order)
+            else:
+                self.var_gamma[c] = pi*np.ones(shapes, dtype=self.float_precision, order=self.order)
+
+    self.eta = self.compute_eta()
+    self.zeta = self.compute_zeta()
+    self.eta_diff = {c: np.zeros_like(eta, dtype=self.float_precision) for c, eta in self.eta.items()}
+    self.q = {c: np.zeros_like(eta, dtype=self.float_precision) for c, eta in self.eta.items()}
+    self._log_var_tau = {c: np.log(self.var_tau[c]) for c in self.var_tau}
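To make the default initialization concrete, here is a small sketch of the per-SNP precision formula var_tau = Nj / sigma_epsilon + tau_beta, with made-up values for Nj (per-SNP sample sizes), sigma_epsilon, and tau_beta:

import numpy as np

Nj = np.array([50_000., 48_000., 50_000.])   # hypothetical per-SNP GWAS sample sizes
sigma_epsilon, tau_beta = 0.75, 40_000.

var_tau = (Nj / sigma_epsilon) + tau_beta    # variational posterior precision per SNP
var_mu = np.zeros_like(Nj)                   # posterior means start at zero
print(var_tau)                               # approx. [106666.67, 104000.0, 106666.67]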

m_step()

Run the M-Step of the Variational EM algorithm. Here, we update the hyperparameters of the model by calling the update function for each hyperparameter separately.

Source code in viprs/model/VIPRS.py:
def m_step(self):
+    """
+    Run the M-Step of the Variational EM algorithm.
+    Here, we update the hyperparameters of the model, by simply calling
+    the update functions for each hyperparameter separately.
+
+    """
+
+    self.update_pi()
+    self.update_tau_beta()
+    self._update_sigma_g()
+    self.update_sigma_epsilon()
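For context, a minimal sketch of how the E- and M-steps typically alternate in a variational EM loop. This is schematic only: the actual fitting loop lives elsewhere in the library (e.g. a fit method) and its convergence logic is not shown here, so the names prev_objective, max_iter, and tol are illustrative assumptions:

# Schematic variational EM loop (sketch, not the library's fit method):
# for it in range(max_iter):
#     model.e_step()      # update variational parameters per variant
#     model.m_step()      # update pi, tau_beta, sigma_g, sigma_epsilon
#     if abs(model.objective() - prev_objective) < tol:
#         break           # ELBO has converged
#     prev_objective = model.objective()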

objective()

The optimization objective for the variational inference problem. For the VIPRS method, the objective is the Evidence Lower-Bound (ELBO).

See Also: elbo

Source code in viprs/model/VIPRS.py:
def objective(self):
+    """
+    The optimization objective for the variational inference problem. The objective
+    for the VIPRS method is the Evidence Lower-Bound (ELBO) in this case.
+
+    !!! seealso "See Also"
+        * [elbo][viprs.model.VIPRS.VIPRS.elbo]
+
+
+    """
+    return self.elbo()

to_theta_table()

Returns:

    A pandas DataFrame containing information about the estimated hyperparameters of the model.

Source code in viprs/model/VIPRS.py:
def to_theta_table(self):
+    """
+    :return: A `pandas` DataFrame containing information about the estimated hyperparameters of the model.
+    """
+
+    theta_table = [
+        {'Parameter': 'Residual_variance', 'Value': self.sigma_epsilon},
+        {'Parameter': 'Heritability', 'Value': self.get_heritability()},
+        {'Parameter': 'Proportion_causal', 'Value': self.get_proportion_causal()},
+        {'Parameter': 'Average_effect_variance', 'Value': self.get_average_effect_size_variance()}
+    ]
+
+    if isinstance(self.tau_beta, dict):
+        taus = dict_mean(self.tau_beta, axis=0)
+    else:
+        taus = self.tau_beta
+
+    try:
+        taus = list(taus)
+        for i in range(len(taus)):
+            theta_table.append({'Parameter': f'tau_beta_{i+1}', 'Value': taus[i]})
+    except TypeError:
+        theta_table.append({'Parameter': 'tau_beta', 'Value': taus})
+
+    import pandas as pd
+
+    return pd.DataFrame(theta_table)
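A hypothetical usage sketch, assuming a fitted model object named model (the printed values are illustrative, not real output):

theta_df = model.to_theta_table()
print(theta_df)
#            Parameter   Value
# 0  Residual_variance    0.75
# 1       Heritability    0.25
# ...
theta_df.to_csv('theta.tsv', sep='\t', index=False)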

update_pi()

Update the prior probability of a variant being causal (the proportion of causal variants), pi.

Source code in viprs/model/VIPRS.py:
def update_pi(self):
+    """
+    Update the prior probability of a variant being causal, or the proportion of causal variants, `pi`.
+    """
+
+    if 'pi' not in self.fix_params:
+
+        # Get the average of the gammas:
+        self.pi = dict_mean(self.var_gamma, axis=0)

update_posterior_moments()

A convenience method to update the dictionaries containing the posterior moments, including the PIP and the posterior mean and variance for the effect size.

Source code in viprs/model/VIPRS.py:
def update_posterior_moments(self):
+    """
+    A convenience method to update the dictionaries containing the posterior moments,
+    including the PIP and posterior mean and variance for the effect size.
+    """
+
+    self.pip = {c: pip.copy() for c, pip in self.compute_pip().items()}
+    self.post_mean_beta = {c: eta.copy() for c, eta in self.eta.items()}
+    self.post_var_beta = {c: zeta - self.eta[c]**2 for c, zeta in self.zeta.items()}

update_sigma_epsilon()

Update the global residual variance parameter, sigma_epsilon.

Source code in viprs/model/VIPRS.py:
def update_sigma_epsilon(self):
+    """
+    Update the global residual variance parameter, `sigma_epsilon`.
+    """
+
+    if 'sigma_epsilon' not in self.fix_params:
+
+        sig_eps = 0.
+
+        for c, _ in self.shapes.items():
+            sig_eps -= 2.*self.std_beta[c].dot(self.eta[c])
+
+        self.sigma_epsilon = 1. + sig_eps + self._sigma_g
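The update above computes sigma_epsilon = 1 - 2 * sum_c (std_beta_c . eta_c) + sigma_g. A toy numeric sketch with made-up arrays (std_beta: standardized marginal effects; eta: posterior mean effects; sigma_g: an assumed value for the quadratic term tracked in _sigma_g):

import numpy as np

std_beta = np.array([0.02, -0.01, 0.03])   # toy standardized marginal effects
eta = np.array([0.01, -0.005, 0.02])       # toy posterior mean effect sizes
sigma_g = 0.001                            # assumed value of the quadratic term

sigma_epsilon = 1. - 2. * std_beta.dot(eta) + sigma_g
print(round(sigma_epsilon, 5))             # 0.9993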

update_tau_beta()

Update the prior precision (inverse variance) for the effect size, tau_beta.

Source code in viprs/model/VIPRS.py:
def update_tau_beta(self):
+    """
+    Update the prior precision (inverse variance) for the effect size, `tau_beta`.
+    """
+
+    if 'tau_beta' not in self.fix_params:
+
+        # tau_beta estimate:
+        self.tau_beta = (self.pi * self.m / dict_sum(self.zeta, axis=0)).astype(self.float_precision)
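The estimate above is tau_beta = pi * M / sum(zeta): the prior precision implied by the current polygenicity and the total expected squared effect size. A toy check with made-up numbers:

pi, M = 0.01, 1_000_000
total_zeta = 0.25        # hypothetical sum of E[beta^2] over all SNPs

tau_beta = pi * M / total_zeta
print(tau_beta)          # 40000.0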

update_theta_history()

A convenience method to update the history of the hyperparameters of the model, if the user requested that they be tracked.

Source code in viprs/model/VIPRS.py:
def update_theta_history(self):
+    """
+    A convenience method to update the history of the hyperparameters of the model,
+    if the user requested that they should be tracked.
+    """
+
+    for tt in self.tracked_theta:
+        if tt == 'pi':
+            self.history['pi'].append(self.get_proportion_causal())
+        elif tt == 'pis':
+            self.history['pis'].append(self.pi)
+        elif tt == 'heritability':
+            self.history['heritability'].append(self.get_heritability())
+        elif tt == 'sigma_epsilon':
+            self.history['sigma_epsilon'].append(self.sigma_epsilon)
+        elif tt == 'tau_beta':
+            self.history['tau_beta'].append(self.tau_beta)
+        elif tt == 'sigma_g':
+            self.history['sigma_g'].append(self._sigma_g)

write_inferred_theta(f_name, sep='\t')

A convenience method to write the inferred (and fixed) hyperparameters of the model to file.

Parameters:

    f_name (required): The file name.
    sep (default: '\t'): The separator for the hyperparameter file.

Source code in viprs/model/VIPRS.py:
def write_inferred_theta(self, f_name, sep="\t"):
+    """
+    A convenience method to write the inferred (and fixed) hyperparameters of the model to file.
+    :param f_name: The file name
+    :param sep: The separator for the hyperparameter file.
+    """
+
+    # Write the table to file:
+    self.to_theta_table().to_csv(f_name, sep=sep, index=False)
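A hypothetical usage sketch, assuming a fitted model object named model (the file name is illustrative):

model.write_inferred_theta('inferred_theta.tsv')   # tab-separated by default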
\ No newline at end of file
diff --git a/api/model/VIPRSMix/index.html b/api/model/VIPRSMix/index.html
new file mode 100644
index 0000000..3d358e0
--- /dev/null
+++ b/api/model/VIPRSMix/index.html
@@ -0,0 +1,2727 @@

VIPRSMix - Variational Inference of Polygenic Risk Scores (VIPRS)

VIPRSMix


VIPRSMix

Bases: VIPRS

A class for the Variational Inference for Polygenic Risk Scores (VIPRS) model parametrized with the sparse mixture prior on the effect sizes. The class inherits many of the methods and attributes from the VIPRS class unchanged. However, there are many important updates and changes to the model, including the dimensionality of the arrays representing the variational parameters.

Details for the algorithm can be found in the Supplementary Material of the following paper:

Zabad S, Gravel S, Li Y. Fast and accurate Bayesian polygenic risk modeling with variational inference. Am J Hum Genet. 2023 May 4;110(5):741-761. doi: 10.1016/j.ajhg.2023.03.009. Epub 2023 Apr 7. PMID: 37030289; PMCID: PMC10183379.

Attributes:

    K: The number of causal (i.e. non-null) components in the mixture prior (minimum 1). When K=1, this effectively reduces VIPRSMix to the VIPRS model.
    d: Multiplier for the prior on the effect size (vector of size K).

Source code in viprs/model/VIPRSMix.py:
class VIPRSMix(VIPRS):
+    """
+    A class for the Variational Inference for Polygenic Risk Scores (VIPRS) model
+    parametrized with the sparse mixture prior on the effect sizes. The class inherits
+    many of the methods and attributes from the `VIPRS` class unchanged. However,
+    there are many important updates and changes to the model, including the dimensionality
+    of the arrays representing the variational parameters.
+
+    Details for the algorithm can be found in the Supplementary Material of the following paper:
+
+    > Zabad S, Gravel S, Li Y. Fast and accurate Bayesian polygenic risk modeling with variational inference.
+    Am J Hum Genet. 2023 May 4;110(5):741-761. doi: 10.1016/j.ajhg.2023.03.009.
+    Epub 2023 Apr 7. PMID: 37030289; PMCID: PMC10183379.
+
+    :ivar K: The number of causal (i.e. non-null) components in the mixture prior (minimum 1). When `K=1`, this
+    effectively reduces `VIPRSMix` to the `VIPRS` model.
+    :ivar d: Multiplier for the prior on the effect size (vector of size K).
+
+    """
+
+    def __init__(self,
+                 gdl,
+                 K=1,
+                 prior_multipliers=None,
+                 **kwargs):
+
+        """
+        :param gdl: An instance of `GWADataLoader`
+        :param K: The number of causal (i.e. non-null) components in the mixture prior (minimum 1). When `K=1`, this
+            effectively reduces `VIPRSMix` to the `VIPRS` model.
+        :param prior_multipliers: Multiplier for the prior on the effect size (vector of size K).
+        :param kwargs: Additional keyword arguments to pass to the VIPRS model.
+        """
+
+        # Make sure that the matrices follow the C-contiguous order:
+        kwargs['order'] = 'C'
+
+        super().__init__(gdl, **kwargs)
+
+        # Sanity checks:
+        assert K > 0  # Check that there is at least 1 causal component
+        self.K = K
+
+        if prior_multipliers is not None:
+            assert len(prior_multipliers) == K
+            self.d = np.array(prior_multipliers).astype(self.float_precision)
+        else:
+            self.d = 2**np.linspace(-min(K - 1, 7), 0, K).astype(self.float_precision)
+
+        # Populate/update relevant fields:
+        self.shapes = {c: (shp, self.K) for c, shp in self.shapes.items()}
+        self.Nj = {c: Nj[:, None].astype(self.float_precision, order=self.order) for c, Nj in self.Nj.items()}
+
+    def initialize_theta(self, theta_0=None):
+        """
+        Initialize the global hyperparameters of the model
+        :param theta_0: A dictionary of initial values for the hyperparameters theta
+        """
+
+        if theta_0 is not None and self.fix_params is not None:
+            theta_0.update(self.fix_params)
+        elif self.fix_params is not None:
+            theta_0 = self.fix_params
+        elif theta_0 is None:
+            theta_0 = {}
+
+        # ----------------------------------------------
+        # (1) Initialize pi from a uniform
+        if 'pis' in theta_0:
+            self.pi = theta_0['pis']
+        else:
+            if 'pi' in theta_0:
+                overall_pi = theta_0['pi']
+            else:
+                overall_pi = np.random.uniform(low=max(0.005, 1. / self.n_snps), high=.1)
+
+            self.pi = overall_pi*np.random.dirichlet(np.ones(self.K))
+
+        # ----------------------------------------------
+        # (2) Initialize sigma_epsilon and tau_beta
+        # Assuming that the genotype and phenotype are normalized,
+        # these two quantities are conceptually linked.
+        # The initialization routine here assumes that:
+        # Var(y) = h2 + sigma_epsilon
+        # Where, by assumption, Var(y) = 1,
+        # And h2 ~= M * sum_k pi_k / tau_k
+
+        if 'sigma_epsilon' not in theta_0:
+
+            if 'tau_betas' in theta_0:
+
+                # If tau_betas are given, use them to initialize sigma_epsilon
+
+                self.tau_beta = theta_0['tau_betas']
+
+                self.sigma_epsilon = np.clip(1. - np.dot(1./self.tau_beta, self.pi),
+                                             a_min=self.float_resolution,
+                                             a_max=1. - self.float_resolution)
+
+            elif 'tau_beta' in theta_0:
+                # NOTE: Here, we assume the provided `tau_beta` is a scalar.
+                # This is different from `tau_betas`
+
+                assert self.d is not None
+
+                self.tau_beta = theta_0['tau_beta'] * self.d
+                # Use the provided tau_beta to initialize sigma_epsilon.
+                # First, we derive a naive estimate of the heritability, based on the following equation:
+                # h2g/M = \sum_k pi_k / tau_k
+                # Where the per-SNP heritability is defined by the sum over the mixtures.
+
+                # Step (1): Given the provided tau_beta and associated multipliers,
+                # obtain a naive estimate of the heritability:
+                h2g_estimate = (self.n_snps*self.pi/self.tau_beta).sum()
+                # Step (2): Set sigma_epsilon to 1 - h2g_estimate:
+                self.sigma_epsilon = np.clip(1. - h2g_estimate,
+                                             a_min=self.float_resolution,
+                                             a_max=1. - self.float_resolution)
+
+            else:
+                # If neither sigma_beta nor sigma_epsilon are given,
+                # then initialize using the SNP heritability estimate based on summary statistics
+
+                try:
+                    naive_h2g = np.clip(simple_ldsc(self.gdl), 1e-3, 1. - 1e-3)
+                except Exception:
+                    naive_h2g = np.random.uniform(low=.001, high=.999)
+
+                self.sigma_epsilon = 1. - naive_h2g
+
+                global_tau = (self.n_snps * np.dot(1./self.d, self.pi) / naive_h2g)
+
+                self.tau_beta = self.d*global_tau
+        else:
+
+            # If sigma_epsilon is given, use it in the initialization
+
+            self.sigma_epsilon = theta_0['sigma_epsilon']
+
+            # Initialize tau_betas
+            if 'tau_betas' in theta_0:
+                self.tau_beta = theta_0['tau_betas']
+            elif 'tau_beta' in theta_0:
+                self.tau_beta = np.repeat(theta_0['tau_beta'], self.K)
+            else:
+                # If not provided, initialize using sigma_epsilon value
+                global_tau = (self.n_snps * np.dot(1./self.d, self.pi) / (1. - self.sigma_epsilon))
+
+                self.tau_beta = self.d * global_tau
+
+        # Cast all the hyperparameters to conform to the precision set by the user:
+        self.sigma_epsilon = np.dtype(self.float_precision).type(self.sigma_epsilon)
+        self.tau_beta = np.dtype(self.float_precision).type(self.tau_beta)
+        self.pi = np.dtype(self.float_precision).type(self.pi)
+
+    def e_step(self):
+        """
+        Run the E-Step of the Variational EM algorithm.
+        Here, we update the variational parameters for each variant using coordinate
+        ascent optimization techniques. The update equations are outlined in
+        the Supplementary Material of the following paper:
+
+        > Zabad S, Gravel S, Li Y. Fast and accurate Bayesian polygenic risk modeling with variational inference.
+        Am J Hum Genet. 2023 May 4;110(5):741-761. doi: 10.1016/j.ajhg.2023.03.009.
+        Epub 2023 Apr 7. PMID: 37030289; PMCID: PMC10183379.
+        """
+
+        for c, shapes in self.shapes.items():
+
+            # Get the priors:
+            tau_beta = self.get_tau_beta(c)
+            pi = self.get_pi(c)
+
+            # Updates for tau variational parameters:
+            self.var_tau[c] = (self.Nj[c] / self.sigma_epsilon) + tau_beta
+
+            if isinstance(self.pi, dict):
+                log_null_pi = (np.log(1. - self.pi[c].sum(axis=1)))
+            else:
+                log_null_pi = np.ones_like(self.eta[c])*np.log(1. - self.pi.sum())
+
+            # Compute some quantities that are needed for the per-SNP updates:
+            mu_mult = self.Nj[c] / (self.var_tau[c] * self.sigma_epsilon)
+            u_logs = np.log(pi) - np.log(1. - pi) + .5 * (np.log(tau_beta) - np.log(self.var_tau[c]))
+
+            if self.use_cpp:
+                cpp_e_step_mixture(self.ld_left_bound[c],
+                                   self.ld_indptr[c],
+                                   self.ld_data[c],
+                                   self.std_beta[c],
+                                   self.var_gamma[c],
+                                   self.var_mu[c],
+                                   self.eta[c],
+                                   self.q[c],
+                                   self.eta_diff[c],
+                                   log_null_pi,
+                                   u_logs,
+                                   0.5*self.var_tau[c],
+                                   mu_mult,
+                                   self.dequantize_scale,
+                                   self.threads,
+                                   self.use_blas,
+                                   self.low_memory)
+            else:
+                e_step_mixture(self.ld_left_bound[c],
+                               self.ld_indptr[c],
+                               self.ld_data[c],
+                               self.std_beta[c],
+                               self.var_gamma[c],
+                               self.var_mu[c],
+                               self.eta[c],
+                               self.q[c],
+                               self.eta_diff[c],
+                               log_null_pi,
+                               u_logs,
+                               0.5*self.var_tau[c],
+                               mu_mult,
+                               self.threads,
+                               self.use_blas,
+                               self.low_memory)
+
+        self.zeta = self.compute_zeta()
+
+    def update_pi(self):
+        """
+        Update the prior mixing proportions `pi`
+        """
+
+        if 'pis' not in self.fix_params:
+
+            pi_estimate = dict_sum(self.var_gamma, axis=0)
+
+            if 'pi' in self.fix_params:
+                # If the user provides an estimate for the total proportion of causal variants,
+                # update the pis such that the proportion of SNPs in the null component becomes 1. - pi.
+                pi_estimate = self.fix_params['pi']*pi_estimate / pi_estimate.sum()
+            else:
+                pi_estimate /= self.n_snps
+
+            # Set pi to the new estimate:
+            self.pi = pi_estimate
+
+    def update_tau_beta(self):
+        """
+        Update the prior precision (inverse variance) for the effect sizes, `tau_beta`
+        """
+
+        if 'tau_betas' not in self.fix_params:
+
+            # If a list of multipliers is provided,
+            # estimate the global sigma_beta and then multiply it
+            # by the per-component multiplier to get the final sigma_betas.
+
+            zetas = sum(self.compute_zeta(sum_axis=0).values())
+
+            tau_beta_estimate = np.sum(self.pi)*self.m / np.dot(self.d, zetas)
+            tau_beta_estimate = self.d*tau_beta_estimate
+
+            self.tau_beta = np.clip(tau_beta_estimate, a_min=1., a_max=None)
+
+    def get_null_pi(self, chrom=None):
+        """
+        Get the proportion of SNPs in the null component
+        :param chrom: If provided, get the mixing proportion for the null component on a given chromosome.
+        :return: The value of the mixing proportion for the null component
+        """
+
+        pi = self.get_pi(chrom=chrom)
+
+        if isinstance(pi, dict):
+            return {c: 1. - c_pi.sum(axis=1) for c, c_pi in pi.items()}
+        else:
+            return 1. - np.sum(pi)
+
+    def get_proportion_causal(self):
+        """
+        :return: The proportion of variants in the non-null components.
+        """
+        if isinstance(self.pi, dict):
+            return dict_mean({c: pis.sum(axis=1) for c, pis in self.pi.items()})
+        else:
+            return np.sum(self.pi)
+
+    def get_average_effect_size_variance(self):
+        """
+        :return: The average per-SNP variance for the prior mixture components
+        """
+
+        avg_sigma = super().get_average_effect_size_variance()
+
+        try:
+            return avg_sigma.sum()
+        except Exception:
+            return avg_sigma
+
+    def compute_pip(self):
+        """
+        :return: The posterior inclusion probability
+        """
+        return {c: gamma.sum(axis=1) for c, gamma in self.var_gamma.items()}
+
+    def compute_eta(self):
+        """
+        :return: The mean for the effect size under the variational posterior.
+        """
+        return {c: (v * self.var_mu[c]).sum(axis=1) for c, v in self.var_gamma.items()}
+
+    def compute_zeta(self, sum_axis=1):
+        """
+        :return: The expectation of the squared effect size under the variational posterior.
+        """
+        return {c: (v * (self.var_mu[c] ** 2 + (1./self.var_tau[c]))).sum(axis=sum_axis)
+                for c, v in self.var_gamma.items()}
+
+    def to_theta_table(self):
+        """
+        :return: A `pandas` DataFrame containing information about the estimated hyperparameters of the model.
+        """
+
+        table = super().to_theta_table()
+
+        extra_theta = []
+
+        if isinstance(self.pi, dict):
+            pis = list(dict_mean(self.pi, axis=0))
+        else:
+            pis = self.pi
+
+        for i in range(self.K):
+            extra_theta.append({'Parameter': f'pi_{i + 1}', 'Value': pis[i]})
+
+        return pd.concat([table, pd.DataFrame(extra_theta)])
+

__init__(gdl, K=1, prior_multipliers=None, **kwargs)

Parameters:

    gdl (required): An instance of GWADataLoader.
    K (default: 1): The number of causal (i.e. non-null) components in the mixture prior (minimum 1). When K=1, this effectively reduces VIPRSMix to the VIPRS model.
    prior_multipliers (default: None): Multiplier for the prior on the effect size (vector of size K).
    kwargs (default: {}): Additional keyword arguments to pass to the VIPRS model.

Source code in viprs/model/VIPRSMix.py:
def __init__(self,
+             gdl,
+             K=1,
+             prior_multipliers=None,
+             **kwargs):
+
+    """
+    :param gdl: An instance of `GWADataLoader`
+    :param K: The number of causal (i.e. non-null) components in the mixture prior (minimum 1). When `K=1`, this
+        effectively reduces `VIPRSMix` to the `VIPRS` model.
+    :param prior_multipliers: Multiplier for the prior on the effect size (vector of size K).
+    :param kwargs: Additional keyword arguments to pass to the VIPRS model.
+    """
+
+    # Make sure that the matrices follow the C-contiguous order:
+    kwargs['order'] = 'C'
+
+    super().__init__(gdl, **kwargs)
+
+    # Sanity checks:
+    assert K > 0  # Check that there is at least 1 causal component
+    self.K = K
+
+    if prior_multipliers is not None:
+        assert len(prior_multipliers) == K
+        self.d = np.array(prior_multipliers).astype(self.float_precision)
+    else:
+        self.d = 2**np.linspace(-min(K - 1, 7), 0, K).astype(self.float_precision)
+
+    # Populate/update relevant fields:
+    self.shapes = {c: (shp, self.K) for c, shp in self.shapes.items()}
+    self.Nj = {c: Nj[:, None].astype(self.float_precision, order=self.order) for c, Nj in self.Nj.items()}
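To illustrate the default prior multipliers, here is a standalone sketch of the expression used above when prior_multipliers is not provided (not a call into the library):

import numpy as np

K = 3
d = 2. ** np.linspace(-min(K - 1, 7), 0, K)
print(d)   # [0.25, 0.5, 1.0] (up to formatting): component scales relative to the largest slab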

compute_eta()

Returns:

    The mean for the effect size under the variational posterior.

Source code in viprs/model/VIPRSMix.py:
def compute_eta(self):
+    """
+    :return: The mean for the effect size under the variational posterior.
+    """
+    return {c: (v * self.var_mu[c]).sum(axis=1) for c, v in self.var_gamma.items()}

compute_pip()

Returns:

    The posterior inclusion probability.

Source code in viprs/model/VIPRSMix.py:
def compute_pip(self):
+    """
+    :return: The posterior inclusion probability
+    """
+    return {c: gamma.sum(axis=1) for c, gamma in self.var_gamma.items()}
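A toy sketch of how the PIP aggregates the per-component responsibilities (a made-up gamma matrix of shape (SNPs, K), not real model output):

import numpy as np

var_gamma = np.array([[0.10, 0.05],    # each row: responsibilities for the K causal components
                      [0.60, 0.30]])
pip = var_gamma.sum(axis=1)
print(pip)   # [0.15, 0.9]: total probability of being causal under any component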

compute_zeta(sum_axis=1)

Returns:

    The expectation of the squared effect size under the variational posterior.

Source code in viprs/model/VIPRSMix.py:
def compute_zeta(self, sum_axis=1):
+    """
+    :return: The expectation of the squared effect size under the variational posterior.
+    """
+    return {c: (v * (self.var_mu[c] ** 2 + (1./self.var_tau[c]))).sum(axis=sum_axis)
+            for c, v in self.var_gamma.items()}

e_step()

Run the E-Step of the Variational EM algorithm. Here, we update the variational parameters for each variant using coordinate ascent optimization techniques. The update equations are outlined in the Supplementary Material of the following paper:

Zabad S, Gravel S, Li Y. Fast and accurate Bayesian polygenic risk modeling with variational inference. Am J Hum Genet. 2023 May 4;110(5):741-761. doi: 10.1016/j.ajhg.2023.03.009. Epub 2023 Apr 7. PMID: 37030289; PMCID: PMC10183379.

Source code in viprs/model/VIPRSMix.py:
def e_step(self):
+    """
+    Run the E-Step of the Variational EM algorithm.
+    Here, we update the variational parameters for each variant using coordinate
+    ascent optimization techniques. The update equations are outlined in
+    the Supplementary Material of the following paper:
+
+    > Zabad S, Gravel S, Li Y. Fast and accurate Bayesian polygenic risk modeling with variational inference.
+    Am J Hum Genet. 2023 May 4;110(5):741-761. doi: 10.1016/j.ajhg.2023.03.009.
+    Epub 2023 Apr 7. PMID: 37030289; PMCID: PMC10183379.
+    """
+
+    for c, shapes in self.shapes.items():
+
+        # Get the priors:
+        tau_beta = self.get_tau_beta(c)
+        pi = self.get_pi(c)
+
+        # Updates for tau variational parameters:
+        self.var_tau[c] = (self.Nj[c] / self.sigma_epsilon) + tau_beta
+
+        if isinstance(self.pi, dict):
+            log_null_pi = (np.log(1. - self.pi[c].sum(axis=1)))
+        else:
+            log_null_pi = np.ones_like(self.eta[c])*np.log(1. - self.pi.sum())
+
+        # Compute some quantities that are needed for the per-SNP updates:
+        mu_mult = self.Nj[c] / (self.var_tau[c] * self.sigma_epsilon)
+        u_logs = np.log(pi) - np.log(1. - pi) + .5 * (np.log(tau_beta) - np.log(self.var_tau[c]))
+
+        if self.use_cpp:
+            cpp_e_step_mixture(self.ld_left_bound[c],
+                               self.ld_indptr[c],
+                               self.ld_data[c],
+                               self.std_beta[c],
+                               self.var_gamma[c],
+                               self.var_mu[c],
+                               self.eta[c],
+                               self.q[c],
+                               self.eta_diff[c],
+                               log_null_pi,
+                               u_logs,
+                               0.5*self.var_tau[c],
+                               mu_mult,
+                               self.dequantize_scale,
+                               self.threads,
+                               self.use_blas,
+                               self.low_memory)
+        else:
+            e_step_mixture(self.ld_left_bound[c],
+                           self.ld_indptr[c],
+                           self.ld_data[c],
+                           self.std_beta[c],
+                           self.var_gamma[c],
+                           self.var_mu[c],
+                           self.eta[c],
+                           self.q[c],
+                           self.eta_diff[c],
+                           log_null_pi,
+                           u_logs,
+                           0.5*self.var_tau[c],
+                           mu_mult,
+                           self.threads,
+                           self.use_blas,
+                           self.low_memory)
+
+    self.zeta = self.compute_zeta()

get_average_effect_size_variance()

Returns:

    The average per-SNP variance for the prior mixture components.

Source code in viprs/model/VIPRSMix.py:
def get_average_effect_size_variance(self):
+    """
+    :return: The average per-SNP variance for the prior mixture components
+    """
+
+    avg_sigma = super().get_average_effect_size_variance()
+
+    try:
+        return avg_sigma.sum()
+    except Exception:
+        return avg_sigma

get_null_pi(chrom=None)

Get the proportion of SNPs in the null component.

Parameters:

    chrom (default: None): If provided, get the mixing proportion for the null component on a given chromosome.

Returns:

    The value of the mixing proportion for the null component.

Source code in viprs/model/VIPRSMix.py:
def get_null_pi(self, chrom=None):
+    """
+    Get the proportion of SNPs in the null component
+    :param chrom: If provided, get the mixing proportion for the null component on a given chromosome.
+    :return: The value of the mixing proportion for the null component
+    """
+
+    pi = self.get_pi(chrom=chrom)
+
+    if isinstance(pi, dict):
+        return {c: 1. - c_pi.sum(axis=1) for c, c_pi in pi.items()}
+    else:
+        return 1. - np.sum(pi)
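For instance, with scalar mixing proportions across K=3 causal components (illustrative numbers), the null component gets the remaining mass:

import numpy as np

pi = np.array([0.01, 0.02, 0.07])   # toy mixing proportions for the causal components
print(1. - np.sum(pi))              # 0.9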

get_proportion_causal()

Returns:

    The proportion of variants in the non-null components.

Source code in viprs/model/VIPRSMix.py:
def get_proportion_causal(self):
+    """
+    :return: The proportion of variants in the non-null components.
+    """
+    if isinstance(self.pi, dict):
+        return dict_mean({c: pis.sum(axis=1) for c, pis in self.pi.items()})
+    else:
+        return np.sum(self.pi)

initialize_theta(theta_0=None)

Initialize the global hyperparameters of the model.

Parameters:

    theta_0 (default: None): A dictionary of initial values for the hyperparameters theta.

Source code in viprs/model/VIPRSMix.py:
def initialize_theta(self, theta_0=None):
+    """
+    Initialize the global hyperparameters of the model
+    :param theta_0: A dictionary of initial values for the hyperparameters theta
+    """
+
+    if theta_0 is not None and self.fix_params is not None:
+        theta_0.update(self.fix_params)
+    elif self.fix_params is not None:
+        theta_0 = self.fix_params
+    elif theta_0 is None:
+        theta_0 = {}
+
+    # ----------------------------------------------
+    # (1) Initialize pi from a uniform
+    if 'pis' in theta_0:
+        self.pi = theta_0['pis']
+    else:
+        if 'pi' in theta_0:
+            overall_pi = theta_0['pi']
+        else:
+            overall_pi = np.random.uniform(low=max(0.005, 1. / self.n_snps), high=.1)
+
+        self.pi = overall_pi*np.random.dirichlet(np.ones(self.K))
+
+    # ----------------------------------------------
+    # (2) Initialize sigma_epsilon and tau_beta
+    # Assuming that the genotype and phenotype are normalized,
+    # these two quantities are conceptually linked.
+    # The initialization routine here assumes that:
+    # Var(y) = h2 + sigma_epsilon
+    # Where, by assumption, Var(y) = 1,
+    # And h2 ~= M * sum_k pi_k / tau_k
+
+    if 'sigma_epsilon' not in theta_0:
+
+        if 'tau_betas' in theta_0:
+
+            # If tau_betas are given, use them to initialize sigma_epsilon
+
+            self.tau_beta = theta_0['tau_betas']
+
+            self.sigma_epsilon = np.clip(1. - np.dot(1./self.tau_beta, self.pi),
+                                         a_min=self.float_resolution,
+                                         a_max=1. - self.float_resolution)
+
+        elif 'tau_beta' in theta_0:
+            # NOTE: Here, we assume the provided `tau_beta` is a scalar.
+            # This is different from `tau_betas`
+
+            assert self.d is not None
+
+            self.tau_beta = theta_0['tau_beta'] * self.d
+            # Use the provided tau_beta to initialize sigma_epsilon.
+            # First, we derive a naive estimate of the heritability, based on the following equation:
+            # h2g/M = \sum_k pi_k / tau_k
+            # Where the per-SNP heritability is defined by the sum over the mixtures.
+
+            # Step (1): Given the provided tau_beta and associated multipliers,
+            # obtain a naive estimate of the heritability:
+            h2g_estimate = (self.n_snps*self.pi/self.tau_beta).sum()
+            # Step (2): Set sigma_epsilon to 1 - h2g_estimate:
+            self.sigma_epsilon = np.clip(1. - h2g_estimate,
+                                         a_min=self.float_resolution,
+                                         a_max=1. - self.float_resolution)
+
+        else:
+            # If neither sigma_beta nor sigma_epsilon are given,
+            # then initialize using the SNP heritability estimate based on summary statistics
+
+            try:
+                naive_h2g = np.clip(simple_ldsc(self.gdl), 1e-3, 1. - 1e-3)
+            except Exception:
+                naive_h2g = np.random.uniform(low=.001, high=.999)
+
+            self.sigma_epsilon = 1. - naive_h2g
+
+            global_tau = (self.n_snps * np.dot(1./self.d, self.pi) / naive_h2g)
+
+            self.tau_beta = self.d*global_tau
+    else:
+
+        # If sigma_epsilon is given, use it in the initialization
+
+        self.sigma_epsilon = theta_0['sigma_epsilon']
+
+        # Initialize tau_betas
+        if 'tau_betas' in theta_0:
+            self.tau_beta = theta_0['tau_betas']
+        elif 'tau_beta' in theta_0:
+            self.tau_beta = np.repeat(theta_0['tau_beta'], self.K)
+        else:
+            # If not provided, initialize using sigma_epsilon value
+            global_tau = (self.n_snps * np.dot(1./self.d, self.pi) / (1. - self.sigma_epsilon))
+
+            self.tau_beta = self.d * global_tau
+
+    # Cast all the hyperparameters to conform to the precision set by the user:
+    self.sigma_epsilon = np.dtype(self.float_precision).type(self.sigma_epsilon)
+    self.tau_beta = np.dtype(self.float_precision).type(self.tau_beta)
+    self.pi = np.dtype(self.float_precision).type(self.pi)

to_theta_table()

Returns:

    A pandas DataFrame containing information about the estimated hyperparameters of the model.

Source code in viprs/model/VIPRSMix.py:
def to_theta_table(self):
+    """
+    :return: A `pandas` DataFrame containing information about the estimated hyperparameters of the model.
+    """
+
+    table = super().to_theta_table()
+
+    extra_theta = []
+
+    if isinstance(self.pi, dict):
+        pis = list(dict_mean(self.pi, axis=0))
+    else:
+        pis = self.pi
+
+    for i in range(self.K):
+        extra_theta.append({'Parameter': f'pi_{i + 1}', 'Value': pis[i]})
+
+    return pd.concat([table, pd.DataFrame(extra_theta)])

update_pi()

Update the prior mixing proportions, pi.

Source code in viprs/model/VIPRSMix.py:
def update_pi(self):
+    """
+    Update the prior mixing proportions `pi`
+    """
+
+    if 'pis' not in self.fix_params:
+
+        pi_estimate = dict_sum(self.var_gamma, axis=0)
+
+        if 'pi' in self.fix_params:
+            # If the user provides an estimate for the total proportion of causal variants,
+            # update the pis such that the proportion of SNPs in the null component becomes 1. - pi.
+            pi_estimate = self.fix_params['pi']*pi_estimate / pi_estimate.sum()
+        else:
+            pi_estimate /= self.n_snps
+
+        # Set pi to the new estimate:
+        self.pi = pi_estimate

update_tau_beta()

Update the prior precision (inverse variance) for the effect sizes, tau_beta.

Source code in viprs/model/VIPRSMix.py:
def update_tau_beta(self):
+    """
+    Update the prior precision (inverse variance) for the effect sizes, `tau_beta`
+    """
+
+    if 'tau_betas' not in self.fix_params:
+
+        # If a list of multipliers is provided,
+        # estimate the global sigma_beta and then multiply it
+        # by the per-component multiplier to get the final sigma_betas.
+
+        zetas = sum(self.compute_zeta(sum_axis=0).values())
+
+        tau_beta_estimate = np.sum(self.pi)*self.m / np.dot(self.d, zetas)
+        tau_beta_estimate = self.d*tau_beta_estimate
+
+        self.tau_beta = np.clip(tau_beta_estimate, a_min=1., a_max=None)
\ No newline at end of file
diff --git a/api/model/gridsearch/HyperparameterGrid/index.html b/api/model/gridsearch/HyperparameterGrid/index.html
new file mode 100644
index 0000000..5b56885
--- /dev/null
+++ b/api/model/gridsearch/HyperparameterGrid/index.html
@@ -0,0 +1,2048 @@

HyperparameterGrid - Variational Inference of Polygenic Risk Scores (VIPRS)

HyperparameterGrid


HyperparameterGrid

Bases: object

A utility class to facilitate generating grids for the hyperparameters of the standard VIPRS models. It is designed to interface with models that operate on grids of hyperparameters, such as VIPRSGridSearch and VIPRSBMA. The hyperparameters for the standard VIPRS model are:

    • sigma_epsilon: The residual variance for the phenotype.
    • tau_beta: The precision (inverse variance) of the prior for the effect sizes.
    • pi: The proportion of non-zero effect sizes (polygenicity).

Attributes:

    sigma_epsilon: A grid of values for the residual variance hyperparameter.
    tau_beta: A grid of values for the precision of the prior for the effect sizes.
    pi: A grid of values for the proportion of non-zero effect sizes.
    h2_est: An estimate of the heritability for the trait under consideration.
    h2_se: The standard error of the heritability estimate.
    n_snps: The number of common variants that may be relevant for this analysis.

Source code in viprs/model/gridsearch/HyperparameterGrid.py:
class HyperparameterGrid(object):
+    """
+    A utility class to facilitate generating grids for the
+    hyperparameters of the standard `VIPRS` models. It is designed to
+    interface with models that operate on grids of hyperparameters,
+    such as `VIPRSGridSearch` and `VIPRSBMA`. The hyperparameters for
+    the standard VIPRS model are:
+
+    * `sigma_epsilon`: The residual variance for the phenotype.
+    * `tau_beta`: The precision (inverse variance) of the prior for the effect sizes.
+    * `pi`: The proportion of non-zero effect sizes (polygenicity).
+
+    :ivar sigma_epsilon: A grid of values for the residual variance hyperparameter.
+    :ivar tau_beta: A grid of values for the precision of the prior for the effect sizes.
+    :ivar pi: A grid of values for the proportion of non-zero effect sizes.
+    :ivar h2_est: An estimate of the heritability for the trait under consideration.
+    :ivar h2_se: The standard error of the heritability estimate.
+    :ivar n_snps: The number of common variants that may be relevant for this analysis.
+
+    """
+
+    def __init__(self,
+                 sigma_epsilon_grid=None,
+                 sigma_epsilon_steps=None,
+                 tau_beta_grid=None,
+                 tau_beta_steps=None,
+                 pi_grid=None,
+                 pi_steps=None,
+                 h2_est=None,
+                 h2_se=None,
+                 n_snps=1e6):
+        """
+
+        Create a hyperparameter grid for the standard VIPRS model with the
+        spike-and-slab prior. The hyperparameters for this model are:
+
+        * `sigma_epsilon`: The residual variance
+        * `tau_beta`: The precision (inverse variance) of the prior for the effect sizes
+        * `pi`: The proportion of non-zero effect sizes
+
+        For each of these hyperparameters, we can provide a grid of values to search over.
+        If the heritability estimate and standard error (from e.g. LDSC) are provided,
+        we can generate grids for sigma_epsilon and tau_beta that are informed by these estimates.
+
+        For each hyperparameter to be included in the grid, the user must specify either the grid
+        itself, or the number of steps to use to generate the grid.
+
+        :param sigma_epsilon_grid: An array containing a grid of values for the sigma_epsilon hyperparameter.
+        :param sigma_epsilon_steps: The number of steps for the sigma_epsilon grid
+        :param tau_beta_grid: An array containing a grid of values for the tau_beta hyperparameter.
+        :param tau_beta_steps: The number of steps for the tau_beta grid
+        :param pi_grid: An array containing a grid of values for the pi hyperparameter
+        :param pi_steps: The number of steps for the pi grid
+        :param h2_est: An estimate of the heritability for the trait under consideration. If provided,
+        we can generate grids for some of the hyperparameters that are consistent with this estimate.
+        :param h2_se: The standard error of the heritability estimate. If provided, we can generate grids
+        for some of the hyperparameters that are consistent with this estimate.
+        :param n_snps: Number of common variants that may be relevant for this analysis. This estimate can
+        be used to generate grids that are based on this number.
+        """
+
+        # If the heritability estimate is not provided, use a reasonable default value of 0.1
+        # with a wide standard error of 0.1.
+        if h2_est is None:
+            self.h2_est = 0.1
+            self.h2_se = 0.1
+        else:
+            self.h2_est = h2_est
+            self.h2_se = h2_se
+
+        self.n_snps = n_snps
+        self._search_params = []
+
+        # Initialize the grid for sigma_epsilon:
+        self.sigma_epsilon = sigma_epsilon_grid
+        if self.sigma_epsilon is not None:
+            self._search_params.append('sigma_epsilon')
+        elif sigma_epsilon_steps is not None:
+            self.generate_sigma_epsilon_grid(steps=sigma_epsilon_steps)
+
+        # Initialize the grid for the tau_beta:
+        self.tau_beta = tau_beta_grid
+        if self.tau_beta is not None:
+            self._search_params.append('tau_beta')
+        elif tau_beta_steps is not None:
+            self.generate_tau_beta_grid(steps=tau_beta_steps)
+
+        # Initialize the grid for pi:
+        self.pi = pi_grid
+        if self.pi is not None:
+            self._search_params.append('pi')
+        elif pi_steps is not None:
+            self.generate_pi_grid(steps=pi_steps)
+
+    def _generate_h2_grid(self, steps=5):
+        """
+        Use the heritability estimate and standard error to generate a grid of values for
+        the heritability parameter. Specifically, given the estimate and standard error, we
+        generate heritability estimates from the percentiles of the normal distribution,
+        with mean `h2_est` and standard deviation `h2_se`. The grid values range from the 10th
+        percentile to the 90th percentile of this normal distribution.
+
+        :param steps: The number of steps for the heritability grid.
+        :return: A grid of values for the heritability parameter.
+
+        """
+
+        assert steps > 0
+        assert self.h2_est is not None
+
+        # If the heritability standard error is not provided, we use half of the heritability estimate
+        # by default.
+        # *Justification*: Under the assumption that heritability for the trait being analyzed
+        # is significantly greater than 0, the standard error should be, at a maximum,
+        # half of the heritability estimate itself to get us a Z-score with absolute value
+        # greater than 2.
+        if self.h2_se is None:
+            h2_se = self.h2_est * 0.5
+        else:
+            h2_se = self.h2_se
+
+        # Sanity checking steps:
+        assert 0. < self.h2_est < 1.
+        assert h2_se > 0
+
+        from scipy.stats import norm
+
+        # First, determine the percentile boundaries to avoid producing
+        # invalid values for the heritability grid:
+
+        percentile_start = max(0.1, norm.cdf(1e-5, loc=self.h2_est, scale=h2_se))
+        percentile_stop = min(0.9, norm.cdf(1. - 1e-5, loc=self.h2_est, scale=h2_se))
+
+        # Generate the heritability grid:
+        return norm.ppf(np.linspace(percentile_start, percentile_stop, steps),
+                        loc=self.h2_est, scale=h2_se)
+
+    def generate_sigma_epsilon_grid(self, steps=5):
+        """
+        Generate a grid of values for the `sigma_epsilon` (residual variance) hyperparameter.
+
+        :param steps: The number of steps for the sigma_epsilon grid.
+        """
+
+        assert steps > 0
+
+        h2_grid = self._generate_h2_grid(steps)
+        self.sigma_epsilon = 1. - h2_grid
+
+        if 'sigma_epsilon' not in self._search_params:
+            self._search_params.append('sigma_epsilon')
+
+    def generate_tau_beta_grid(self, steps=5):
+        """
+        Generate a grid of values for the `tau_beta`
+        (precision of the prior for the effect sizes) hyperparameter.
+        :param steps: The number of steps for the `tau_beta` grid
+        """
+
+        assert steps > 0
+
+        h2_grid = self._generate_h2_grid(steps)
+        # Assume ~1% of SNPs are causal:
+        self.tau_beta = 0.01*self.n_snps / h2_grid
+
+        if 'tau_beta' not in self._search_params:
+            self._search_params.append('tau_beta')
+
+    def generate_pi_grid(self, steps=5):
+        """
+        Generate a grid of values for the `pi` (proportion of non-zero effect sizes) hyperparameter.
+        :param steps: The number of steps for the `pi` grid
+        """
+
+        assert steps > 0
+
+        self.pi = np.unique(np.clip(10. ** (-np.linspace(np.floor(np.log10(self.n_snps)), 0., steps)),
+                                    a_min=1. / self.n_snps,
+                                    a_max=1. - 1. / self.n_snps))
+
+        if 'pi' not in self._search_params:
+            self._search_params.append('pi')
+
+    def combine_grids(self):
+        """
+        Weave together the different hyperparameter grids and return a list of
+        dictionaries where the key is the hyperparameter name and the value is
+        the value for that hyperparameter.
+
+        :return: A list of dictionaries containing the hyperparameter values.
+        :raises ValueError: If all the grids are empty.
+
+        """
+        hyp_names = [name for name, value in self.__dict__.items()
+                     if value is not None and name in self._search_params]
+
+        if len(hyp_names) > 0:
+            hyp_values = itertools.product(*[hyp_grid for hyp_name, hyp_grid in self.__dict__.items()
+                                             if hyp_grid is not None and hyp_name in hyp_names])
+
+            return [dict(zip(hyp_names, hyp_v)) for hyp_v in hyp_values]
+        else:
+            raise ValueError("All the grids are empty!")
+
+    def to_table(self):
+        """
+        :return: The hyperparameter grid as a pandas `DataFrame`.
+        """
+
+        combined_grids = self.combine_grids()
+        if combined_grids:
+            return pd.DataFrame(combined_grids)
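A hypothetical construction sketch (values are illustrative, and the import path is an assumption based on the module path shown above):

from viprs.model.gridsearch.HyperparameterGrid import HyperparameterGrid

grid = HyperparameterGrid(pi_steps=3, h2_est=0.25, h2_se=0.05, n_snps=1e6)
print(grid.to_table())   # one row per hyperparameter combination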

__init__(sigma_epsilon_grid=None, sigma_epsilon_steps=None, tau_beta_grid=None, tau_beta_steps=None, pi_grid=None, pi_steps=None, h2_est=None, h2_se=None, n_snps=1e6)

Create a hyperparameter grid for the standard VIPRS model with the spike-and-slab prior. The hyperparameters for this model are:

    • sigma_epsilon: The residual variance
    • tau_beta: The precision (inverse variance) of the prior for the effect sizes
    • pi: The proportion of non-zero effect sizes

For each of these hyperparameters, we can provide a grid of values to search over. If the heritability estimate and standard error (from e.g. LDSC) are provided, we can generate grids for sigma_epsilon and tau_beta that are informed by these estimates.

For each hyperparameter to be included in the grid, the user must specify either the grid itself or the number of steps to use to generate the grid.

Parameters:

    sigma_epsilon_grid (default: None): An array containing a grid of values for the sigma_epsilon hyperparameter.
    sigma_epsilon_steps (default: None): The number of steps for the sigma_epsilon grid.
    tau_beta_grid (default: None): An array containing a grid of values for the tau_beta hyperparameter.
    tau_beta_steps (default: None): The number of steps for the tau_beta grid.
    pi_grid (default: None): An array containing a grid of values for the pi hyperparameter.
    pi_steps (default: None): The number of steps for the pi grid.
    h2_est (default: None): An estimate of the heritability for the trait under consideration. If provided, we can generate grids for some of the hyperparameters that are consistent with this estimate.
    h2_se (default: None): The standard error of the heritability estimate. If provided, we can generate grids for some of the hyperparameters that are consistent with this estimate.
    n_snps (default: 1e6): Number of common variants that may be relevant for this analysis. This estimate can be used to generate grids that are based on this number.

Source code in viprs/model/gridsearch/HyperparameterGrid.py:
def __init__(self,
+             sigma_epsilon_grid=None,
+             sigma_epsilon_steps=None,
+             tau_beta_grid=None,
+             tau_beta_steps=None,
+             pi_grid=None,
+             pi_steps=None,
+             h2_est=None,
+             h2_se=None,
+             n_snps=1e6):
+    """
+
+    Create a hyperparameter grid for the standard VIPRS model with the
+    spike-and-slab prior. The hyperparameters for this model are:
+
+    * `sigma_epsilon`: The residual variance
+    * `tau_beta`: The precision (inverse variance) of the prior for the effect sizes
+    * `pi`: The proportion of non-zero effect sizes
+
+    For each of these hyperparameters, we can provide a grid of values to search over.
+    If the heritability estimate and standard error (from e.g. LDSC) are provided,
+    we can generate grids for sigma_epsilon and tau_beta that are informed by these estimates.
+
+    For each hyperparameter to be included in the grid, the user must specify either the grid
+    itself, or the number of steps to use to generate the grid.
+
+    :param sigma_epsilon_grid: An array containing a grid of values for the sigma_epsilon hyperparameter.
+    :param sigma_epsilon_steps: The number of steps for the sigma_epsilon grid
+    :param tau_beta_grid: An array containing a grid of values for the tau_beta hyperparameter.
+    :param tau_beta_steps: The number of steps for the tau_beta grid
+    :param pi_grid: An array containing a grid of values for the pi hyperparameter
+    :param pi_steps: The number of steps for the pi grid
+    :param h2_est: An estimate of the heritability for the trait under consideration. If provided,
+    we can generate grids for some of the hyperparameters that are consistent with this estimate.
+    :param h2_se: The standard error of the heritability estimate. If provided, we can generate grids
+    for some of the hyperparameters that are consistent with this estimate.
+    :param n_snps: Number of common variants that may be relevant for this analysis. This estimate can
+    be used to generate grids that are based on this number.
+    """
+
+    # If the heritability estimate is not provided, use a reasonable default value of 0.1
+    # with a wide standard error of 0.1.
+    if h2_est is None:
+        self.h2_est = 0.1
+        self.h2_se = 0.1
+    else:
+        self.h2_est = h2_est
+        self.h2_se = h2_se
+
+    self.n_snps = n_snps
+    self._search_params = []
+
+    # Initialize the grid for sigma_epsilon:
+    self.sigma_epsilon = sigma_epsilon_grid
+    if self.sigma_epsilon is not None:
+        self._search_params.append('sigma_epsilon')
+    elif sigma_epsilon_steps is not None:
+        self.generate_sigma_epsilon_grid(steps=sigma_epsilon_steps)
+
+    # Initialize the grid for the tau_beta:
+    self.tau_beta = tau_beta_grid
+    if self.tau_beta is not None:
+        self._search_params.append('tau_beta')
+    elif tau_beta_steps is not None:
+        self.generate_tau_beta_grid(steps=tau_beta_steps)
+
+    # Initialize the grid for pi:
+    self.pi = pi_grid
+    if self.pi is not None:
+        self._search_params.append('pi')
+    elif pi_steps is not None:
+        self.generate_pi_grid(steps=pi_steps)
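As a quick illustration, here is one way to construct a grid (a minimal sketch: the import path follows the module path shown above, and all values are arbitrary):

import numpy as np
from viprs.model.gridsearch.HyperparameterGrid import HyperparameterGrid

# Explicit grid for pi; 5-step generated grids for sigma_epsilon and tau_beta,
# informed by an (assumed) heritability estimate of 0.3 with standard error 0.05:
grid = HyperparameterGrid(pi_grid=np.array([1e-4, 1e-3, 1e-2]),
                          sigma_epsilon_steps=5,
                          tau_beta_steps=5,
                          h2_est=0.3,
                          h2_se=0.05)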

combine_grids()

Weave together the different hyperparameter grids and return a list of dictionaries, where each key is a hyperparameter name and each value is the corresponding value for that hyperparameter.

Returns:

A list of dictionaries containing the hyperparameter values.

Raises:

• ValueError: If all the grids are empty.
Source code in viprs/model/gridsearch/HyperparameterGrid.py:
def combine_grids(self):
    """
    Weave together the different hyperparameter grids and return a list of
    dictionaries, where each key is a hyperparameter name and each value is
    the corresponding value for that hyperparameter.

    :return: A list of dictionaries containing the hyperparameter values.
    :raises ValueError: If all the grids are empty.
    """
    hyp_names = [name for name, value in self.__dict__.items()
                 if value is not None and name in self._search_params]

    if len(hyp_names) > 0:
        hyp_values = itertools.product(*[hyp_grid for hyp_name, hyp_grid in self.__dict__.items()
                                         if hyp_grid is not None and hyp_name in hyp_names])

        return [dict(zip(hyp_names, hyp_v)) for hyp_v in hyp_values]
    else:
        raise ValueError("All the grids are empty!")
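For intuition, combine_grids takes the Cartesian product of the populated grids (a small sketch with made-up values; the key order reflects internal attribute order and may differ):

grid = HyperparameterGrid(sigma_epsilon_grid=np.array([0.7, 0.9]),
                          pi_grid=np.array([1e-3, 1e-2]))
settings = grid.combine_grids()
len(settings)  # 4 settings: 2 sigma_epsilon values x 2 pi values
settings[0]    # e.g. {'sigma_epsilon': 0.7, 'pi': 0.001}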

generate_pi_grid(steps=5)

Generate a grid of values for the pi (proportion of non-zero effect sizes) hyperparameter.


Parameters:

• steps: The number of steps for the pi grid. Default: 5.
Source code in viprs/model/gridsearch/HyperparameterGrid.py:
def generate_pi_grid(self, steps=5):
    """
    Generate a grid of values for the `pi` (proportion of non-zero effect sizes) hyperparameter.

    :param steps: The number of steps for the `pi` grid.
    """

    assert steps > 0

    self.pi = np.unique(np.clip(10. ** (-np.linspace(np.floor(np.log10(self.n_snps)), 0., steps)),
                                a_min=1. / self.n_snps,
                                a_max=1. - 1. / self.n_snps))

    if 'pi' not in self._search_params:
        self._search_params.append('pi')
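To see what this produces: with the default n_snps = 1e6 and steps = 5, the grid is log-spaced between 1/n_snps and 1 - 1/n_snps (a standalone sketch reproducing the computation above):

import numpy as np

n_snps, steps = 1e6, 5
pi = np.unique(np.clip(10. ** (-np.linspace(np.floor(np.log10(n_snps)), 0., steps)),
                       a_min=1. / n_snps, a_max=1. - 1. / n_snps))
# pi is approximately [1e-06, 3.16e-05, 1e-03, 3.16e-02, 1 - 1e-06]:
# from "one causal variant in a million" up to "nearly all variants causal".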

generate_sigma_epsilon_grid(steps=5)

Generate a grid of values for the sigma_epsilon (residual variance) hyperparameter.


Parameters:

• steps: The number of steps for the sigma_epsilon grid. Default: 5.
Source code in viprs/model/gridsearch/HyperparameterGrid.py:
def generate_sigma_epsilon_grid(self, steps=5):
    """
    Generate a grid of values for the `sigma_epsilon` (residual variance) hyperparameter.

    :param steps: The number of steps for the `sigma_epsilon` grid.
    """

    assert steps > 0

    h2_grid = self._generate_h2_grid(steps)
    self.sigma_epsilon = 1. - h2_grid

    if 'sigma_epsilon' not in self._search_params:
        self._search_params.append('sigma_epsilon')
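The logic here: with standardized genotypes and phenotype, the total phenotypic variance is 1, so the residual variance is approximately 1 - h^2. The private helper _generate_h2_grid (not shown on this page; presumably centered on h2_est with a spread governed by h2_se) builds a grid of plausible heritability values, which then maps directly onto a sigma_epsilon grid. For example, h^2 values of [0.05, 0.1, 0.2] yield sigma_epsilon values of [0.95, 0.9, 0.8].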

generate_tau_beta_grid(steps=5)

Generate a grid of values for the tau_beta (precision of the prior for the effect sizes) hyperparameter.


Parameters:

• steps: The number of steps for the tau_beta grid. Default: 5.
Source code in viprs/model/gridsearch/HyperparameterGrid.py:
def generate_tau_beta_grid(self, steps=5):
    """
    Generate a grid of values for the `tau_beta`
    (precision of the prior for the effect sizes) hyperparameter.

    :param steps: The number of steps for the `tau_beta` grid.
    """

    assert steps > 0

    h2_grid = self._generate_h2_grid(steps)
    # Assume ~1% of SNPs are causal:
    self.tau_beta = 0.01*self.n_snps / h2_grid

    if 'tau_beta' not in self._search_params:
        self._search_params.append('tau_beta')
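To make the formula concrete: if the heritability h^2 is spread evenly across the assumed 1% of causal variants, each causal effect has prior variance h^2 / (0.01 * n_snps), and tau_beta is the reciprocal of that quantity. With n_snps = 1e6 and h^2 = 0.1, tau_beta = 0.01 * 1e6 / 0.1 = 1e5, i.e. a per-variant prior variance of 1e-5.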

to_table()

Returns:

The hyperparameter grid as a pandas DataFrame.
Source code in viprs/model/gridsearch/HyperparameterGrid.py:
def to_table(self):
    """
    :return: The hyperparameter grid as a pandas `DataFrame`.
    """

    combined_grids = self.combine_grids()
    if combined_grids:
        return pd.DataFrame(combined_grids)
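A brief usage sketch (the exact values depend on the generated h^2 grid, but the shape follows from the Cartesian product):

grid = HyperparameterGrid(pi_steps=3, sigma_epsilon_steps=2,
                          h2_est=0.25, h2_se=0.05)
df = grid.to_table()  # one row per hyperparameter combination
df.shape              # (6, 2): 2 sigma_epsilon values x 3 pi values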
diff --git a/api/model/gridsearch/HyperparameterSearch/index.html b/api/model/gridsearch/HyperparameterSearch/index.html
new file mode 100644

HyperparameterSearch


BMA

Bases: BayesPRSModel

Bayesian Model Averaging fitting procedure

Source code in viprs/model/gridsearch/HyperparameterSearch.py:
class BMA(BayesPRSModel):
    """
    Bayesian Model Averaging fitting procedure
    """

    def __init__(self,
                 gdl,
                 grid,
                 model=None,
                 normalization='softmax',
                 verbose=False,
                 n_jobs=1):
        """
        Integrate out hyperparameters using Bayesian Model Averaging
        :param gdl: A GWADataLoader object
        :param grid: A HyperparameterGrid object
        :param model: A `PRSModel`-derived object (e.g. VIPRS).
        :param normalization: The normalization scheme for the final ELBOs. Options are (`softmax`, `sum`).
        :param verbose: Detailed messages and print statements.
        :param n_jobs: The number of processes to use for the BMA
        """

        super().__init__(gdl)

        assert normalization in ('softmax', 'sum')

        self.grid = grid
        self.n_jobs = n_jobs
        self.verbose = verbose

        if model is None:
            self.model = VIPRS(gdl)
        else:
            self.model = model

        self.model.verbose = verbose
        self.model.threads = 1

        self.normalization = normalization

        self.var_gamma = None
        self.var_mu = None
        self.var_sigma = None

    def initialize(self):

        self.var_gamma = {c: np.zeros(c_size) for c, c_size in self.shapes.items()}
        self.var_mu = {c: np.zeros(c_size) for c, c_size in self.shapes.items()}
        self.var_sigma = {c: np.zeros(c_size) for c, c_size in self.shapes.items()}

    def fit(self, max_iter=100, f_abs_tol=1e-3, x_abs_tol=1e-8, **grid_kwargs):

        self.initialize()

        print("> Performing Bayesian Model Averaging with the following grid:")
        print(self.grid.to_table())

        opts = [(self.model, g, {'max_iter': max_iter,
                                 'f_abs_tol': f_abs_tol,
                                 'x_abs_tol': x_abs_tol})
                for g in self.grid.combine_grids()]

        elbos = []
        var_gammas = []
        var_mus = []
        var_sigmas = []

        ctx = multiprocessing.get_context("spawn")

        with ctx.Pool(self.n_jobs, maxtasksperchild=1) as pool:
            for fitted_model in tqdm(pool.imap_unordered(fit_model_fixed_params, opts), total=len(opts)):

                if fitted_model is None:
                    continue

                elbos.append(fitted_model.elbo())
                var_gammas.append(fitted_model.var_gamma)
                var_mus.append(fitted_model.var_mu)
                var_sigmas.append(fitted_model.var_sigma)

        elbos = np.array(elbos)

        if self.normalization == 'softmax':
            from scipy.special import softmax
            elbos = softmax(elbos)
        elif self.normalization == 'sum':
            # Correction for negative ELBOs:
            elbos = elbos - elbos.min() + 1.
            elbos /= elbos.sum()

        for idx in range(len(elbos)):
            for c in self.shapes:
                self.var_gamma[c] += var_gammas[idx][c]*elbos[idx]
                self.var_mu[c] += var_mus[idx][c]*elbos[idx]
                self.var_sigma[c] += var_sigmas[idx][c]*elbos[idx]

        self.pip = {}
        self.post_mean_beta = {}
        self.post_var_beta = {}

        for c, v_gamma in self.var_gamma.items():

            if len(v_gamma.shape) > 1:
                self.pip[c] = v_gamma.sum(axis=1)
                self.post_mean_beta[c] = (v_gamma*self.var_mu[c]).sum(axis=1)
                self.post_var_beta[c] = ((v_gamma * (self.var_mu[c] ** 2 + self.var_sigma[c])).sum(axis=1) -
                                         self.post_mean_beta[c]**2)
            else:
                self.pip[c] = v_gamma
                self.post_mean_beta[c] = v_gamma * self.var_mu[c]
                self.post_var_beta[c] = (v_gamma * (self.var_mu[c] ** 2 + self.var_sigma[c]) -
                                         self.post_mean_beta[c]**2)

        return self

__init__(gdl, grid, model=None, normalization='softmax', verbose=False, n_jobs=1)

Integrate out hyperparameters using Bayesian Model Averaging


Parameters:

• gdl: A GWADataLoader object (required).
• grid: A HyperparameterGrid object (required).
• model: A PRSModel-derived object (e.g. VIPRS). Default: None.
• normalization: The normalization scheme for the final ELBOs. Options are (softmax, sum). Default: 'softmax'.
• verbose: Detailed messages and print statements. Default: False.
• n_jobs: The number of processes to use for the BMA. Default: 1.
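A hedged usage sketch (assuming gdl is a pre-loaded GWADataLoader with GWAS summary statistics and LD matrices; all argument values are illustrative):

from viprs.model.gridsearch.HyperparameterGrid import HyperparameterGrid
from viprs.model.gridsearch.HyperparameterSearch import BMA

grid = HyperparameterGrid(pi_steps=5, sigma_epsilon_steps=5)
bma = BMA(gdl, grid, normalization='softmax', n_jobs=4)
bma.fit()  # fits one VIPRS model per grid point, then ELBO-weights the posteriors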

BayesOpt

Bases: HyperparameterSearch

Hyperparameter search using Bayesian optimization

Source code in viprs/model/gridsearch/HyperparameterSearch.py:
class BayesOpt(HyperparameterSearch):
    """
    Hyperparameter search using Bayesian optimization
    """

    def __init__(self,
                 gdl,
                 opt_params,
                 param_bounds=None,
                 model=None,
                 criterion='ELBO',
                 validation_gdl=None,
                 verbose=False,
                 n_jobs=1):
        """
        Perform hyperparameter search using Bayesian optimization
        :param gdl: A GWADataLoader object
        :param opt_params: A list of the hyperparameters to optimize over (e.g. 'pi', 'sigma_epsilon', 'sigma_beta').
        :param param_bounds: The bounds for each hyperparameter included in the optimization,
        where each entry records the (min, max) values for that hyperparameter.
        :param model: A `PRSModel`-derived object (e.g. VIPRS).
        :param criterion: The objective function for the hyperparameter search (ELBO or validation).
        :param validation_gdl: If the objective is validation, provide the GWADataLoader object for the validation
        dataset.
        :param verbose: Detailed messages and print statements.
        :param n_jobs: The number of processes to use for the hyperparameter search (not applicable here).
        """

        super().__init__(gdl,
                         model=model,
                         criterion=criterion,
                         validation_gdl=validation_gdl,
                         verbose=verbose,
                         n_jobs=n_jobs)

        self._opt_params = opt_params
        self._param_bounds = param_bounds or {
            'sigma_epsilon': (1e-6, 1. - 1e-6),
            'tau_beta': (1e-3, None),
            'pi': (1e-6, 1. - 1e-6)
        }

        # Convert the `pi` limits to log-scale:
        if 'pi' in self._param_bounds:
            self._param_bounds['pi'] = tuple(np.log10(list(self._param_bounds['pi'])))

        assert all([opp in self._param_bounds for opp in self._opt_params])

    def fit(self,
            max_iter=50,
            f_abs_tol=1e-4,
            n_calls=30,
            n_random_starts=5,
            acq_func="gp_hedge"):
        """
        Perform model fitting and hyperparameter search using Bayesian optimization.

        :param n_calls: The number of model runs with different hyperparameter settings.
        :param n_random_starts: The number of random starts to initialize the optimizer.
        :param acq_func: The acquisition function (default: `gp_hedge`)
        :param max_iter: The maximum number of iterations within the search (default: 50).
        :param f_abs_tol: The absolute tolerance for the objective (ELBO) within the search.
        """

        from skopt import gp_minimize

        def opt_func(p):

            fix_params = dict(zip(self._opt_params, p))
            if 'pi' in fix_params:
                fix_params['pi'] = 10**fix_params['pi']

            fitted_model = fit_model_fixed_params((self.model, fix_params,
                                                   {'max_iter': max_iter,
                                                    'f_abs_tol': f_abs_tol}))

            if fitted_model is None:
                return np.inf
            else:
                return -self.objective(fitted_model)

        res = gp_minimize(opt_func,  # the function to minimize
                          [self._param_bounds[op] for op in self._opt_params],  # the bounds on each dimension of x
                          acq_func=acq_func,  # the acquisition function
                          n_calls=n_calls,  # the number of evaluations of f
                          n_random_starts=n_random_starts)  # the number of random initialization points

        # Store validation result
        self.validation_result = []
        for obj, x in zip(res.func_vals, res.x_iters):
            v_res = dict(zip(self._opt_params, x))
            if 'pi' in v_res:
                v_res['pi'] = 10**v_res['pi']

            if self.criterion == 'ELBO':
                v_res['ELBO'] = -obj
            elif self.criterion == 'pseudo_validation':
                v_res['Pseudo_Validation_Corr'] = -obj
            else:
                v_res['Validation_R2'] = -obj

            self.validation_result.append(v_res)

        # Extract the best performing hyperparameters:
        final_best_params = dict(zip(self._opt_params, res.x))
        if 'pi' in final_best_params:
            final_best_params['pi'] = 10 ** final_best_params['pi']

        print("> Bayesian Optimization identified the best hyperparameters as:")
        pprint(final_best_params)

        print("> Refitting the model with the best hyperparameters...")

        self.model.fix_params = final_best_params
        return self.model.fit()

__init__(gdl, opt_params, param_bounds=None, model=None, criterion='ELBO', validation_gdl=None, verbose=False, n_jobs=1)

Perform hyperparameter search using Bayesian optimization


Parameters:

• gdl: A GWADataLoader object (required).
• opt_params: A list of the hyperparameters to optimize over, e.g. 'pi', 'sigma_epsilon', 'sigma_beta' (required).
• param_bounds: The bounds for each hyperparameter included in the optimization, where each entry records the (min, max) values for that hyperparameter. Default: None.
• model: A PRSModel-derived object (e.g. VIPRS). Default: None.
• criterion: The objective function for the hyperparameter search (ELBO or validation). Default: 'ELBO'.
• validation_gdl: If the objective is validation, provide the GWADataLoader object for the validation dataset. Default: None.
• verbose: Detailed messages and print statements. Default: False.
• n_jobs: The number of processes to use for the hyperparameter search (not applicable here). Default: 1.

fit(max_iter=50, f_abs_tol=0.0001, n_calls=30, n_random_starts=5, acq_func='gp_hedge')

Perform model fitting and hyperparameter search using Bayesian optimization.


Parameters:

• n_calls: The number of model runs with different hyperparameter settings. Default: 30.
• n_random_starts: The number of random starts to initialize the optimizer. Default: 5.
• acq_func: The acquisition function. Default: 'gp_hedge'.
• max_iter: The maximum number of iterations within the search. Default: 50.
• f_abs_tol: The absolute tolerance for the objective (ELBO) within the search. Default: 0.0001.
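A hedged usage sketch (again assuming a pre-loaded gdl; note that, per the import inside fit above, BayesOpt requires scikit-optimize (skopt) to be installed):

from viprs.model.gridsearch.HyperparameterSearch import BayesOpt

bo = BayesOpt(gdl, opt_params=['pi', 'sigma_epsilon'], criterion='ELBO')
final_model = bo.fit(n_calls=30)  # refits and returns the model at the best hyperparameters
bo.to_validation_table()          # one row per evaluated hyperparameter setting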

GridSearch

Bases: HyperparameterSearch

Hyperparameter search using Grid Search

Source code in viprs/model/gridsearch/HyperparameterSearch.py:
class GridSearch(HyperparameterSearch):
    """
    Hyperparameter search using Grid Search
    """

    def __init__(self,
                 gdl,
                 grid,
                 model=None,
                 criterion='ELBO',
                 validation_gdl=None,
                 verbose=False,
                 n_jobs=1):

        """
        Perform hyperparameter search using grid search
        :param gdl: A GWADataLoader object
        :param grid: A HyperparameterGrid object
        :param model: A `PRSModel`-derived object (e.g. VIPRS).
        :param criterion: The objective function for the grid search (ELBO or validation).
        :param validation_gdl: If the objective is validation, provide the GWADataLoader object for the validation
        dataset.
        :param verbose: Detailed messages and print statements.
        :param n_jobs: The number of processes to use for the grid search
        """

        super().__init__(gdl, model=model, criterion=criterion,
                         validation_gdl=validation_gdl,
                         verbose=verbose,
                         n_jobs=n_jobs)

        self.grid = grid
        self.model.threads = 1

    def fit(self, max_iter=50, f_abs_tol=1e-3, x_abs_tol=1e-8):

        print("> Performing Grid Search over the following grid:")
        print(self.grid.to_table())

        opts = [(self.model, g, {'max_iter': max_iter,
                                 'f_abs_tol': f_abs_tol,
                                 'x_abs_tol': x_abs_tol})
                for g in self.grid.combine_grids()]

        assert len(opts) > 1

        self.validation_result = []
        fit_results = []
        params = []

        ctx = multiprocessing.get_context("spawn")

        with ctx.Pool(self.n_jobs, maxtasksperchild=1) as pool:

            for idx, fitted_model in tqdm(enumerate(pool.imap(fit_model_fixed_params, opts)), total=len(opts)):

                if fitted_model is None:
                    continue

                fit_results.append(fitted_model)
                params.append(copy.copy(opts[idx][1]))
                self.validation_result.append(copy.copy(opts[idx][1]))
                self.validation_result[-1]['ELBO'] = fitted_model.elbo()

        if len(fit_results) > 1:
            res_objectives = self.multi_objective(fit_results)
        else:
            raise Exception("Error: Convergence was achieved for less than 2 models.")

        if self.criterion == 'validation':
            for i in range(len(self.validation_result)):
                self.validation_result[i]['Validation_R2'] = res_objectives[i]
        elif self.criterion == 'pseudo_validation':
            for i in range(len(self.validation_result)):
                self.validation_result[i]['Pseudo_Validation_Corr'] = res_objectives[i]

        best_idx = np.argmax(res_objectives)
        best_params = params[best_idx]

        print("> Grid search identified the best hyperparameters as:")
        pprint(best_params)

        print("> Refitting the model with the best hyperparameters...")

        self.model.fix_params = best_params
        return self.model.fit()

__init__(gdl, grid, model=None, criterion='ELBO', validation_gdl=None, verbose=False, n_jobs=1)

Perform hyperparameter search using grid search


Parameters:

• gdl: A GWADataLoader object (required).
• grid: A HyperparameterGrid object (required).
• model: A PRSModel-derived object (e.g. VIPRS). Default: None.
• criterion: The objective function for the grid search (ELBO or validation). Default: 'ELBO'.
• validation_gdl: If the objective is validation, provide the GWADataLoader object for the validation dataset. Default: None.
• verbose: Detailed messages and print statements. Default: False.
• n_jobs: The number of processes to use for the grid search. Default: 1.
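A hedged usage sketch (assuming gdl for training and, since the criterion here is validation, a separate val_gdl carrying genotypes and phenotypes):

from viprs.model.gridsearch.HyperparameterGrid import HyperparameterGrid
from viprs.model.gridsearch.HyperparameterSearch import GridSearch

grid = HyperparameterGrid(pi_steps=5)
gs = GridSearch(gdl, grid, criterion='validation', validation_gdl=val_gdl, n_jobs=4)
final_model = gs.fit()
gs.write_validation_result("grid_search_results.tsv")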

HyperparameterSearch

Bases: object

A generic class for performing hyperparameter search on the VIPRS model. This interface is old and will likely be deprecated in future releases. It is recommended to use the VIPRSGrid class and its derivatives for performing grid search instead.

Source code in viprs/model/gridsearch/HyperparameterSearch.py:
class HyperparameterSearch(object):
    """
    A generic class for performing hyperparameter search on the
    `VIPRS` model. This interface is old and will likely be deprecated
    in future releases. It is recommended to use the `VIPRSGrid` class
    and its derivatives for performing grid search instead.
    """

    def __init__(self,
                 gdl,
                 model=None,
                 criterion='ELBO',
                 validation_gdl=None,
                 verbose=False,
                 n_jobs=1):
        """
        A generic hyperparameter search class that implements common functionalities
        that may be required by hyperparameter search strategies.
        :param gdl: A GWADataLoader object
        :param model: A `PRSModel`-derived object (e.g. VIPRS).
        :param criterion: The objective function for the hyperparameter search.
        Options are: `ELBO`, `pseudo_validation` or `validation`.
        :param validation_gdl: If the objective is validation, provide the GWADataLoader object for the validation
        dataset.
        :param verbose: Detailed messages and print statements.
        :param n_jobs: The number of processes to use for the hyperparameter search.
        """

        # Sanity checking:
        assert criterion in ('ELBO', 'validation', 'pseudo_validation')

        self.gdl = gdl
        self.n_jobs = n_jobs

        if model is None:
            self.model = VIPRS(gdl)
        else:
            import inspect
            if inspect.isclass(model):
                self.model = model(gdl)
            else:
                self.model = model

        self.validation_result = None

        self.criterion = criterion
        self._validation_gdl = validation_gdl

        self.verbose = verbose
        self.model.verbose = verbose

        if self._validation_gdl is not None:
            self._validation_gdl.verbose = verbose

        if self.criterion == 'ELBO':
            assert hasattr(self.model, 'elbo')
        elif self.criterion == 'pseudo_validation':
            assert self._validation_gdl is not None
            assert self._validation_gdl.sumstats_table is not None
        if self.criterion == 'validation':
            assert self._validation_gdl is not None
            assert self._validation_gdl.genotype is not None
            assert self._validation_gdl.sample_table.phenotype is not None

    def to_validation_table(self):
        """
        Summarize the validation results in a pandas table.
        """
        if self.validation_result is None:
            raise Exception("Validation result is not set!")
        elif len(self.validation_result) < 1:
            raise Exception("Validation result is not set!")

        return pd.DataFrame(self.validation_result)

    def write_validation_result(self, v_filename, sep="\t"):
        """
        After performing hyperparameter search, write a table
        that records the value of the objective for each combination
        of hyperparameters.
        :param v_filename: The filename for the validation table.
        :param sep: The separator for the validation table
        """

        v_df = self.to_validation_table()
        v_df.to_csv(v_filename, index=False, sep=sep)

    def multi_objective(self, models):
        """
        This method evaluates multiple PRS models simultaneously. This can be faster for
        some evaluation criteria, such as the validation R^2, because we need to
        multiply the inferred effect sizes with the genotype matrix only once.

        :param models: A list of PRS models that we wish to evaluate.
        """

        if len(models) == 1:
            return self.objective(models[0])

        if self.criterion == 'ELBO':
            return [m.elbo() for m in models]

        elif self.criterion == 'pseudo_validation':
            return [m.pseudo_validate(validation_gdl=self._validation_gdl) for m in models]
        else:

            prs_m = BayesPRSModel(self._validation_gdl)

            eff_table = models[0].to_table(per_chromosome=False)
            eff_table = eff_table[['CHR', 'SNP', 'A1', 'A2', 'BETA']]
            eff_table.rename(columns={'BETA': 'BETA_0'}, inplace=True)

            eff_table[[f'BETA_{i}' for i in range(1, len(models))]] = np.array(
                [models[i].to_table(per_chromosome=False)['BETA'].values for i in range(1, len(models))]
            ).T

            prs_m.set_model_parameters(eff_table)

            prs = prs_m.predict(test_gdl=self._validation_gdl)

            if self._validation_gdl.phenotype_likelihood == 'binomial':
                eval_func = roc_auc
            else:
                eval_func = r2

            metrics = [eval_func(prs[:, i].flatten(), self._validation_gdl.sample_table.phenotype)
                       for i in range(len(models))]

            return metrics

    def objective(self, model):
        """
        A method that takes the result of fitting the model
        and returns the desired objective (either `ELBO`, `pseudo_validation`, or `validation`).
        :param model: The PRS model to evaluate
        """

        if self.criterion == 'ELBO':
            return model.elbo()
        elif self.criterion == 'pseudo_validation':
            return model.pseudo_validate(validation_gdl=self._validation_gdl)
        else:

            # Predict:
            prs = model.predict(test_gdl=self._validation_gdl)

            if self._validation_gdl.phenotype_likelihood == 'binomial':
                eval_func = roc_auc
            else:
                eval_func = r2

            return eval_func(prs, self._validation_gdl.sample_table.phenotype)

    def fit(self):
        raise NotImplementedError

__init__(gdl, model=None, criterion='ELBO', validation_gdl=None, verbose=False, n_jobs=1)

A generic hyperparameter search class that implements common functionalities that may be required by hyperparameter search strategies.


Parameters:

• gdl: A GWADataLoader object (required).
• model: A PRSModel-derived object (e.g. VIPRS). Default: None.
• criterion: The objective function for the hyperparameter search. Options are: ELBO, pseudo_validation or validation. Default: 'ELBO'.
• validation_gdl: If the objective is validation, provide the GWADataLoader object for the validation dataset. Default: None.
• verbose: Detailed messages and print statements. Default: False.
• n_jobs: The number of processes to use for the hyperparameter search. Default: 1.

multi_objective(models)

This method evaluates multiple PRS models simultaneously. This can be faster for some evaluation criteria, such as the validation R^2, because we need to multiply the inferred effect sizes with the genotype matrix only once.


Parameters:

• models: A list of PRS models that we wish to evaluate (required).

objective(model)

A method that takes the result of fitting the model and returns the desired objective (either ELBO, pseudo_validation, or validation).


Parameters:

• model: The PRS model to evaluate (required).

to_validation_table()

Summarize the validation results in a pandas table.


write_validation_result(v_filename, sep='\t')

After performing hyperparameter search, write a table that records the value of the objective for each combination of hyperparameters.


Parameters:

• v_filename: The filename for the validation table (required).
• sep: The separator for the validation table. Default: '\t'.

fit_model_fixed_params(params)

Perform model fitting using a set of fixed parameters. This is a helper function to allow us to use the multiprocessing module to fit PRS models in parallel.


Parameters:

• params: A tuple of (BayesPRSModel, fixed parameters dictionary, kwargs for the .fit() method) (required).
Source code in viprs/model/gridsearch/HyperparameterSearch.py:
def fit_model_fixed_params(params):
    """
    Perform model fitting using a set of fixed parameters.
    This is a helper function to allow us to use the `multiprocessing` module
    to fit PRS models in parallel.
    :param params: A tuple of (BayesPRSModel, fixed parameters dictionary, and kwargs for the .fit() method).
    """

    # Unpack: vi_model, fixed_params, fit_kwargs
    vi_model, fixed_params, fit_kwargs = params
    vi_model.fix_params = fixed_params

    try:
        vi_model.fit(**fit_kwargs)
    except Exception:
        # Signal to the caller that this hyperparameter setting failed to converge:
        return None

    return vi_model
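A minimal sketch of how this helper is invoked, mirroring how BMA and GridSearch build their task tuples above (model is assumed to be an existing VIPRS instance):

task = (model, {'pi': 1e-3, 'sigma_epsilon': 0.9}, {'max_iter': 100, 'f_abs_tol': 1e-3})
fitted = fit_model_fixed_params(task)
if fitted is None:
    print("This hyperparameter setting failed to converge.")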
diff --git a/api/model/gridsearch/VIPRSBMA/index.html b/api/model/gridsearch/VIPRSBMA/index.html
new file mode 100644

VIPRSBMA


VIPRSBMA

Bases: VIPRSGrid

The VIPRSBMA class is an extension of the VIPRSGrid class that implements Bayesian model averaging for the VIPRS models in the grid. Bayesian model averaging is a technique that allows us to combine the results of multiple models by weighting them according to their evidence. In this context, we weight the models by their final ELBO values.


For more details on the BMA procedure implemented here, refer to the Supplementary material of:


> Zabad S, Gravel S, Li Y. Fast and accurate Bayesian polygenic risk modeling with variational inference. Am J Hum Genet. 2023 May 4;110(5):741-761. doi: 10.1016/j.ajhg.2023.03.009. Epub 2023 Apr 7. PMID: 37030289; PMCID: PMC10183379.

Source code in viprs/model/gridsearch/VIPRSBMA.py:
class VIPRSBMA(VIPRSGrid):
    """
    The `VIPRSBMA` class is an extension of the `VIPRSGrid` class that
    implements Bayesian model averaging for the `VIPRS` models in the grid.
    Bayesian model averaging is a technique that allows us to combine the
    results of multiple models by weighting them according to their evidence.
    In this context, we weight the models by their final ELBO values.

    For more details on the BMA procedure implemented here, refer to the
    Supplementary material of:

    > Zabad S, Gravel S, Li Y. Fast and accurate Bayesian polygenic risk modeling with variational inference.
    Am J Hum Genet. 2023 May 4;110(5):741-761. doi: 10.1016/j.ajhg.2023.03.009.
    Epub 2023 Apr 7. PMID: 37030289; PMCID: PMC10183379.

    """

    def __init__(self,
                 gdl,
                 grid,
                 **kwargs):
        """

        Initialize the `VIPRSBMA` model.

        :param gdl: An instance of `GWADataLoader`
        :param grid: An instance of `HyperparameterGrid`
        :param kwargs: Additional keyword arguments for the VIPRS model
        """

        super().__init__(gdl, grid=grid, **kwargs)

    def average_models(self, normalization='softmax'):
        """
        Use Bayesian model averaging (BMA) to obtain final estimates for each parameter.
        The models are averaged using weights derived from the final ELBO of each model.

        :param normalization: The normalization scheme for the final ELBOs.
        Options are (`softmax`, `sum`).
        :raises KeyError: If the normalization scheme is not recognized.
        """

        if self.n_models < 2:
            return self

        # Extract the models that converged successfully:
        models_to_keep = np.where(self.models_to_keep)[0]

        elbos = self.history['ELBO'][-1][models_to_keep]

        if normalization == 'softmax':
            from scipy.special import softmax
            weights = np.array(softmax(elbos))
        elif normalization == 'sum':
            weights = np.array(elbos)

            # Correction for negative ELBOs:
            weights = weights - weights.min() + 1.
            weights /= weights.sum()
        else:
            raise KeyError("Normalization scheme not recognized. "
                           "Valid options are: `softmax`, `sum`. "
                           "Got: {}".format(normalization))

        if int(self.verbose) > 1:
            # Use %s formatting so the weights array is rendered in the log message:
            logging.info("Averaging PRS models with weights: %s", weights)

        # Average the model parameters:
        for param in (self.pip, self.post_mean_beta, self.post_var_beta,
                      self.var_gamma, self.var_mu, self.var_tau,
                      self.eta, self.zeta, self.q):

            for c in param:
                param[c] = (param[c][:, models_to_keep]*weights).sum(axis=1)

        # Set the number of models to 1:
        self.n_models = 1

        return self
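A hedged sketch of the intended workflow (assuming a pre-loaded gdl and a populated HyperparameterGrid; whether fit() triggers the averaging automatically is not shown here, so it is called explicitly):

from viprs.model.gridsearch.VIPRSBMA import VIPRSBMA

vbma = VIPRSBMA(gdl, grid)
vbma.fit()                                    # fit all grid models side by side
vbma.average_models(normalization='softmax')  # collapse them into one ELBO-weighted model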

__init__(gdl, grid, **kwargs)

Initialize the VIPRSBMA model.


Parameters:

• gdl: An instance of GWADataLoader (required).
• grid: An instance of HyperparameterGrid (required).
• kwargs: Additional keyword arguments for the VIPRS model. Default: {}.

average_models(normalization='softmax')

Use Bayesian model averaging (BMA) to obtain final estimates for each parameter. The models are averaged using weights derived from the final ELBO of each model.


Parameters:

• normalization: The normalization scheme for the final ELBOs. Options are (softmax, sum). Default: 'softmax'.

Raises:

• KeyError: If the normalization scheme is not recognized.
+ Source code in viprs/model/gridsearch/VIPRSBMA.py +
39
+40
+41
+42
+43
+44
+45
+46
+47
+48
+49
+50
+51
+52
+53
+54
+55
+56
+57
+58
+59
+60
+61
+62
+63
+64
+65
+66
+67
+68
+69
+70
+71
+72
+73
+74
+75
+76
+77
+78
+79
+80
+81
+82
+83
+84
+85
def average_models(self, normalization='softmax'):
+    """
+    Use Bayesian model averaging (BMA) to obtain final weights for each parameter.
+    We average the weights by using the final ELBO for each model.
+
+    :param normalization: The normalization scheme for the final ELBOs.
+    Options are (`softmax`, `sum`).
+    :raises KeyError: If the normalization scheme is not recognized.
+    """
+
+    if self.n_models < 2:
+        return self
+
+    # Extract the models that converged successfully:
+    models_to_keep = np.where(self.models_to_keep)[0]
+
+    elbos = self.history['ELBO'][-1][models_to_keep]
+
+    if normalization == 'softmax':
+        from scipy.special import softmax
+        weights = np.array(softmax(elbos))
+    elif normalization == 'sum':
+        weights = np.array(elbos)
+
+        # Correction for negative ELBOs:
+        weights = weights - weights.min() + 1.
+        weights /= weights.sum()
+    else:
+        raise KeyError("Normalization scheme not recognized. "
+                       "Valid options are: `softmax`, `sum`. "
+                       "Got: {}".format(normalization))
+
+    if int(self.verbose) > 1:
+        logging.info("Averaging PRS models with weights:", weights)
+
+    # Average the model parameters:
+    for param in (self.pip, self.post_mean_beta, self.post_var_beta,
+                  self.var_gamma, self.var_mu, self.var_tau,
+                  self.eta, self.zeta, self.q):
+
+        for c in param:
+            param[c] = (param[c][:, models_to_keep]*weights).sum(axis=1)
+
+    # Set the number of models to 1:
+    self.n_models = 1
+
+    return self
+
+
diff --git a/api/model/gridsearch/VIPRSGrid/index.html b/api/model/gridsearch/VIPRSGrid/index.html
new file mode 100644

VIPRSGrid


VIPRSGrid

Bases: VIPRS

A class to fit the VIPRS model to data using a grid of hyperparameters. Instead of having a single set of hyperparameters, we simultaneously fit multiple models with different hyperparameters and compare their performance at the end. This class is generic and does not support any model selection or averaging schemes.


The class inherits all the basic attributes from the VIPRS class.


See Also

• VIPRSGridSearch
• VIPRSBMA

Attributes:

• grid_table: A pandas table containing the hyperparameters for each model.
• n_models: The number of models to fit.
• shapes: A dictionary containing the shapes of the data matrices.
• active_models: A boolean array indicating which models are still active (i.e. not converged).
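A hedged construction sketch (assuming gdl is a pre-loaded GWADataLoader; note that the assertion in __init__ below requires the grid to contain at least two hyperparameter settings):

from viprs.model.gridsearch.HyperparameterGrid import HyperparameterGrid
from viprs.model.gridsearch.VIPRSGrid import VIPRSGrid

grid = HyperparameterGrid(pi_steps=5, sigma_epsilon_steps=3)
model = VIPRSGrid(gdl, grid)  # prepares 15 models to be fit side by side by .fit()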
Source code in viprs/model/gridsearch/VIPRSGrid.py:
class VIPRSGrid(VIPRS):
+    """
+    A class to fit the `VIPRS` model to data using a grid of hyperparameters.
+    Instead of having a single set of hyperparameters, we simultaneously fit
+    multiple models with different hyperparameters and compare their performance
+    at the end. This class is generic and does not support any model selection or
+    averaging schemes.
+
+    The class inherits all the basic attributes from the [VIPRS][viprs.model.VIPRS.VIPRS] class.
+
+    !!! seealso "See Also"
+        * [VIPRSGridSearch][viprs.model.gridsearch.VIPRSGridSearch.VIPRSGridSearch]
+        * [VIPRSBMA][viprs.model.gridsearch.VIPRSBMA.VIPRSBMA]
+
+    :ivar grid_table: A pandas table containing the hyperparameters for each model.
+    :ivar n_models: The number of models to fit.
+    :ivar shapes: A dictionary containing the shapes of the data matrices.
+    :ivar active_models: A boolean array indicating which models are still active (i.e. not converged).
+
+    """
+
+    def __init__(self,
+                 gdl,
+                 grid,
+                 **kwargs):
+        """
+        Initialize the `VIPRS` model with a grid of hyperparameters.
+
+        :param gdl: An instance of `GWADataLoader`
+        :param grid: An instance of `HyperparameterGrid`
+        :param kwargs: Additional keyword arguments to pass to the parent `VIPRS` class.
+        """
+
+        self.grid_table = grid.to_table()
+        self.n_models = len(self.grid_table)
+        assert self.n_models > 1
+
+        grid_params = {c: self.grid_table[c].values for c in self.grid_table.columns}
+
+        if 'fix_params' not in kwargs:
+            kwargs['fix_params'] = grid_params
+        else:
+            kwargs['fix_params'].update(grid_params)
+
+        # Make sure that the matrices are in Fortran order:
+        kwargs['order'] = 'F'
+
+        super().__init__(gdl, **kwargs)
+
+        self.shapes = {c: (shp, self.n_models)
+                       for c, shp in self.shapes.items()}
+        self.active_models = None
+        self.Nj = {c: Nj[:, None].astype(self.float_precision, order=self.order) for c, Nj in self.Nj.items()}
+        self.optim_results = [OptimizeResult() for _ in range(self.n_models)]
+
+    @property
+    def models_to_keep(self):
+        """
+        :return: A boolean array indicating which models have converged successfully.
+        """
+        return np.logical_or(self.active_models, self.converged_models)
+
+    @property
+    def converged_models(self):
+        return np.array([optr.success for optr in self.optim_results])
+
+    def initialize_theta(self, theta_0=None):
+        """
+        Initialize the global hyperparameters of the model.
+        :param theta_0: A dictionary of initial values for the hyperparameters theta
+        """
+
+        self.active_models = np.array([True for _ in range(self.n_models)])
+
+        super().initialize_theta(theta_0=theta_0)
+
+        try:
+            if self.pi.shape != (self.n_models, ):
+                self.pi *= np.ones(shape=(self.n_models, ), dtype=self.float_precision)
+        except AttributeError:
+            self.pi *= np.ones(shape=(self.n_models,), dtype=self.float_precision)
+
+        try:
+            if self.tau_beta.shape != (self.n_models, ):
+                self.tau_beta *= np.ones(shape=(self.n_models, ), dtype=self.float_precision)
+        except AttributeError:
+            self.tau_beta *= np.ones(shape=(self.n_models,), dtype=self.float_precision)
+
+        try:
+            if self.sigma_epsilon.shape != (self.n_models, ):
+                self.sigma_epsilon *= np.ones(shape=(self.n_models, ), dtype=self.float_precision)
+        except AttributeError:
+            self.sigma_epsilon *= np.ones(shape=(self.n_models,), dtype=self.float_precision)
+
+        try:
+            if self._sigma_g.shape != (self.n_models, ):
+                self._sigma_g *= np.ones(shape=(self.n_models, ), dtype=self.float_precision)
+        except AttributeError:
+            self._sigma_g *= np.ones(shape=(self.n_models,), dtype=self.float_precision)
+
+    def init_optim_meta(self):
+        """
+        Initialize the various quantities/objects to keep track of the optimization process.
+         This method initializes the "history" object (which keeps track of the objective + other
+         hyperparameters requested by the user), in addition to the OptimizeResult objects.
+        """
+        super().init_optim_meta()
+
+        # Reset the OptimizeResult objects:
+        for optr in self.optim_results:
+            optr.reset()
+
+    def e_step(self):
+        """
+        Run the E-Step of the Variational EM algorithm.
+        Here, we update the variational parameters for each variant using coordinate
+        ascent optimization techniques. The coordinate ascent procedure is run on all the models
+        in the grid simultaneously. The update equations are outlined in
+        the Supplementary Material of the following paper:
+
+        > Zabad S, Gravel S, Li Y. Fast and accurate Bayesian polygenic risk modeling with variational inference.
+        Am J Hum Genet. 2023 May 4;110(5):741-761. doi: 10.1016/j.ajhg.2023.03.009.
+        Epub 2023 Apr 7. PMID: 37030289; PMCID: PMC10183379.
+        """
+
+        active_model_idx = np.where(self.active_models)[0].astype(np.int32)
+
+        for c, shapes in self.shapes.items():
+
+            # Get the priors:
+            tau_beta = self.get_tau_beta(c)
+            pi = self.get_pi(c)
+
+            # Updates for tau variational parameters:
+            # NOTE: Here, we compute the variational sigma in-place to avoid the need
+            # to change the order of the resulting matrix or its float precision:
+            np.add(self.Nj[c] / self.sigma_epsilon, tau_beta,
+                   out=self.var_tau[c])
+            np.log(self.var_tau[c], out=self._log_var_tau[c])
+
+            # Compute some quantities that are needed for the per-SNP updates:
+            mu_mult = self.Nj[c] / (self.var_tau[c] * self.sigma_epsilon)
+            u_logs = np.log(pi) - np.log(1. - pi) + .5 * (np.log(tau_beta) - self._log_var_tau[c])
+
+            if self.use_cpp:
+                cpp_e_step_grid(self.ld_left_bound[c],
+                                self.ld_indptr[c],
+                                self.ld_data[c],
+                                self.std_beta[c],
+                                self.var_gamma[c],
+                                self.var_mu[c],
+                                self.eta[c],
+                                self.q[c],
+                                self.eta_diff[c],
+                                u_logs,
+                                0.5 * self.var_tau[c],
+                                mu_mult,
+                                self.dequantize_scale,
+                                active_model_idx,
+                                self.threads,
+                                self.use_blas,
+                                self.low_memory)
+            else:
+
+                e_step_grid(self.ld_left_bound[c],
+                            self.ld_indptr[c],
+                            self.ld_data[c],
+                            self.std_beta[c],
+                            self.var_gamma[c],
+                            self.var_mu[c],
+                            self.eta[c],
+                            self.q[c],
+                            self.eta_diff[c],
+                            u_logs,
+                            0.5 * self.var_tau[c],
+                            mu_mult,
+                            active_model_idx,
+                            self.threads,
+                            self.use_blas,
+                            self.low_memory)
+
+        self.zeta = self.compute_zeta()
+
+    def to_theta_table(self):
+        """
+        :return: A `pandas` DataFrame containing information about the hyperparameters of the model.
+        """
+
+        if self.n_models == 1:
+            return super(VIPRSGrid, self).to_theta_table()
+
+        sig_e = self.sigma_epsilon
+        h2 = self.get_heritability()
+        pi = self.get_proportion_causal()
+
+        if isinstance(self.tau_beta, dict):
+            taus = dict_mean(self.tau_beta, axis=0)
+        else:
+            taus = self.tau_beta
+
+        theta_table = []
+
+        for m in range(self.n_models):
+
+            theta_table += [
+                {'Model': m, 'Parameter': 'Residual_variance', 'Value': sig_e[m]},
+                {'Model': m, 'Parameter': 'Heritability', 'Value': h2[m]},
+                {'Model': m, 'Parameter': 'Proportion_causal', 'Value': pi[m]},
+                {'Model': m, 'Parameter': 'sigma_beta', 'Value': taus[m]}
+            ]
+
+        return pd.DataFrame(theta_table)
+
+    def to_validation_table(self):
+        """
+        :return: The validation table summarizing the performance of each model.
+        :raises ValueError: if the validation result is not set.
+        """
+        if self.validation_result is None:
+            raise ValueError("Validation result is not set!")
+        elif len(self.validation_result) < 1:
+            raise ValueError("Validation result is not set!")
+
+        return pd.DataFrame(self.validation_result)
+
+    def write_validation_result(self, v_filename, sep="\t"):
+        """
+        After performing hyperparameter search, write a table
+        that records the value of the objective for each combination
+        of hyperparameters.
+        :param v_filename: The filename for the validation table.
+        :param sep: The separator for the validation table
+        """
+
+        v_df = self.to_validation_table()
+        v_df.to_csv(v_filename, index=False, sep=sep)
+
+    def fit(self,
+            max_iter=1000,
+            theta_0=None,
+            param_0=None,
+            continued=False,
+            min_iter=3,
+            f_abs_tol=1e-6,
+            x_abs_tol=1e-7,
+            drop_r_tol=1e-6,
+            patience=5):
+        """
+        A convenience method to fit all the models in the grid using the Variational EM algorithm.
+
+        :param max_iter: Maximum number of iterations. 
+        :param theta_0: A dictionary of values to initialize the hyperparameters
+        :param param_0: A dictionary of values to initialize the variational parameters
+        :param continued: If true, continue the model fitting for more iterations.
+        :param min_iter: The minimum number of iterations to run before checking for convergence.
+        :param f_abs_tol: The absolute tolerance threshold for the objective (ELBO).
+        :param x_abs_tol: The absolute tolerance threshold for the variational parameters.
+        :param drop_r_tol: The relative tolerance for the drop in the ELBO to be considered as a red flag. It usually
+        happens around convergence that the objective fluctuates due to numerical errors. This is a way to
+        differentiate such random fluctuations from actual drops in the objective.
+        :param patience: The maximum number of times the objective is allowed to drop before termination.
+        """
+
+        if not continued:
+            self.initialize(theta_0, param_0)
+            start_idx = 1
+        else:
+            start_idx = len(self.history['ELBO']) + 1
+            for i, optr in enumerate(self.optim_results):
+                self.active_models[i] = True
+                optr.update(self.history['ELBO'][i], increment=False)
+
+        patience = patience*np.ones(self.n_models)
+
+        logging.debug("> Performing model fit...")
+        if self.threads > 1:
+            logging.debug(f"> Using up to {self.threads} threads.")
+
+        # If the model is fit over a single chromosome, append this information to the
+        # tqdm progress bar:
+        if len(self.shapes) == 1:
+            chrom, shape = list(self.shapes.items())[0]
+            desc = f"Chromosome {chrom} ({shape[0]} variants)"
+        else:
+            desc = None
+
+        # Progress bar:
+        pbar = tqdm(range(start_idx, start_idx + max_iter),
+                    disable=not self.verbose,
+                    desc=desc)
+
+        for i in pbar:
+
+            if all([optr.stop_iteration for optr in self.optim_results]):
+
+                # If converged, update the progress bar before exiting:
+                pbar.set_postfix({
+                    'Best ELBO': f"{self.history['ELBO'][-1][self.models_to_keep].max():.4f}",
+                    'Models converged': f"{self.n_models - np.sum(self.active_models)}/{self.n_models}"
+                })
+                pbar.n = i - 1
+                pbar.total = i - 1
+                pbar.refresh()
+                pbar.close()
+
+                break
+
+            self.update_theta_history()
+
+            self.e_step()
+            self.m_step()
+
+            self.history['ELBO'].append(self.elbo(sum_axis=0))
+            h2 = self.get_heritability()
+
+            if i > 1:
+
+                # Get the current and previous ELBO values
+                curr_elbo = self.history['ELBO'][-1]
+                prev_elbo = self.history['ELBO'][-2]
+
+                for m in np.where(self.active_models)[0]:
+
+                    if (i > min_iter) and np.isclose(prev_elbo[m], curr_elbo[m], atol=f_abs_tol, rtol=0.):
+                        self.active_models[m] = False
+                        self.optim_results[m].update(curr_elbo[m],
+                                                     stop_iteration=True,
+                                                     success=True,
+                                                     message='Objective (ELBO) converged successfully.')
+                    elif (i > min_iter) and max([np.max(np.abs(diff[:, m]))
+                                               for diff in self.eta_diff.values()]) < x_abs_tol:
+                        self.active_models[m] = False
+                        self.optim_results[m].update(curr_elbo[m],
+                                                     stop_iteration=True,
+                                                     success=True,
+                                                     message='Variational parameters converged successfully.')
+
+                    # Check to see if the objective drops due to numerical instabilities:
+                    elif curr_elbo[m] < prev_elbo[m] and not np.isclose(curr_elbo[m],
+                                                                        prev_elbo[m],
+                                                                        atol=0.,
+                                                                        rtol=drop_r_tol):
+                        patience[m] -= 1
+
+                        if patience[m] == 0:
+                            self.active_models[m] = False
+                            self.optim_results[m].update(curr_elbo[m],
+                                                         stop_iteration=True,
+                                                         success=False,
+                                                         message='Optimization is halted '
+                                                                 'due to numerical instabilities.')
+                        else:
+                            self.optim_results[m].update(curr_elbo[m])
+
+                    # Check if the model parameters behave in unexpected/pathological ways:
+                    elif np.isnan(curr_elbo[m]):
+                        self.active_models[m] = False
+                        self.optim_results[m].update(curr_elbo[m],
+                                                     stop_iteration=True,
+                                                     success=False,
+                                                     message='The objective (ELBO) is NaN.')
+                    elif self.sigma_epsilon[m] <= 0.:
+                        self.active_models[m] = False
+                        self.optim_results[m].update(curr_elbo[m],
+                                                     stop_iteration=True,
+                                                     success=False,
+                                                     message='Optimization is halted (sigma_epsilon <= 0).')
+                    elif h2[m] >= 1.:
+                        self.active_models[m] = False
+                        self.optim_results[m].update(curr_elbo[m],
+                                                     stop_iteration=True,
+                                                     success=False,
+                                                     message='Optimization is halted (h2 >= 1).')
+                    else:
+                        self.optim_results[m].update(curr_elbo[m])
+
+                # -----------------------------------------------------------------------
+
+            if self.models_to_keep.sum() > 0:
+                pbar.set_postfix({
+                    'Best ELBO': f"{self.history['ELBO'][-1][self.models_to_keep].max():.4f}",
+                    'Models converged': f"{self.n_models - np.sum(self.active_models)}/{self.n_models}"
+                })
+
+        # Update posterior moments:
+        self.update_posterior_moments()
+
+        # Inspect the optimization results:
+        for m, optr in enumerate(self.optim_results):
+            if not optr.stop_iteration:
+                self.active_models[m] = False
+                optr.update(self.history['ELBO'][-1][m],
+                            stop_iteration=True,
+                            success=False,
+                            message="Maximum iterations reached without convergence.\n"
+                                    "You may need to run the model for more iterations.")
+
+        # Inform the user about potential issues:
+        if int(self.verbose) > 1:
+
+            if self.models_to_keep.sum() > 0:
+                logging.info(f"> Optimization is complete for all {self.n_models} models.")
+                logging.info(f"> {np.sum(self.models_to_keep)} model(s) converged successfully.")
+            else:
+                logging.error("> All models failed to converge. Please check the optimization results.")
+
+        self.validation_result = self.grid_table.copy()
+        self.validation_result['ELBO'] = [optr.fun for optr in self.optim_results]
+        self.validation_result['Converged'] = self.models_to_keep
+        self.validation_result['Optimization_message'] = [optr.message for optr in self.optim_results]
+
+        return self
+
+
+ + + +
+ + + + + + + +
+ + + +

models_to_keep (property)

Returns:

Type    Description
        A boolean array indicating which models have converged successfully.
+
+ +
+ + + +
+ + +

__init__(gdl, grid, **kwargs)

Initialize the VIPRS model with a grid of hyperparameters.

+ + +

Parameters:

Name     Description                                                        Default
gdl      An instance of GWADataLoader                                       required
grid     An instance of HyperparameterGrid                                  required
kwargs   Additional keyword arguments to pass to the parent VIPRS class.    {}
+ +
+ Source code in viprs/model/gridsearch/VIPRSGrid.py +
def __init__(self,
+             gdl,
+             grid,
+             **kwargs):
+    """
+    Initialize the `VIPRS` model with a grid of hyperparameters.
+
+    :param gdl: An instance of `GWADataLoader`
+    :param grid: An instance of `HyperparameterGrid`
+    :param kwargs: Additional keyword arguments to pass to the parent `VIPRS` class.
+    """
+
+    self.grid_table = grid.to_table()
+    self.n_models = len(self.grid_table)
+    assert self.n_models > 1
+
+    grid_params = {c: self.grid_table[c].values for c in self.grid_table.columns}
+
+    if 'fix_params' not in kwargs:
+        kwargs['fix_params'] = grid_params
+    else:
+        kwargs['fix_params'].update(grid_params)
+
+    # Make sure that the matrices are in Fortran order:
+    kwargs['order'] = 'F'
+
+    super().__init__(gdl, **kwargs)
+
+    self.shapes = {c: (shp, self.n_models)
+                   for c, shp in self.shapes.items()}
+    self.active_models = None
+    self.Nj = {c: Nj[:, None].astype(self.float_precision, order=self.order) for c, Nj in self.Nj.items()}
+    self.optim_results = [OptimizeResult() for _ in range(self.n_models)]
+
+
+
+ +
+ +
+ + +

e_step()

Run the E-Step of the Variational EM algorithm. Here, we update the variational parameters for each variant using coordinate ascent optimization techniques. The coordinate ascent procedure is run on all the models in the grid simultaneously. The update equations are outlined in the Supplementary Material of the following paper:

+
+

Zabad S, Gravel S, Li Y. Fast and accurate Bayesian polygenic risk modeling with variational inference. Am J Hum Genet. 2023 May 4;110(5):741-761. doi: 10.1016/j.ajhg.2023.03.009. Epub 2023 Apr 7. PMID: 37030289; PMCID: PMC10183379.
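To make the quantities computed in the listing below concrete (u_logs, mu_mult, and 0.5 * var_tau), here is a hedged NumPy sketch of the coordinate-ascent update for a single (variant, model) pair. The residualized marginal effect resid_beta is an assumption about what the e_step_grid/cpp_e_step_grid kernels compute internally, based on the paper cited above, not code taken from them:

import numpy as np

def snp_update_sketch(resid_beta, u_logs, half_var_tau, mu_mult):
    # resid_beta: standardized beta_j minus the LD-weighted contribution
    # of neighboring variants (maintained incrementally by the kernel).
    var_mu = mu_mult * resid_beta              # variational mean of beta_j
    u = u_logs + half_var_tau * var_mu ** 2    # logit of the inclusion prob.
    var_gamma = 1. / (1. + np.exp(-u))         # posterior inclusion prob. (PIP)
    eta = var_gamma * var_mu                   # posterior mean effect size
    return var_gamma, var_mu, eta

# Toy call with arbitrary values, purely to show the shapes of the update:
gamma_j, mu_j, eta_j = snp_update_sketch(0.02, -3.0, 150.0, 0.9)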

+
+ +
+ Source code in viprs/model/gridsearch/VIPRSGrid.py +
def e_step(self):
+    """
+    Run the E-Step of the Variational EM algorithm.
+    Here, we update the variational parameters for each variant using coordinate
+    ascent optimization techniques. The coordinate ascent procedure is run on all the models
+    in the grid simultaneously. The update equations are outlined in
+    the Supplementary Material of the following paper:
+
+    > Zabad S, Gravel S, Li Y. Fast and accurate Bayesian polygenic risk modeling with variational inference.
+    Am J Hum Genet. 2023 May 4;110(5):741-761. doi: 10.1016/j.ajhg.2023.03.009.
+    Epub 2023 Apr 7. PMID: 37030289; PMCID: PMC10183379.
+    """
+
+    active_model_idx = np.where(self.active_models)[0].astype(np.int32)
+
+    for c, shapes in self.shapes.items():
+
+        # Get the priors:
+        tau_beta = self.get_tau_beta(c)
+        pi = self.get_pi(c)
+
+        # Updates for tau variational parameters:
+        # NOTE: Here, we compute the variational sigma in-place to avoid the need
+        # to change the order of the resulting matrix or its float precision:
+        np.add(self.Nj[c] / self.sigma_epsilon, tau_beta,
+               out=self.var_tau[c])
+        np.log(self.var_tau[c], out=self._log_var_tau[c])
+
+        # Compute some quantities that are needed for the per-SNP updates:
+        mu_mult = self.Nj[c] / (self.var_tau[c] * self.sigma_epsilon)
+        u_logs = np.log(pi) - np.log(1. - pi) + .5 * (np.log(tau_beta) - self._log_var_tau[c])
+
+        if self.use_cpp:
+            cpp_e_step_grid(self.ld_left_bound[c],
+                            self.ld_indptr[c],
+                            self.ld_data[c],
+                            self.std_beta[c],
+                            self.var_gamma[c],
+                            self.var_mu[c],
+                            self.eta[c],
+                            self.q[c],
+                            self.eta_diff[c],
+                            u_logs,
+                            0.5 * self.var_tau[c],
+                            mu_mult,
+                            self.dequantize_scale,
+                            active_model_idx,
+                            self.threads,
+                            self.use_blas,
+                            self.low_memory)
+        else:
+
+            e_step_grid(self.ld_left_bound[c],
+                        self.ld_indptr[c],
+                        self.ld_data[c],
+                        self.std_beta[c],
+                        self.var_gamma[c],
+                        self.var_mu[c],
+                        self.eta[c],
+                        self.q[c],
+                        self.eta_diff[c],
+                        u_logs,
+                        0.5 * self.var_tau[c],
+                        mu_mult,
+                        active_model_idx,
+                        self.threads,
+                        self.use_blas,
+                        self.low_memory)
+
+    self.zeta = self.compute_zeta()
+
+
+
+ +
+ +
+ + +

fit(max_iter=1000, theta_0=None, param_0=None, continued=False, min_iter=3, f_abs_tol=1e-06, x_abs_tol=1e-07, drop_r_tol=1e-06, patience=5)

A convenience method to fit all the models in the grid using the Variational EM algorithm.

+ + +

Parameters:

Name         Description                                                                          Default
max_iter     Maximum number of iterations.                                                        1000
theta_0      A dictionary of values to initialize the hyperparameters.                            None
param_0      A dictionary of values to initialize the variational parameters.                     None
continued    If true, continue the model fitting for more iterations.                             False
min_iter     The minimum number of iterations to run before checking for convergence.             3
f_abs_tol    The absolute tolerance threshold for the objective (ELBO).                           1e-06
x_abs_tol    The absolute tolerance threshold for the variational parameters.                     1e-07
drop_r_tol   The relative tolerance for the drop in the ELBO to be considered as a red flag.      1e-06
             It usually happens around convergence that the objective fluctuates due to
             numerical errors; this differentiates such random fluctuations from actual drops.
patience     The maximum number of times the objective is allowed to drop before termination.     5
+ +
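A hedged example call; `model` is the hypothetical VIPRSGrid instance constructed earlier on this page:

# Looser tolerances and a smaller iteration budget than the defaults:
model.fit(max_iter=500,
          f_abs_tol=1e-5,
          x_abs_tol=1e-6)

# Models that hit numerical issues are recorded rather than raised:
print(model.to_validation_table()[['ELBO', 'Converged', 'Optimization_message']])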
+ Source code in viprs/model/gridsearch/VIPRSGrid.py +
def fit(self,
+        max_iter=1000,
+        theta_0=None,
+        param_0=None,
+        continued=False,
+        min_iter=3,
+        f_abs_tol=1e-6,
+        x_abs_tol=1e-7,
+        drop_r_tol=1e-6,
+        patience=5):
+    """
+    A convenience method to fit all the models in the grid using the Variational EM algorithm.
+
+    :param max_iter: Maximum number of iterations. 
+    :param theta_0: A dictionary of values to initialize the hyperparameters
+    :param param_0: A dictionary of values to initialize the variational parameters
+    :param continued: If true, continue the model fitting for more iterations.
+    :param min_iter: The minimum number of iterations to run before checking for convergence.
+    :param f_abs_tol: The absolute tolerance threshold for the objective (ELBO).
+    :param x_abs_tol: The absolute tolerance threshold for the variational parameters.
+    :param drop_r_tol: The relative tolerance for the drop in the ELBO to be considered as a red flag. It usually
+    happens around convergence that the objective fluctuates due to numerical errors. This is a way to
+    differentiate such random fluctuations from actual drops in the objective.
+    :param patience: The maximum number of times the objective is allowed to drop before termination.
+    """
+
+    if not continued:
+        self.initialize(theta_0, param_0)
+        start_idx = 1
+    else:
+        start_idx = len(self.history['ELBO']) + 1
+        for i, optr in enumerate(self.optim_results):
+            self.active_models[i] = True
+            optr.update(self.history['ELBO'][i], increment=False)
+
+    patience = patience*np.ones(self.n_models)
+
+    logging.debug("> Performing model fit...")
+    if self.threads > 1:
+        logging.debug(f"> Using up to {self.threads} threads.")
+
+    # If the model is fit over a single chromosome, append this information to the
+    # tqdm progress bar:
+    if len(self.shapes) == 1:
+        chrom, shape = list(self.shapes.items())[0]
+        desc = f"Chromosome {chrom} ({shape[0]} variants)"
+    else:
+        desc = None
+
+    # Progress bar:
+    pbar = tqdm(range(start_idx, start_idx + max_iter),
+                disable=not self.verbose,
+                desc=desc)
+
+    for i in pbar:
+
+        if all([optr.stop_iteration for optr in self.optim_results]):
+
+            # If converged, update the progress bar before exiting:
+            pbar.set_postfix({
+                'Best ELBO': f"{self.history['ELBO'][-1][self.models_to_keep].max():.4f}",
+                'Models converged': f"{self.n_models - np.sum(self.active_models)}/{self.n_models}"
+            })
+            pbar.n = i - 1
+            pbar.total = i - 1
+            pbar.refresh()
+            pbar.close()
+
+            break
+
+        self.update_theta_history()
+
+        self.e_step()
+        self.m_step()
+
+        self.history['ELBO'].append(self.elbo(sum_axis=0))
+        h2 = self.get_heritability()
+
+        if i > 1:
+
+            # Get the current and previous ELBO values
+            curr_elbo = self.history['ELBO'][-1]
+            prev_elbo = self.history['ELBO'][-2]
+
+            for m in np.where(self.active_models)[0]:
+
+                if (i > min_iter) and np.isclose(prev_elbo[m], curr_elbo[m], atol=f_abs_tol, rtol=0.):
+                    self.active_models[m] = False
+                    self.optim_results[m].update(curr_elbo[m],
+                                                 stop_iteration=True,
+                                                 success=True,
+                                                 message='Objective (ELBO) converged successfully.')
+                elif (i > min_iter) and max([np.max(np.abs(diff[:, m]))
+                                           for diff in self.eta_diff.values()]) < x_abs_tol:
+                    self.active_models[m] = False
+                    self.optim_results[m].update(curr_elbo[m],
+                                                 stop_iteration=True,
+                                                 success=True,
+                                                 message='Variational parameters converged successfully.')
+
+                # Check to see if the objective drops due to numerical instabilities:
+                elif curr_elbo[m] < prev_elbo[m] and not np.isclose(curr_elbo[m],
+                                                                    prev_elbo[m],
+                                                                    atol=0.,
+                                                                    rtol=drop_r_tol):
+                    patience[m] -= 1
+
+                    if patience[m] == 0:
+                        self.active_models[m] = False
+                        self.optim_results[m].update(curr_elbo[m],
+                                                     stop_iteration=True,
+                                                     success=False,
+                                                     message='Optimization is halted '
+                                                             'due to numerical instabilities.')
+                    else:
+                        self.optim_results[m].update(curr_elbo[m])
+
+                # Check if the model parameters behave in unexpected/pathological ways:
+                elif np.isnan(curr_elbo[m]):
+                    self.active_models[m] = False
+                    self.optim_results[m].update(curr_elbo[m],
+                                                 stop_iteration=True,
+                                                 success=False,
+                                                 message='The objective (ELBO) is NaN.')
+                elif self.sigma_epsilon[m] <= 0.:
+                    self.active_models[m] = False
+                    self.optim_results[m].update(curr_elbo[m],
+                                                 stop_iteration=True,
+                                                 success=False,
+                                                 message='Optimization is halted (sigma_epsilon <= 0).')
+                elif h2[m] >= 1.:
+                    self.active_models[m] = False
+                    self.optim_results[m].update(curr_elbo[m],
+                                                 stop_iteration=True,
+                                                 success=False,
+                                                 message='Optimization is halted (h2 >= 1).')
+                else:
+                    self.optim_results[m].update(curr_elbo[m])
+
+            # -----------------------------------------------------------------------
+
+        if self.models_to_keep.sum() > 0:
+            pbar.set_postfix({
+                'Best ELBO': f"{self.history['ELBO'][-1][self.models_to_keep].max():.4f}",
+                'Models converged': f"{self.n_models - np.sum(self.active_models)}/{self.n_models}"
+            })
+
+    # Update posterior moments:
+    self.update_posterior_moments()
+
+    # Inspect the optimization results:
+    for m, optr in enumerate(self.optim_results):
+        if not optr.stop_iteration:
+            self.active_models[m] = False
+            optr.update(self.history['ELBO'][-1][m],
+                        stop_iteration=True,
+                        success=False,
+                        message="Maximum iterations reached without convergence.\n"
+                                "You may need to run the model for more iterations.")
+
+    # Inform the user about potential issues:
+    if int(self.verbose) > 1:
+
+        if self.models_to_keep.sum() > 0:
+            logging.info(f"> Optimization is complete for all {self.n_models} models.")
+            logging.info(f"> {np.sum(self.models_to_keep)} model(s) converged successfully.")
+        else:
+            logging.error("> All models failed to converge. Please check the optimization results.")
+
+    self.validation_result = self.grid_table.copy()
+    self.validation_result['ELBO'] = [optr.fun for optr in self.optim_results]
+    self.validation_result['Converged'] = self.models_to_keep
+    self.validation_result['Optimization_message'] = [optr.message for optr in self.optim_results]
+
+    return self
+
+
+
+ +
+ +
+ + +

init_optim_meta()

Initialize the various quantities/objects to keep track of the optimization process. This method initializes the "history" object (which keeps track of the objective + other hyperparameters requested by the user), in addition to the OptimizeResult objects.

+ +
+ Source code in viprs/model/gridsearch/VIPRSGrid.py +
def init_optim_meta(self):
+    """
+    Initialize the various quantities/objects to keep track of the optimization process.
+     This method initializes the "history" object (which keeps track of the objective + other
+     hyperparameters requested by the user), in addition to the OptimizeResult objects.
+    """
+    super().init_optim_meta()
+
+    # Reset the OptimizeResult objects:
+    for optr in self.optim_results:
+        optr.reset()
+
+
+
+ +
+ +
+ + +

initialize_theta(theta_0=None)

Initialize the global hyperparameters of the model.

+ + +

Parameters:

Name      Description                                                      Default
theta_0   A dictionary of initial values for the hyperparameters theta.   None
+ +
+ Source code in viprs/model/gridsearch/VIPRSGrid.py +
def initialize_theta(self, theta_0=None):
+    """
+    Initialize the global hyperparameters of the model.
+    :param theta_0: A dictionary of initial values for the hyperparameters theta
+    """
+
+    self.active_models = np.array([True for _ in range(self.n_models)])
+
+    super().initialize_theta(theta_0=theta_0)
+
+    try:
+        if self.pi.shape != (self.n_models, ):
+            self.pi *= np.ones(shape=(self.n_models, ), dtype=self.float_precision)
+    except AttributeError:
+        self.pi *= np.ones(shape=(self.n_models,), dtype=self.float_precision)
+
+    try:
+        if self.tau_beta.shape != (self.n_models, ):
+            self.tau_beta *= np.ones(shape=(self.n_models, ), dtype=self.float_precision)
+    except AttributeError:
+        self.tau_beta *= np.ones(shape=(self.n_models,), dtype=self.float_precision)
+
+    try:
+        if self.sigma_epsilon.shape != (self.n_models, ):
+            self.sigma_epsilon *= np.ones(shape=(self.n_models, ), dtype=self.float_precision)
+    except AttributeError:
+        self.sigma_epsilon *= np.ones(shape=(self.n_models,), dtype=self.float_precision)
+
+    try:
+        if self._sigma_g.shape != (self.n_models, ):
+            self._sigma_g *= np.ones(shape=(self.n_models, ), dtype=self.float_precision)
+    except AttributeError:
+        self._sigma_g *= np.ones(shape=(self.n_models,), dtype=self.float_precision)
+
+
+
+ +
+ +
+ + +

to_theta_table()

Returns:

Type    Description
        A pandas DataFrame containing information about the hyperparameters of the model.
+ +
+ Source code in viprs/model/gridsearch/VIPRSGrid.py +
def to_theta_table(self):
+    """
+    :return: A `pandas` DataFrame containing information about the hyperparameters of the model.
+    """
+
+    if self.n_models == 1:
+        return super(VIPRSGrid, self).to_theta_table()
+
+    sig_e = self.sigma_epsilon
+    h2 = self.get_heritability()
+    pi = self.get_proportion_causal()
+
+    if isinstance(self.tau_beta, dict):
+        taus = dict_mean(self.tau_beta, axis=0)
+    else:
+        taus = self.tau_beta
+
+    theta_table = []
+
+    for m in range(self.n_models):
+
+        theta_table += [
+            {'Model': m, 'Parameter': 'Residual_variance', 'Value': sig_e[m]},
+            {'Model': m, 'Parameter': 'Heritability', 'Value': h2[m]},
+            {'Model': m, 'Parameter': 'Proportion_causal', 'Value': pi[m]},
+            {'Model': m, 'Parameter': 'sigma_beta', 'Value': taus[m]}
+        ]
+
+    return pd.DataFrame(theta_table)
+
+
+
+ +
+ +
+ + +

to_validation_table()

Returns:

Type         Description
             The validation table summarizing the performance of each model.

Raises:

Type         Description
ValueError   If the validation result is not set.
+ Source code in viprs/model/gridsearch/VIPRSGrid.py +
def to_validation_table(self):
+    """
+    :return: The validation table summarizing the performance of each model.
+    :raises ValueError: if the validation result is not set.
+    """
+    if self.validation_result is None:
+        raise ValueError("Validation result is not set!")
+    elif len(self.validation_result) < 1:
+        raise ValueError("Validation result is not set!")
+
+    return pd.DataFrame(self.validation_result)
+
+
+
+ +
+ +
+ + +

write_validation_result(v_filename, sep='\t')

After performing hyperparameter search, write a table that records the value of the objective for each combination of hyperparameters.

+ + +

Parameters:

Name         Description                               Default
v_filename   The filename for the validation table.    required
sep          The separator for the validation table.   '\t'
+ +
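For example, assuming `model` has been fit as in the earlier sketch (filename is illustrative):

model.write_validation_result("viprs_grid_validation.tsv")   # tab-separated by default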
+ Source code in viprs/model/gridsearch/VIPRSGrid.py +
def write_validation_result(self, v_filename, sep="\t"):
+    """
+    After performing hyperparameter search, write a table
+    that records the value of the objective for each combination
+    of hyperparameters.
+    :param v_filename: The filename for the validation table.
+    :param sep: The separator for the validation table
+    """
+
+    v_df = self.to_validation_table()
+    v_df.to_csv(v_filename, index=False, sep=sep)
+
+
+
diff --git a/api/model/gridsearch/VIPRSGridSearch/index.html b/api/model/gridsearch/VIPRSGridSearch/index.html
new file mode 100644
index 0000000..654a6a9
--- /dev/null
+++ b/api/model/gridsearch/VIPRSGridSearch/index.html
@@ -0,0 +1,1357 @@

VIPRSGridSearch - Variational Inference of Polygenic Risk Scores (VIPRS)

VIPRSGridSearch

VIPRSGridSearch

Bases: VIPRSGrid

The VIPRSGridSearch class is an extension of the VIPRSGrid class that implements grid search for the VIPRS models. The grid search procedure fits multiple models to the data, each with different hyperparameters, and selects the best model based on user-defined criteria.

+

The criteria supported are:

  • ELBO: The model with the highest ELBO is selected.
  • validation: The model with the highest R^2 on the validation set is selected.
  • pseudo_validation: The model with the highest pseudo-validation R^2 is selected.

Note that the validation and pseudo_validation criteria require the user to provide validation data in the form of paired genotype/phenotype data or external GWAS summary statistics.

+ +
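A hedged sketch of the full grid-search workflow; `gdl` and `grid` are constructed as in the hypothetical VIPRSGrid example above, and `validation_gdl` is an optional held-out GWADataLoader (both are illustrative assumptions):

from viprs.model.gridsearch.VIPRSGridSearch import VIPRSGridSearch

vgs = VIPRSGridSearch(gdl, grid=grid)
vgs.fit()

# No validation data needed for the ELBO criterion:
vgs.select_best_model(criterion='ELBO')

# With held-out data, one of:
# vgs.select_best_model(validation_gdl=validation_gdl, criterion='validation')
# vgs.select_best_model(validation_gdl=validation_gdl, criterion='pseudo_validation')

# Per-model ELBOs, convergence flags, and (if used) validation scores:
print(vgs.to_validation_table())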
+ Source code in viprs/model/gridsearch/VIPRSGridSearch.py +
class VIPRSGridSearch(VIPRSGrid):
+    """
+    The `VIPRSGridSearch` class is an extension of the `VIPRSGrid` class that
+    implements grid search for the `VIPRS` models. The grid search procedure
+    fits multiple models to the data, each with different hyperparameters,
+    and selects the best model based on user-defined criteria.
+
+    The criteria supported are:
+
+    * `ELBO`: The model with the highest ELBO is selected.
+    * `validation`: The model with the highest R^2 on the validation set is selected.
+    * `pseudo_validation`: The model with the highest pseudo-validation R^2 is selected.
+
+    Note that the `validation` and `pseudo_validation` criteria require the user to provide
+    validation data in the form of paired genotype/phenotype data or external GWAS summary
+    statistics.
+
+    """
+
+    def __init__(self,
+                 gdl,
+                 grid,
+                 **kwargs):
+        """
+        Initialize the `VIPRSGridSearch` model.
+
+        :param gdl: An instance of `GWADataLoader`
+        :param grid: An instance of `HyperparameterGrid`
+        :param kwargs: Additional keyword arguments to pass to the parent `VIPRSGrid` class.
+        """
+
+        super().__init__(gdl, grid=grid, **kwargs)
+
+    def select_best_model(self, validation_gdl=None, criterion='ELBO'):
+        """
+        From the grid of models that were fit to the data, select the best 
+        model according to the specified `criterion`. If the criterion is the ELBO,
+        the model with the highest ELBO will be selected. If the criterion is
+        validation or pseudo-validation, the model with the highest R^2 on the
+        validation set will be selected.
+
+        :param validation_gdl: An instance of `GWADataLoader` containing data from the validation set.
+        Must be provided if criterion is `validation` or `pseudo_validation`.
+        :param criterion: The criterion for selecting the best model. 
+        Options are: (`ELBO`, `validation`, `pseudo_validation`)
+        """
+
+        assert criterion in ('ELBO', 'validation', 'pseudo_validation')
+
+        # Extract the models that converged successfully:
+        models_converged = self.models_to_keep
+
+        if criterion == 'ELBO':
+            elbo = self.history['ELBO'][-1]
+            elbo[~models_converged] = -np.inf
+            best_model_idx = np.argmax(elbo)
+        elif criterion == 'validation':
+
+            assert validation_gdl is not None
+            assert validation_gdl.sample_table is not None
+            assert validation_gdl.sample_table.phenotype is not None
+
+            from viprs.eval.continuous_metrics import r2
+
+            prs = self.predict(test_gdl=validation_gdl)
+            prs_r2 = np.array([r2(prs[:, i], validation_gdl.sample_table.phenotype) for i in range(self.n_models)])
+            prs_r2[~models_converged] = -np.inf
+            self.validation_result['Validation_R2'] = prs_r2
+            best_model_idx = np.argmax(prs_r2)
+        elif criterion == 'pseudo_validation':
+
+            pseudo_corr = self.pseudo_validate(validation_gdl, metric='r2')
+            pseudo_corr[~models_converged] = -np.inf
+            self.validation_result['Pseudo_Validation_Corr'] = pseudo_corr
+            best_model_idx = np.argmax(np.nan_to_num(pseudo_corr, nan=-1., neginf=-1., posinf=-1.))
+
+        if int(self.verbose) > 1:
+            logging.info(f"> Based on the {criterion} criterion, selected model: {best_model_idx}")
+            logging.info("> Model details:\n")
+            logging.info(self.validation_result.iloc[best_model_idx, :])
+
+        # Update the variational parameters and their dependencies:
+        for param in (self.pip, self.post_mean_beta, self.post_var_beta,
+                      self.var_gamma, self.var_mu, self.var_tau,
+                      self.eta, self.zeta, self.q):
+
+            for c in param:
+                param[c] = param[c][:, best_model_idx]
+
+        # Update sigma epsilon:
+        self.sigma_epsilon = self.sigma_epsilon[best_model_idx]
+
+        # Update sigma beta:
+        if isinstance(self.tau_beta, dict):
+            for c in self.tau_beta:
+                self.tau_beta[c] = self.tau_beta[c][:, best_model_idx]
+        else:
+            self.tau_beta = self.tau_beta[best_model_idx]
+
+        # Update pi
+
+        if isinstance(self.pi, dict):
+            for c in self.pi:
+                self.pi[c] = self.pi[c][:, best_model_idx]
+        else:
+            self.pi = self.pi[best_model_idx]
+
+        # Set the number of models to 1:
+        self.n_models = 1
+
+        return self
+
+
+ + + +
+ + + + + + + + + +
+ + +

__init__(gdl, grid, **kwargs)

Initialize the VIPRSGridSearch model.

+ + +

Parameters:

Name     Description                                                            Default
gdl      An instance of GWADataLoader                                           required
grid     An instance of HyperparameterGrid                                      required
kwargs   Additional keyword arguments to pass to the parent VIPRSGrid class.    {}
+ +
+ Source code in viprs/model/gridsearch/VIPRSGridSearch.py +
def __init__(self,
+             gdl,
+             grid,
+             **kwargs):
+    """
+    Initialize the `VIPRSGridSearch` model.
+
+    :param gdl: An instance of `GWADataLoader`
+    :param grid: An instance of `HyperparameterGrid`
+    :param kwargs: Additional keyword arguments to pass to the parent `VIPRSGrid` class.
+    """
+
+    super().__init__(gdl, grid=grid, **kwargs)
+
+
+
+ +
+ +
+ + +

select_best_model(validation_gdl=None, criterion='ELBO')

From the grid of models that were fit to the data, select the best model according to the specified criterion. If the criterion is the ELBO, the model with the highest ELBO will be selected. If the criterion is validation or pseudo-validation, the model with the highest R^2 on the validation set will be selected.

+ + +

Parameters:

Name             Description                                                                      Default
validation_gdl   An instance of GWADataLoader containing data from the validation set. Must      None
                 be provided if criterion is validation or pseudo_validation.
criterion        The criterion for selecting the best model. Options are: (ELBO, validation,     'ELBO'
                 pseudo_validation)
+ +
+ Source code in viprs/model/gridsearch/VIPRSGridSearch.py +
def select_best_model(self, validation_gdl=None, criterion='ELBO'):
+    """
+    From the grid of models that were fit to the data, select the best 
+    model according to the specified `criterion`. If the criterion is the ELBO,
+    the model with the highest ELBO will be selected. If the criterion is
+    validation or pseudo-validation, the model with the highest R^2 on the
+    validation set will be selected.
+
+    :param validation_gdl: An instance of `GWADataLoader` containing data from the validation set.
+    Must be provided if criterion is `validation` or `pseudo_validation`.
+    :param criterion: The criterion for selecting the best model. 
+    Options are: (`ELBO`, `validation`, `pseudo_validation`)
+    """
+
+    assert criterion in ('ELBO', 'validation', 'pseudo_validation')
+
+    # Extract the models that converged successfully:
+    models_converged = self.models_to_keep
+
+    if criterion == 'ELBO':
+        elbo = self.history['ELBO'][-1]
+        elbo[~models_converged] = -np.inf
+        best_model_idx = np.argmax(elbo)
+    elif criterion == 'validation':
+
+        assert validation_gdl is not None
+        assert validation_gdl.sample_table is not None
+        assert validation_gdl.sample_table.phenotype is not None
+
+        from viprs.eval.continuous_metrics import r2
+
+        prs = self.predict(test_gdl=validation_gdl)
+        prs_r2 = np.array([r2(prs[:, i], validation_gdl.sample_table.phenotype) for i in range(self.n_models)])
+        prs_r2[~models_converged] = -np.inf
+        self.validation_result['Validation_R2'] = prs_r2
+        best_model_idx = np.argmax(prs_r2)
+    elif criterion == 'pseudo_validation':
+
+        pseudo_corr = self.pseudo_validate(validation_gdl, metric='r2')
+        pseudo_corr[~models_converged] = -np.inf
+        self.validation_result['Pseudo_Validation_Corr'] = pseudo_corr
+        best_model_idx = np.argmax(np.nan_to_num(pseudo_corr, nan=-1., neginf=-1., posinf=-1.))
+
+    if int(self.verbose) > 1:
+        logging.info(f"> Based on the {criterion} criterion, selected model: {best_model_idx}")
+        logging.info("> Model details:\n")
+        logging.info(self.validation_result.iloc[best_model_idx, :])
+
+    # Update the variational parameters and their dependencies:
+    for param in (self.pip, self.post_mean_beta, self.post_var_beta,
+                  self.var_gamma, self.var_mu, self.var_tau,
+                  self.eta, self.zeta, self.q):
+
+        for c in param:
+            param[c] = param[c][:, best_model_idx]
+
+    # Update sigma epsilon:
+    self.sigma_epsilon = self.sigma_epsilon[best_model_idx]
+
+    # Update sigma beta:
+    if isinstance(self.tau_beta, dict):
+        for c in self.tau_beta:
+            self.tau_beta[c] = self.tau_beta[c][:, best_model_idx]
+    else:
+        self.tau_beta = self.tau_beta[best_model_idx]
+
+    # Update pi
+
+    if isinstance(self.pi, dict):
+        for c in self.pi:
+            self.pi[c] = self.pi[c][:, best_model_idx]
+    else:
+        self.pi = self.pi[best_model_idx]
+
+    # Set the number of models to 1:
+    self.n_models = 1
+
+    return self
+
+
+
diff --git a/api/overview/index.html b/api/overview/index.html
new file mode 100644
index 0000000..2467b11
--- /dev/null
+++ b/api/overview/index.html
@@ -0,0 +1,866 @@

API Reference - Variational Inference of Polygenic Risk Scores (VIPRS)

API Reference

+ +

Models

+
  • BayesPRSModel: A base class for all Bayesian PRS models.
  • VIPRS: Implementation of VIPRS with the "spike-and-slab" prior.
      • Implementation of VIPRS with other priors:
          • VIPRSMix: VIPRS with a sparse Gaussian mixture prior.
  • Hyperparameter Tuning: Models/modules for performing hyperparameter search with VIPRS models.
  • Baseline Models:
      • LDPredInf: Implementation of the LDPred-inf model.

Model Evaluation

+ +

Utilities

+
  • Data utilities: Utilities for downloading and processing relevant data.
  • Compute utilities: Utilities for computing various statistics / quantities over python data structures.
  • Exceptions: Custom exceptions used in the package.
  • OptimizeResult: A class to store the result of an optimization routine.

Plotting

+
  • Diagnostic plots: Functions for plotting various quantities / results from VIPRS or other PRS models.
diff --git a/api/plot/diagnostics/index.html b/api/plot/diagnostics/index.html
new file mode 100644
index 0000000..9b9ad76
--- /dev/null
+++ b/api/plot/diagnostics/index.html
@@ -0,0 +1,876 @@

Diagnostics - Variational Inference of Polygenic Risk Scores (VIPRS)

Diagnostics


plot_history(prs_model, quantity=None)

This function plots the optimization history for various model parameters and/or objectives. For every iteration step, we generally save quantities such as the ELBO, the heritability, etc. For the purposes of debugging and checking model convergence, it is useful to visually observe the trajectory of these quantities as a function of training iteration.

+ + +

Parameters:

Name        Description                                               Default
prs_model   A VIPRS (or its derived classes) object.                  required
quantity    The quantities to plot (e.g. ELBO, heritability, etc.).   None
+ +
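A hedged usage sketch; it assumes `model` is a fitted VIPRS-family object whose history dict tracks the ELBO across iterations:

import matplotlib.pyplot as plt
from viprs.plot.diagnostics import plot_history

g = plot_history(model, quantity='ELBO')   # returns a seaborn FacetGrid
g.savefig("elbo_history.png")
plt.show()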
+ Source code in viprs/plot/diagnostics.py +
def plot_history(prs_model, quantity=None):
+    """
+    This function plots the optimization history for various model parameters and/or objectives. For
+    every iteration step, we generally save quantities such as the ELBO, the heritability, etc. For the purposes
+    of debugging and checking model convergence, it is useful to visually observe the trajectory
+    of these quantities as a function of training iteration.
+
+    :param prs_model: A `VIPRS` (or its derived classes) object.
+    :param quantity: The quantities to plot (e.g. `ELBO`, `heritability`, etc.).
+    """
+
+    if quantity is None:
+        quantity = prs_model.history.keys()
+    elif isinstance(quantity, str):
+        quantity = [quantity]
+
+    q_dfs = []
+
+    for attr in quantity:
+
+        df = pd.DataFrame({'Value': prs_model.history[attr]})
+        df.reset_index(inplace=True)
+        df.columns = ['Step', 'Value']
+        df['Quantity'] = attr
+
+        q_dfs.append(df)
+
+    q_dfs = pd.concat(q_dfs)
+
+    g = sns.relplot(
+        data=q_dfs, x="Step", y="Value",
+        row="Quantity",
+        facet_kws={'sharey': False, 'sharex': True},
+        kind="scatter",
+        marker="."
+    )
+
+    return g
+
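For illustration, a minimal usage sketch. This assumes a VIPRS model that has already been fit on a magenpy GWADataLoader (the gdl object below is hypothetical), and that 'ELBO' is among the tracked history keys:

import viprs as vp
from viprs.plot.diagnostics import plot_history

# gdl: a hypothetical GWADataLoader with GWAS summary statistics + LD matrices
model = vp.VIPRS(gdl)
model.fit()

# Plot the trajectory of the ELBO across training iterations:
g = plot_history(model, quantity='ELBO')
g.savefig("elbo_history.png")  # g is a seaborn FacetGrid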
+
+
+ +
+ + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/api/utils/OptimizeResult/index.html b/api/utils/OptimizeResult/index.html new file mode 100644 index 0000000..9587073 --- /dev/null +++ b/api/utils/OptimizeResult/index.html @@ -0,0 +1,1154 @@ + + + + + + + + + + + + + + + + + + + OptimizeResult - Variational Inference of Polygenic Risk Scores (VIPRS) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

OptimizeResult

+ +
+ + + + +
+ + + +
+ + + + + + + + +
+ + + +

+ OptimizeResult + + +

+ + +
+

+ Bases: object

+ + +

A class to store the results/progress of an optimization algorithm. +Similar to the OptimizeResult class from scipy.optimize, +but with a few additional fields.

+ +
+ Source code in viprs/utils/OptimizeResult.py +
 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+10
+11
+12
+13
+14
+15
+16
+17
+18
+19
+20
+21
+22
+23
+24
+25
+26
+27
+28
+29
+30
+31
+32
+33
+34
+35
+36
+37
+38
+39
+40
+41
+42
+43
+44
+45
+46
+47
+48
+49
+50
+51
+52
+53
+54
+55
+56
+57
+58
+59
+60
+61
class OptimizeResult(object):
+    """
+    A class to store the results/progress of an optimization algorithm.
+    Similar to the `OptimizeResult` class from `scipy.optimize`,
+    but with a few additional fields.
+    """
+
+    def __init__(self):
+
+        self.message = None
+        self.stop_iteration = None
+        self.success = None
+        self.fun = None
+        self.nit = None
+
+    @property
+    def iterations(self):
+        """
+        Return the current number of iterations.
+        """
+        return self.nit
+
+    @property
+    def objective(self):
+        """
+        Return the current value of the objective function.
+        """
+        return self.fun
+
+    def reset(self):
+        """
+        Reset the stored values to their initial state.
+        """
+
+        self.message = None
+        self.stop_iteration = False
+        self.success = False
+        self.fun = None
+        self.nit = 0
+
+    def update(self, fun, stop_iteration=False, success=False, message=None, increment=True):
+        """
+        Update the stored values with new values.
+        :param fun: The new objective function value
+        :param stop_iteration: A flag to indicate whether the optimization algorithm has stopped iterating
+        :param success: A flag to indicate whether the optimization algorithm has succeeded
+        :param message: A detailed message about the optimization result.
+        :param increment: A flag to indicate whether to increment the number of iterations.
+        """
+
+        self.fun = fun
+        self.stop_iteration = stop_iteration
+        self.success = success
+        self.message = message
+
+        self.nit += int(increment)
+
+    def __str__(self):
+        return str(self.__dict__)
+
+
+ + + +
+ + + + + + + +
+ + + +

+ iterations + + + property + + +

+ + +
+ +

Return the current number of iterations.

+
+ +
+ +
+ + + +

+ objective + + + property + + +

+ + +
+ +

Return the current value of the objective function.

+
+ +
+ + + +
+ + +

+ reset() + +

+ + +
+ +

Reset the stored values to their initial state.

+ +
+ Source code in viprs/utils/OptimizeResult.py +
32
+33
+34
+35
+36
+37
+38
+39
+40
+41
def reset(self):
+    """
+    Reset the stored values to their initial state.
+    """
+
+    self.message = None
+    self.stop_iteration = False
+    self.success = False
+    self.fun = None
+    self.nit = 0
+
+
+
+ +
+ +
+ + +

+ update(fun, stop_iteration=False, success=False, message=None, increment=True) + +

+ + +
+ +

Update the stored values with new values.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
fun + +
+

The new objective function value

+
+
+ required +
stop_iteration + +
+

A flag to indicate whether the optimization algorithm has stopped iterating

+
+
+ False +
success + +
+

A flag to indicate whether the optimization algorithm has succeeded

+
+
+ False +
message + +
+

A detailed message about the optimization result.

+
+
+ None +
increment + +
+

A flag to indicate whether to increment the number of iterations.

+
+
+ True +
+ +
+ Source code in viprs/utils/OptimizeResult.py +
43
+44
+45
+46
+47
+48
+49
+50
+51
+52
+53
+54
+55
+56
+57
+58
def update(self, fun, stop_iteration=False, success=False, message=None, increment=True):
+    """
+    Update the stored values with new values.
+    :param fun: The new objective function value
+    :param stop_iteration: A flag to indicate whether the optimization algorithm has stopped iterating
+    :param success: A flag to indicate whether the optimization algorithm has succeeded
+    :param message: A detailed message about the optimization result.
+    :param increment: A flag to indicate whether to increment the number of iterations.
+    """
+
+    self.fun = fun
+    self.stop_iteration = stop_iteration
+    self.success = success
+    self.message = message
+
+    self.nit += int(increment)
+
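Putting these pieces together, a minimal sketch of how the class can track progress inside an optimization loop (the objective values below are placeholders, not real ELBO values):

from viprs.utils.OptimizeResult import OptimizeResult

result = OptimizeResult()
result.reset()  # nit=0, success=False, stop_iteration=False

for step in range(1, 101):
    elbo = -1000. / step  # placeholder objective value
    converged = (step == 100)
    result.update(fun=elbo,
                  stop_iteration=converged,
                  success=converged,
                  message='Converged.' if converged else None)
    if result.stop_iteration:
        break

print(result.iterations, result.objective)  # 100 -10.0
print(result)  # __str__ prints the stored fields as a dictionary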
+
+
+ +
+ + + +
+ +
+ +
+ + + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/api/utils/compute_utils/index.html b/api/utils/compute_utils/index.html new file mode 100644 index 0000000..02a3eb3 --- /dev/null +++ b/api/utils/compute_utils/index.html @@ -0,0 +1,1675 @@ + + + + + + + + + + + + + + + + + + + Compute utils - Variational Inference of Polygenic Risk Scores (VIPRS) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Compute utils

+ +
+ + + + +
+ + + +
+ + + + + + + + + +
+ + +

+ dict_concat(d, axis=0) + +

+ + +
+ +

Concatenate the values of a dictionary into a single vector

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
d + +
+

A dictionary where values are numeric scalars or vectors

+
+
+ required +
axis + +
+

Concatenate along given axis.

+
+
+ 0 +
+ +
+ Source code in viprs/utils/compute_utils.py +
21
+22
+23
+24
+25
+26
+27
+28
+29
+30
def dict_concat(d, axis=0):
+    """
+    Concatenate the values of a dictionary into a single vector
+    :param d: A dictionary where values are numeric scalars or vectors
+    :param axis: Concatenate along given axis.
+    """
+    if len(d) == 1:
+        return d[next(iter(d))]
+    else:
+        return np.concatenate([d[c] for c in sorted(d.keys())], axis=axis)
+
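For example, with hypothetical per-chromosome effect-size vectors:

import numpy as np
from viprs.utils.compute_utils import dict_concat

betas = {2: np.array([0.3]), 1: np.array([0.1, 0.2])}
# Keys are sorted before concatenation, so chromosome 1 comes first:
print(dict_concat(betas))  # [0.1 0.2 0.3]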
+
+
+ +
+ +
+ + +

+ dict_dot(d1, d2) + +

+ + +
+ +

Perform dot product on the elements of d1 and d2

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
d1 + +
+

A dictionary where values are numeric scalars or vectors

+
+
+ required +
d2 + +
+

A dictionary where values are numeric scalars or vectors

+
+
+ required +
+ +
+ Source code in viprs/utils/compute_utils.py +
74
+75
+76
+77
+78
+79
+80
def dict_dot(d1, d2):
+    """
+    Perform dot product on the elements of d1 and d2
+    :param d1: A dictionary where values are numeric scalars or vectors
+    :param d2: A dictionary where values are numeric scalars or vectors
+    """
+    return np.sum([np.dot(d1[c], d2[c]) for c in d1.keys()])
+
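A quick sketch with hypothetical values: the per-key dot products are computed with np.dot and then summed over keys:

import numpy as np
from viprs.utils.compute_utils import dict_dot

d1 = {1: np.array([1., 2.]), 2: np.array([3.])}
d2 = {1: np.array([.5, .5]), 2: np.array([2.])}
print(dict_dot(d1, d2))  # (1*0.5 + 2*0.5) + (3*2) = 7.5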
+
+
+ +
+ +
+ + +

+ dict_elementwise_dot(d1, d2) + +

+ + +
+ +

Apply element-wise product between the values of two dictionaries

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
d1 + +
+

A dictionary where values are numeric scalars or vectors

+
+
+ required +
d2 + +
+

A dictionary where values are numeric scalars or vectors

+
+
+ required +
+ +
+ Source code in viprs/utils/compute_utils.py +
64
+65
+66
+67
+68
+69
+70
+71
def dict_elementwise_dot(d1, d2):
+    """
+    Apply element-wise product between the values of two dictionaries
+
+    :param d1: A dictionary where values are numeric scalars or vectors
+    :param d2: A dictionary where values are numeric scalars or vectors
+    """
+    return {c: d1[c] * d2[c] for c in d1}
+
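Unlike dict_dot above, the per-key products are returned without summation (hypothetical values):

import numpy as np
from viprs.utils.compute_utils import dict_elementwise_dot

d1 = {1: np.array([1., 2.]), 2: np.array([3.])}
d2 = {1: np.array([2., 2.]), 2: np.array([.5])}
print(dict_elementwise_dot(d1, d2))  # {1: array([2., 4.]), 2: array([1.5])}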
+
+
+ +
+ +
+ + +

+ dict_elementwise_transform(d, transform) + +

+ + +
+ +

Apply a transformation to values of a dictionary

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
d + +
+

A dictionary where values are numeric scalars or vectors

+
+
+ required +
transform + +
+

A function to apply to the values of the dictionary.

+
+
+ required +
+ +
+ Source code in viprs/utils/compute_utils.py +
55
+56
+57
+58
+59
+60
+61
def dict_elementwise_transform(d, transform):
+    """
+    Apply a transformation to values of a dictionary
+    :param d: A dictionary where values are numeric scalars or vectors
+    :param transform: A function to apply to the values of the dictionary.
+    """
+    return {c: np.vectorize(transform)(v) for c, v in d.items()}
+
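For example, applying np.sqrt to every element of every vector:

import numpy as np
from viprs.utils.compute_utils import dict_elementwise_transform

d = {1: np.array([1., 4.]), 2: np.array([9.])}
print(dict_elementwise_transform(d, np.sqrt))  # {1: array([1., 2.]), 2: array([3.])}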
+
+
+ +
+ +
+ + +

+ dict_mean(d, axis=None) + +

+ + +
+ +

Estimate the mean of the values of a dictionary

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
d + +
+

A dictionary where values are numeric scalars or vectors

+
+
+ required +
axis + +
+

Perform aggregation along given axis.

+
+
+ None +
+ +
+ Source code in viprs/utils/compute_utils.py +
33
+34
+35
+36
+37
+38
+39
def dict_mean(d, axis=None):
+    """
+    Estimate the mean of the values of a dictionary
+    :param d: A dictionary where values are numeric scalars or vectors
+    :param axis: Perform aggregation along given axis.
+    """
+    return np.mean(np.array([np.mean(v, axis=axis) for v in d.values()]), axis=axis)
+
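Note that, per the source above, this computes the mean of the per-key means rather than the grand mean over all elements:

import numpy as np
from viprs.utils.compute_utils import dict_mean

d = {1: np.array([1., 3.]), 2: np.array([5.])}
print(dict_mean(d))  # mean([2., 5.]) = 3.5, not mean([1., 3., 5.]) = 3.0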
+
+
+ +
+ +
+ + +

+ dict_repeat(value, shapes) + +

+ + +
+ +

Given a value, create a dictionary where the value is repeated +according to the shapes parameter

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
shapes + +
+

A dictionary of shapes. The key is arbitrary; the value is the shape of the output array (passed to np.ones).

+
+
+ required +
value + +
+

The value to repeat

+
+
+ required +
+ +
+ Source code in viprs/utils/compute_utils.py +
def dict_repeat(value, shapes):
+    """
+    Given a value, create a dictionary where the value is repeated
+    according to the shapes parameter
+    :param shapes: A dictionary of shapes. The key is arbitrary; the value is the shape of the output array (passed to np.ones)
+    :param value:  The value to repeat
+    """
+    return {c: value*np.ones(shp) for c, shp in shapes.items()}
+
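For example, initializing a per-chromosome parameter to a constant value:

import numpy as np
from viprs.utils.compute_utils import dict_repeat

shapes = {1: 3, 2: 2}  # e.g. number of variants per chromosome
print(dict_repeat(.5, shapes))
# {1: array([0.5, 0.5, 0.5]), 2: array([0.5, 0.5])}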
+
+
+ +
+ +
+ + +

+ dict_set(d, value) + +

+ + +
+ + + + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
d + +
+

A dictionary where values are numeric vectors

+
+
+ required +
value + +
+

A value to set for all vectors

+
+
+ required +
+ +
+ Source code in viprs/utils/compute_utils.py +
83
+84
+85
+86
+87
+88
+89
+90
+91
def dict_set(d, value):
+    """
+    :param d: A dictionary where values are numeric vectors
+    :param value: A value to set for all vectors
+    """
+    for c in d:
+        d[c][:] = value
+
+    return d
+
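For example (note that the input arrays are modified in place):

import numpy as np
from viprs.utils.compute_utils import dict_set

d = {1: np.array([1., 2.]), 2: np.array([3.])}
dict_set(d, 0.)  # modifies the arrays in place (and also returns d)
print(d)  # {1: array([0., 0.]), 2: array([0.])}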
+
+
+ +
+ +
+ + +

+ dict_sum(d, axis=None, transform=None) + +

+ + +
+ +

Estimate the sum of the values of a dictionary

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
d + +
+

A dictionary where values are numeric scalars or vectors

+
+
+ required +
axis + +
+

Perform aggregation along given axis.

+
+
+ None +
transform + +
+

Transformation to apply before summing.

+
+
+ None +
+ +
+ Source code in viprs/utils/compute_utils.py +
42
+43
+44
+45
+46
+47
+48
+49
+50
+51
+52
def dict_sum(d, axis=None, transform=None):
+    """
+    Estimate the sum of the values of a dictionary
+    :param d: A dictionary where values are numeric scalars or vectors
+    :param axis: Perform aggregation along given axis.
+    :param transform: Transformation to apply before summing.
+    """
+    if transform is None:
+        return np.sum(np.array([np.sum(v, axis=axis) for v in d.values()]), axis=axis)
+    else:
+        return np.sum(np.array([np.sum(transform(v), axis=axis) for v in d.values()]), axis=axis)
+
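For example, summing with and without a transform (hypothetical values):

import numpy as np
from viprs.utils.compute_utils import dict_sum

d = {1: np.array([1., -2.]), 2: np.array([3.])}
print(dict_sum(d))                    # 1 - 2 + 3 = 2.0
print(dict_sum(d, transform=np.abs))  # 1 + 2 + 3 = 6.0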
+
+
+ +
+ +
+ + +

+ expand_column_names(c_name, shape, sep='_') + +

+ + +
+ +

Given a desired column name c_name and a matrix shape +that we'd like to apply the column name to, return a list of +column names for every column in the matrix. The column names will be +in the form of c_name followed by an index, separated by sep.

+

For example, if the column name is BETA, the +shape is (100, 3) and the separator is _, we return a list with: +[BETA_0, BETA_1, BETA_2]

+

If the matrix in question is a vector, we just return the column name +without any indices appended to it.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
c_name + +
+

A string object

+
+
+ required +
shape + +
+

The shape of a numpy matrix or vector

+
+
+ required +
sep + +
+

The separator

+
+
+ '_' +
+ +
+ Source code in viprs/utils/compute_utils.py +
def expand_column_names(c_name, shape, sep='_'):
+    """
+    Given a desired column name `c_name` and a matrix `shape`
+    that we'd like to apply the column name to, return a list of
+    column names for every column in the matrix. The column names will be
+    in the form of `c_name` followed by an index, separated by `sep`.
+
+    For example, if the column name is `BETA`, the
+    shape is (100, 3) and the separator is `_`, we return a list with:
+    [`BETA_0`, `BETA_1`, `BETA_2`]
+
+    If the matrix in question is a vector, we just return the column name
+    without any indices appended to it.
+
+    :param c_name: A string object
+    :param shape: The shape of a numpy matrix or vector
+    :param sep: The separator
+    """
+
+    if len(shape) < 2:
+        return c_name
+    elif shape[1] == 1:
+        return c_name
+    else:
+        return [c_name + f'{sep}{i}' for i in range(shape[1])]
+
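For example:

from viprs.utils.compute_utils import expand_column_names

print(expand_column_names('BETA', (100, 3)))  # ['BETA_0', 'BETA_1', 'BETA_2']
print(expand_column_names('BETA', (100,)))    # 'BETA' (vectors keep the bare name)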
+
+
+ +
+ +
+ + +

+ fits_in_memory(alloc_size, max_prop=0.9) + +

+ + +
+ +

Check whether there are enough memory resources to load an object +with the given allocation size (in MB).

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
alloc_size + +
+

The allocation size (in MB)

+
+
+ required +
max_prop + +
+

The maximum proportion of available memory allowed for the object

+
+
+ 0.9 +
+ +
+ Source code in viprs/utils/compute_utils.py +
 5
+ 6
+ 7
+ 8
+ 9
+10
+11
+12
+13
+14
+15
+16
+17
+18
def fits_in_memory(alloc_size, max_prop=.9):
+    """
+    Check whether there are enough memory resources to load an object
+    with the given allocation size (in MB).
+    :param alloc_size: The allocation size (in MB)
+    :param max_prop: The maximum proportion of available memory allowed for the object
+    """
+
+    avail_mem = psutil.virtual_memory().available / (1024.0 ** 2)
+
+    if alloc_size / avail_mem > max_prop:
+        return False
+    else:
+        return True
+
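For example, checking whether a ~2 GB object would fit within 90% of the currently available memory:

from viprs.utils.compute_utils import fits_in_memory

if not fits_in_memory(2048):  # alloc_size is in MB
    print("Not enough available memory; consider processing in smaller chunks.")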
+
+
+ +
+ + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/api/utils/data_utils/index.html b/api/utils/data_utils/index.html new file mode 100644 index 0000000..95a609d --- /dev/null +++ b/api/utils/data_utils/index.html @@ -0,0 +1,865 @@ + + + + + + + + + + + + + + + + + + + Data utils - Variational Inference of Polygenic Risk Scores (VIPRS) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Data utils

+ +
+ + + + +
+ + + +
+ + + + + + + + + +
+ + +

+ download_ukb_wb_ld_matrix(target_dir='.', chromosome=None) + +

+ + +
+ +

Download the LD matrix for the White British samples in the UK Biobank.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
target_dir + +
+

The path or directory where to store the LD matrix

+
+
+ '.' +
chromosome + +
+

An integer or list of integers with the chromosome numbers for which to download the LD matrices from Zenodo.

+
+
+ None +
+ +
+ Source code in viprs/utils/data_utils.py +
 2
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+10
+11
+12
+13
+14
+15
+16
+17
+18
+19
+20
+21
+22
+23
+24
+25
+26
+27
+28
+29
+30
+31
+32
+33
+34
+35
def download_ukb_wb_ld_matrix(target_dir='.', chromosome=None):
+    """
+    Download the LD matrix for the White British samples in the UK Biobank.
+    :param target_dir: The path or directory where to store the LD matrix
+    :param chromosome: An integer or list of integers with the chromosome numbers for which to download
+    the LD matrices from Zenodo.
+    """
+
+    import urllib.request
+    from magenpy.utils.system_utils import makedir
+    from magenpy.utils.compute_utils import iterable
+    import os
+    from tqdm import tqdm
+
+    if chromosome is None:
+        chromosome = list(range(1, 23))
+    elif not iterable(chromosome):
+        chromosome = [chromosome]
+
+    if len(chromosome) < 2:
+        print("> Download LD matrix for chromosome", chromosome[0])
+
+    for c in tqdm(chromosome, total=len(chromosome), disable=len(chromosome) < 2,
+                  desc='Downloading LD matrices'):
+
+        target_path = os.path.join(target_dir, f"chr_{c}.tar.gz")
+
+        try:
+            makedir(os.path.dirname(target_path))
+            urllib.request.urlretrieve(f"https://zenodo.org/record/7036625/files/chr_{c}.tar.gz?download=1",
+                                       target_path)
+            os.system(f"tar -xvzf {target_path}")
+        except Exception as e:
+            print(e)
+
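For example, downloading the LD matrices for chromosomes 21 and 22 (note that, per the source above, the archives are fetched into target_dir but extracted with tar in the current working directory):

from viprs.utils.data_utils import download_ukb_wb_ld_matrix

download_ukb_wb_ld_matrix(target_dir='data/ld/', chromosome=[21, 22])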
+
+
+ +
+ + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/api/utils/exceptions/index.html b/api/utils/exceptions/index.html new file mode 100644 index 0000000..52d2d18 --- /dev/null +++ b/api/utils/exceptions/index.html @@ -0,0 +1,772 @@ + + + + + + + + + + + + + + + + + + + Exceptions - Variational Inference of Polygenic Risk Scores (VIPRS) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Exceptions

+ +
+ + + + +
+ + + +
+ + + + + + + + +
+ + + +

+ OptimizationDivergence + + +

+ + +
+

+ Bases: Exception

+ + +

Exception raised when the optimization algorithm diverges.

+ +
+ Source code in viprs/utils/exceptions.py +
2
+3
+4
+5
+6
class OptimizationDivergence(Exception):
+    """
+    Exception raised when the optimization algorithm diverges.
+    """
+    pass
+
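A minimal sketch of how this exception might be intercepted when fitting a model (whether a given model's fit raises it depends on the model and its settings):

from viprs.utils.exceptions import OptimizationDivergence

def safe_fit(model):
    """Fit a PRS model, intercepting divergence of the optimization."""
    try:
        return model.fit()
    except OptimizationDivergence as e:
        print(f"Optimization diverged: {e}")
        return None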
+
+ +
+ +
+ + + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/assets/_mkdocstrings.css b/assets/_mkdocstrings.css new file mode 100644 index 0000000..85449ec --- /dev/null +++ b/assets/_mkdocstrings.css @@ -0,0 +1,119 @@ + +/* Avoid breaking parameter names, etc. in table cells. */ +.doc-contents td code { + word-break: normal !important; +} + +/* No line break before first paragraph of descriptions. */ +.doc-md-description, +.doc-md-description>p:first-child { + display: inline; +} + +/* Max width for docstring sections tables. */ +.doc .md-typeset__table, +.doc .md-typeset__table table { + display: table !important; + width: 100%; +} + +.doc .md-typeset__table tr { + display: table-row; +} + +/* Defaults in Spacy table style. */ +.doc-param-default { + float: right; +} + +/* Backward-compatibility: docstring section titles in bold. */ +.doc-section-title { + font-weight: bold; +} + +/* Symbols in Navigation and ToC. */ +:root, +[data-md-color-scheme="default"] { + --doc-symbol-attribute-fg-color: #953800; + --doc-symbol-function-fg-color: #8250df; + --doc-symbol-method-fg-color: #8250df; + --doc-symbol-class-fg-color: #0550ae; + --doc-symbol-module-fg-color: #5cad0f; + + --doc-symbol-attribute-bg-color: #9538001a; + --doc-symbol-function-bg-color: #8250df1a; + --doc-symbol-method-bg-color: #8250df1a; + --doc-symbol-class-bg-color: #0550ae1a; + --doc-symbol-module-bg-color: #5cad0f1a; +} + +[data-md-color-scheme="slate"] { + --doc-symbol-attribute-fg-color: #ffa657; + --doc-symbol-function-fg-color: #d2a8ff; + --doc-symbol-method-fg-color: #d2a8ff; + --doc-symbol-class-fg-color: #79c0ff; + --doc-symbol-module-fg-color: #baff79; + + --doc-symbol-attribute-bg-color: #ffa6571a; + --doc-symbol-function-bg-color: #d2a8ff1a; + --doc-symbol-method-bg-color: #d2a8ff1a; + --doc-symbol-class-bg-color: #79c0ff1a; + --doc-symbol-module-bg-color: #baff791a; +} + +code.doc-symbol { + border-radius: .1rem; + font-size: .85em; + padding: 0 .3em; + font-weight: bold; +} + +code.doc-symbol-attribute { + color: var(--doc-symbol-attribute-fg-color); + background-color: var(--doc-symbol-attribute-bg-color); +} + +code.doc-symbol-attribute::after { + content: "attr"; +} + +code.doc-symbol-function { + color: var(--doc-symbol-function-fg-color); + background-color: var(--doc-symbol-function-bg-color); +} + +code.doc-symbol-function::after { + content: "func"; +} + +code.doc-symbol-method { + color: var(--doc-symbol-method-fg-color); + background-color: var(--doc-symbol-method-bg-color); +} + +code.doc-symbol-method::after { + content: "meth"; +} + +code.doc-symbol-class { + color: var(--doc-symbol-class-fg-color); + background-color: var(--doc-symbol-class-bg-color); +} + +code.doc-symbol-class::after { + content: "class"; +} + +code.doc-symbol-module { + color: var(--doc-symbol-module-fg-color); + background-color: var(--doc-symbol-module-bg-color); +} + +code.doc-symbol-module::after { + content: "mod"; +} + +.doc-signature .autorefs { + color: inherit; + border-bottom: 1px dotted currentcolor; +} diff --git a/assets/images/favicon.png b/assets/images/favicon.png new file mode 100644 index 0000000000000000000000000000000000000000..1cf13b9f9d978896599290a74f77d5dbe7d1655c GIT binary patch literal 1870 zcmV-U2eJ5xP)Gc)JR9QMau)O=X#!i9;T z37kk-upj^(fsR36MHs_+1RCI)NNu9}lD0S{B^g8PN?Ww(5|~L#Ng*g{WsqleV}|#l zz8@ri&cTzw_h33bHI+12+kK6WN$h#n5cD8OQt`5kw6p~9H3()bUQ8OS4Q4HTQ=1Ol z_JAocz`fLbT2^{`8n~UAo=#AUOf=SOq4pYkt;XbC&f#7lb$*7=$na!mWCQ`dBQsO0 
zLFBSPj*N?#u5&pf2t4XjEGH|=pPQ8xh7tpx;US5Cx_Ju;!O`ya-yF`)b%TEt5>eP1ZX~}sjjA%FJF?h7cX8=b!DZl<6%Cv z*G0uvvU+vmnpLZ2paivG-(cd*y3$hCIcsZcYOGh{$&)A6*XX&kXZd3G8m)G$Zz-LV z^GF3VAW^Mdv!)4OM8EgqRiz~*Cji;uzl2uC9^=8I84vNp;ltJ|q-*uQwGp2ma6cY7 z;`%`!9UXO@fr&Ebapfs34OmS9^u6$)bJxrucutf>`dKPKT%%*d3XlFVKunp9 zasduxjrjs>f8V=D|J=XNZp;_Zy^WgQ$9WDjgY=z@stwiEBm9u5*|34&1Na8BMjjgf3+SHcr`5~>oz1Y?SW^=K z^bTyO6>Gar#P_W2gEMwq)ot3; zREHn~U&Dp0l6YT0&k-wLwYjb?5zGK`W6S2v+K>AM(95m2C20L|3m~rN8dprPr@t)5lsk9Hu*W z?pS990s;Ez=+Rj{x7p``4>+c0G5^pYnB1^!TL=(?HLHZ+HicG{~4F1d^5Awl_2!1jICM-!9eoLhbbT^;yHcefyTAaqRcY zmuctDopPT!%k+}x%lZRKnzykr2}}XfG_ne?nRQO~?%hkzo;@RN{P6o`&mMUWBYMTe z6i8ChtjX&gXl`nvrU>jah)2iNM%JdjqoaeaU%yVn!^70x-flljp6Q5tK}5}&X8&&G zX3fpb3E(!rH=zVI_9Gjl45w@{(ITqngWFe7@9{mX;tO25Z_8 zQHEpI+FkTU#4xu>RkN>b3Tnc3UpWzPXWm#o55GKF09j^Mh~)K7{QqbO_~(@CVq! zS<8954|P8mXN2MRs86xZ&Q4EfM@JB94b=(YGuk)s&^jiSF=t3*oNK3`rD{H`yQ?d; ztE=laAUoZx5?RC8*WKOj`%LXEkgDd>&^Q4M^z`%u0rg-It=hLCVsq!Z%^6eB-OvOT zFZ28TN&cRmgU}Elrnk43)!>Z1FCPL2K$7}gwzIc48NX}#!A1BpJP?#v5wkNprhV** z?Cpalt1oH&{r!o3eSKc&ap)iz2BTn_VV`4>9M^b3;(YY}4>#ML6{~(4mH+?%07*qo IM6N<$f(jP3KmY&$ literal 0 HcmV?d00001 diff --git a/assets/javascripts/bundle.ad660dcc.min.js b/assets/javascripts/bundle.ad660dcc.min.js new file mode 100644 index 0000000..0ffc046 --- /dev/null +++ b/assets/javascripts/bundle.ad660dcc.min.js @@ -0,0 +1,29 @@ +"use strict";(()=>{var Fi=Object.create;var gr=Object.defineProperty;var ji=Object.getOwnPropertyDescriptor;var Wi=Object.getOwnPropertyNames,Dt=Object.getOwnPropertySymbols,Ui=Object.getPrototypeOf,xr=Object.prototype.hasOwnProperty,no=Object.prototype.propertyIsEnumerable;var oo=(e,t,r)=>t in e?gr(e,t,{enumerable:!0,configurable:!0,writable:!0,value:r}):e[t]=r,R=(e,t)=>{for(var r in t||(t={}))xr.call(t,r)&&oo(e,r,t[r]);if(Dt)for(var r of Dt(t))no.call(t,r)&&oo(e,r,t[r]);return e};var io=(e,t)=>{var r={};for(var o in e)xr.call(e,o)&&t.indexOf(o)<0&&(r[o]=e[o]);if(e!=null&&Dt)for(var o of Dt(e))t.indexOf(o)<0&&no.call(e,o)&&(r[o]=e[o]);return r};var yr=(e,t)=>()=>(t||e((t={exports:{}}).exports,t),t.exports);var Di=(e,t,r,o)=>{if(t&&typeof t=="object"||typeof t=="function")for(let n of Wi(t))!xr.call(e,n)&&n!==r&&gr(e,n,{get:()=>t[n],enumerable:!(o=ji(t,n))||o.enumerable});return e};var Vt=(e,t,r)=>(r=e!=null?Fi(Ui(e)):{},Di(t||!e||!e.__esModule?gr(r,"default",{value:e,enumerable:!0}):r,e));var ao=(e,t,r)=>new Promise((o,n)=>{var i=p=>{try{s(r.next(p))}catch(c){n(c)}},a=p=>{try{s(r.throw(p))}catch(c){n(c)}},s=p=>p.done?o(p.value):Promise.resolve(p.value).then(i,a);s((r=r.apply(e,t)).next())});var co=yr((Er,so)=>{(function(e,t){typeof Er=="object"&&typeof so!="undefined"?t():typeof define=="function"&&define.amd?define(t):t()})(Er,function(){"use strict";function e(r){var o=!0,n=!1,i=null,a={text:!0,search:!0,url:!0,tel:!0,email:!0,password:!0,number:!0,date:!0,month:!0,week:!0,time:!0,datetime:!0,"datetime-local":!0};function s(H){return!!(H&&H!==document&&H.nodeName!=="HTML"&&H.nodeName!=="BODY"&&"classList"in H&&"contains"in H.classList)}function p(H){var mt=H.type,ze=H.tagName;return!!(ze==="INPUT"&&a[mt]&&!H.readOnly||ze==="TEXTAREA"&&!H.readOnly||H.isContentEditable)}function c(H){H.classList.contains("focus-visible")||(H.classList.add("focus-visible"),H.setAttribute("data-focus-visible-added",""))}function l(H){H.hasAttribute("data-focus-visible-added")&&(H.classList.remove("focus-visible"),H.removeAttribute("data-focus-visible-added"))}function f(H){H.metaKey||H.altKey||H.ctrlKey||(s(r.activeElement)&&c(r.activeElement),o=!0)}function u(H){o=!1}function 
h(H){s(H.target)&&(o||p(H.target))&&c(H.target)}function w(H){s(H.target)&&(H.target.classList.contains("focus-visible")||H.target.hasAttribute("data-focus-visible-added"))&&(n=!0,window.clearTimeout(i),i=window.setTimeout(function(){n=!1},100),l(H.target))}function A(H){document.visibilityState==="hidden"&&(n&&(o=!0),te())}function te(){document.addEventListener("mousemove",J),document.addEventListener("mousedown",J),document.addEventListener("mouseup",J),document.addEventListener("pointermove",J),document.addEventListener("pointerdown",J),document.addEventListener("pointerup",J),document.addEventListener("touchmove",J),document.addEventListener("touchstart",J),document.addEventListener("touchend",J)}function ie(){document.removeEventListener("mousemove",J),document.removeEventListener("mousedown",J),document.removeEventListener("mouseup",J),document.removeEventListener("pointermove",J),document.removeEventListener("pointerdown",J),document.removeEventListener("pointerup",J),document.removeEventListener("touchmove",J),document.removeEventListener("touchstart",J),document.removeEventListener("touchend",J)}function J(H){H.target.nodeName&&H.target.nodeName.toLowerCase()==="html"||(o=!1,ie())}document.addEventListener("keydown",f,!0),document.addEventListener("mousedown",u,!0),document.addEventListener("pointerdown",u,!0),document.addEventListener("touchstart",u,!0),document.addEventListener("visibilitychange",A,!0),te(),r.addEventListener("focus",h,!0),r.addEventListener("blur",w,!0),r.nodeType===Node.DOCUMENT_FRAGMENT_NODE&&r.host?r.host.setAttribute("data-js-focus-visible",""):r.nodeType===Node.DOCUMENT_NODE&&(document.documentElement.classList.add("js-focus-visible"),document.documentElement.setAttribute("data-js-focus-visible",""))}if(typeof window!="undefined"&&typeof document!="undefined"){window.applyFocusVisiblePolyfill=e;var t;try{t=new CustomEvent("focus-visible-polyfill-ready")}catch(r){t=document.createEvent("CustomEvent"),t.initCustomEvent("focus-visible-polyfill-ready",!1,!1,{})}window.dispatchEvent(t)}typeof document!="undefined"&&e(document)})});var Yr=yr((Rt,Kr)=>{/*! 
+ * clipboard.js v2.0.11 + * https://clipboardjs.com/ + * + * Licensed MIT © Zeno Rocha + */(function(t,r){typeof Rt=="object"&&typeof Kr=="object"?Kr.exports=r():typeof define=="function"&&define.amd?define([],r):typeof Rt=="object"?Rt.ClipboardJS=r():t.ClipboardJS=r()})(Rt,function(){return function(){var e={686:function(o,n,i){"use strict";i.d(n,{default:function(){return Ii}});var a=i(279),s=i.n(a),p=i(370),c=i.n(p),l=i(817),f=i.n(l);function u(V){try{return document.execCommand(V)}catch(_){return!1}}var h=function(_){var O=f()(_);return u("cut"),O},w=h;function A(V){var _=document.documentElement.getAttribute("dir")==="rtl",O=document.createElement("textarea");O.style.fontSize="12pt",O.style.border="0",O.style.padding="0",O.style.margin="0",O.style.position="absolute",O.style[_?"right":"left"]="-9999px";var j=window.pageYOffset||document.documentElement.scrollTop;return O.style.top="".concat(j,"px"),O.setAttribute("readonly",""),O.value=V,O}var te=function(_,O){var j=A(_);O.container.appendChild(j);var D=f()(j);return u("copy"),j.remove(),D},ie=function(_){var O=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{container:document.body},j="";return typeof _=="string"?j=te(_,O):_ instanceof HTMLInputElement&&!["text","search","url","tel","password"].includes(_==null?void 0:_.type)?j=te(_.value,O):(j=f()(_),u("copy")),j},J=ie;function H(V){"@babel/helpers - typeof";return typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?H=function(O){return typeof O}:H=function(O){return O&&typeof Symbol=="function"&&O.constructor===Symbol&&O!==Symbol.prototype?"symbol":typeof O},H(V)}var mt=function(){var _=arguments.length>0&&arguments[0]!==void 0?arguments[0]:{},O=_.action,j=O===void 0?"copy":O,D=_.container,Y=_.target,ke=_.text;if(j!=="copy"&&j!=="cut")throw new Error('Invalid "action" value, use either "copy" or "cut"');if(Y!==void 0)if(Y&&H(Y)==="object"&&Y.nodeType===1){if(j==="copy"&&Y.hasAttribute("disabled"))throw new Error('Invalid "target" attribute. Please use "readonly" instead of "disabled" attribute');if(j==="cut"&&(Y.hasAttribute("readonly")||Y.hasAttribute("disabled")))throw new Error(`Invalid "target" attribute. 
You can't cut text from elements with "readonly" or "disabled" attributes`)}else throw new Error('Invalid "target" value, use a valid Element');if(ke)return J(ke,{container:D});if(Y)return j==="cut"?w(Y):J(Y,{container:D})},ze=mt;function Ie(V){"@babel/helpers - typeof";return typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?Ie=function(O){return typeof O}:Ie=function(O){return O&&typeof Symbol=="function"&&O.constructor===Symbol&&O!==Symbol.prototype?"symbol":typeof O},Ie(V)}function _i(V,_){if(!(V instanceof _))throw new TypeError("Cannot call a class as a function")}function ro(V,_){for(var O=0;O<_.length;O++){var j=_[O];j.enumerable=j.enumerable||!1,j.configurable=!0,"value"in j&&(j.writable=!0),Object.defineProperty(V,j.key,j)}}function Ai(V,_,O){return _&&ro(V.prototype,_),O&&ro(V,O),V}function Ci(V,_){if(typeof _!="function"&&_!==null)throw new TypeError("Super expression must either be null or a function");V.prototype=Object.create(_&&_.prototype,{constructor:{value:V,writable:!0,configurable:!0}}),_&&br(V,_)}function br(V,_){return br=Object.setPrototypeOf||function(j,D){return j.__proto__=D,j},br(V,_)}function Hi(V){var _=Pi();return function(){var j=Wt(V),D;if(_){var Y=Wt(this).constructor;D=Reflect.construct(j,arguments,Y)}else D=j.apply(this,arguments);return ki(this,D)}}function ki(V,_){return _&&(Ie(_)==="object"||typeof _=="function")?_:$i(V)}function $i(V){if(V===void 0)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return V}function Pi(){if(typeof Reflect=="undefined"||!Reflect.construct||Reflect.construct.sham)return!1;if(typeof Proxy=="function")return!0;try{return Date.prototype.toString.call(Reflect.construct(Date,[],function(){})),!0}catch(V){return!1}}function Wt(V){return Wt=Object.setPrototypeOf?Object.getPrototypeOf:function(O){return O.__proto__||Object.getPrototypeOf(O)},Wt(V)}function vr(V,_){var O="data-clipboard-".concat(V);if(_.hasAttribute(O))return _.getAttribute(O)}var Ri=function(V){Ci(O,V);var _=Hi(O);function O(j,D){var Y;return _i(this,O),Y=_.call(this),Y.resolveOptions(D),Y.listenClick(j),Y}return Ai(O,[{key:"resolveOptions",value:function(){var D=arguments.length>0&&arguments[0]!==void 0?arguments[0]:{};this.action=typeof D.action=="function"?D.action:this.defaultAction,this.target=typeof D.target=="function"?D.target:this.defaultTarget,this.text=typeof D.text=="function"?D.text:this.defaultText,this.container=Ie(D.container)==="object"?D.container:document.body}},{key:"listenClick",value:function(D){var Y=this;this.listener=c()(D,"click",function(ke){return Y.onClick(ke)})}},{key:"onClick",value:function(D){var Y=D.delegateTarget||D.currentTarget,ke=this.action(Y)||"copy",Ut=ze({action:ke,container:this.container,target:this.target(Y),text:this.text(Y)});this.emit(Ut?"success":"error",{action:ke,text:Ut,trigger:Y,clearSelection:function(){Y&&Y.focus(),window.getSelection().removeAllRanges()}})}},{key:"defaultAction",value:function(D){return vr("action",D)}},{key:"defaultTarget",value:function(D){var Y=vr("target",D);if(Y)return document.querySelector(Y)}},{key:"defaultText",value:function(D){return vr("text",D)}},{key:"destroy",value:function(){this.listener.destroy()}}],[{key:"copy",value:function(D){var Y=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{container:document.body};return J(D,Y)}},{key:"cut",value:function(D){return w(D)}},{key:"isSupported",value:function(){var D=arguments.length>0&&arguments[0]!==void 0?arguments[0]:["copy","cut"],Y=typeof 
D=="string"?[D]:D,ke=!!document.queryCommandSupported;return Y.forEach(function(Ut){ke=ke&&!!document.queryCommandSupported(Ut)}),ke}}]),O}(s()),Ii=Ri},828:function(o){var n=9;if(typeof Element!="undefined"&&!Element.prototype.matches){var i=Element.prototype;i.matches=i.matchesSelector||i.mozMatchesSelector||i.msMatchesSelector||i.oMatchesSelector||i.webkitMatchesSelector}function a(s,p){for(;s&&s.nodeType!==n;){if(typeof s.matches=="function"&&s.matches(p))return s;s=s.parentNode}}o.exports=a},438:function(o,n,i){var a=i(828);function s(l,f,u,h,w){var A=c.apply(this,arguments);return l.addEventListener(u,A,w),{destroy:function(){l.removeEventListener(u,A,w)}}}function p(l,f,u,h,w){return typeof l.addEventListener=="function"?s.apply(null,arguments):typeof u=="function"?s.bind(null,document).apply(null,arguments):(typeof l=="string"&&(l=document.querySelectorAll(l)),Array.prototype.map.call(l,function(A){return s(A,f,u,h,w)}))}function c(l,f,u,h){return function(w){w.delegateTarget=a(w.target,f),w.delegateTarget&&h.call(l,w)}}o.exports=p},879:function(o,n){n.node=function(i){return i!==void 0&&i instanceof HTMLElement&&i.nodeType===1},n.nodeList=function(i){var a=Object.prototype.toString.call(i);return i!==void 0&&(a==="[object NodeList]"||a==="[object HTMLCollection]")&&"length"in i&&(i.length===0||n.node(i[0]))},n.string=function(i){return typeof i=="string"||i instanceof String},n.fn=function(i){var a=Object.prototype.toString.call(i);return a==="[object Function]"}},370:function(o,n,i){var a=i(879),s=i(438);function p(u,h,w){if(!u&&!h&&!w)throw new Error("Missing required arguments");if(!a.string(h))throw new TypeError("Second argument must be a String");if(!a.fn(w))throw new TypeError("Third argument must be a Function");if(a.node(u))return c(u,h,w);if(a.nodeList(u))return l(u,h,w);if(a.string(u))return f(u,h,w);throw new TypeError("First argument must be a String, HTMLElement, HTMLCollection, or NodeList")}function c(u,h,w){return u.addEventListener(h,w),{destroy:function(){u.removeEventListener(h,w)}}}function l(u,h,w){return Array.prototype.forEach.call(u,function(A){A.addEventListener(h,w)}),{destroy:function(){Array.prototype.forEach.call(u,function(A){A.removeEventListener(h,w)})}}}function f(u,h,w){return s(document.body,u,h,w)}o.exports=p},817:function(o){function n(i){var a;if(i.nodeName==="SELECT")i.focus(),a=i.value;else if(i.nodeName==="INPUT"||i.nodeName==="TEXTAREA"){var s=i.hasAttribute("readonly");s||i.setAttribute("readonly",""),i.select(),i.setSelectionRange(0,i.value.length),s||i.removeAttribute("readonly"),a=i.value}else{i.hasAttribute("contenteditable")&&i.focus();var p=window.getSelection(),c=document.createRange();c.selectNodeContents(i),p.removeAllRanges(),p.addRange(c),a=p.toString()}return a}o.exports=n},279:function(o){function n(){}n.prototype={on:function(i,a,s){var p=this.e||(this.e={});return(p[i]||(p[i]=[])).push({fn:a,ctx:s}),this},once:function(i,a,s){var p=this;function c(){p.off(i,c),a.apply(s,arguments)}return c._=a,this.on(i,c,s)},emit:function(i){var a=[].slice.call(arguments,1),s=((this.e||(this.e={}))[i]||[]).slice(),p=0,c=s.length;for(p;p{"use strict";/*! 
+ * escape-html + * Copyright(c) 2012-2013 TJ Holowaychuk + * Copyright(c) 2015 Andreas Lubbe + * Copyright(c) 2015 Tiancheng "Timothy" Gu + * MIT Licensed + */var ts=/["'&<>]/;ei.exports=rs;function rs(e){var t=""+e,r=ts.exec(t);if(!r)return t;var o,n="",i=0,a=0;for(i=r.index;i0&&i[i.length-1])&&(c[0]===6||c[0]===2)){r=0;continue}if(c[0]===3&&(!i||c[1]>i[0]&&c[1]=e.length&&(e=void 0),{value:e&&e[o++],done:!e}}};throw new TypeError(t?"Object is not iterable.":"Symbol.iterator is not defined.")}function N(e,t){var r=typeof Symbol=="function"&&e[Symbol.iterator];if(!r)return e;var o=r.call(e),n,i=[],a;try{for(;(t===void 0||t-- >0)&&!(n=o.next()).done;)i.push(n.value)}catch(s){a={error:s}}finally{try{n&&!n.done&&(r=o.return)&&r.call(o)}finally{if(a)throw a.error}}return i}function q(e,t,r){if(r||arguments.length===2)for(var o=0,n=t.length,i;o1||s(u,h)})})}function s(u,h){try{p(o[u](h))}catch(w){f(i[0][3],w)}}function p(u){u.value instanceof nt?Promise.resolve(u.value.v).then(c,l):f(i[0][2],u)}function c(u){s("next",u)}function l(u){s("throw",u)}function f(u,h){u(h),i.shift(),i.length&&s(i[0][0],i[0][1])}}function mo(e){if(!Symbol.asyncIterator)throw new TypeError("Symbol.asyncIterator is not defined.");var t=e[Symbol.asyncIterator],r;return t?t.call(e):(e=typeof de=="function"?de(e):e[Symbol.iterator](),r={},o("next"),o("throw"),o("return"),r[Symbol.asyncIterator]=function(){return this},r);function o(i){r[i]=e[i]&&function(a){return new Promise(function(s,p){a=e[i](a),n(s,p,a.done,a.value)})}}function n(i,a,s,p){Promise.resolve(p).then(function(c){i({value:c,done:s})},a)}}function k(e){return typeof e=="function"}function ft(e){var t=function(o){Error.call(o),o.stack=new Error().stack},r=e(t);return r.prototype=Object.create(Error.prototype),r.prototype.constructor=r,r}var zt=ft(function(e){return function(r){e(this),this.message=r?r.length+` errors occurred during unsubscription: +`+r.map(function(o,n){return n+1+") "+o.toString()}).join(` + `):"",this.name="UnsubscriptionError",this.errors=r}});function qe(e,t){if(e){var r=e.indexOf(t);0<=r&&e.splice(r,1)}}var Fe=function(){function e(t){this.initialTeardown=t,this.closed=!1,this._parentage=null,this._finalizers=null}return e.prototype.unsubscribe=function(){var t,r,o,n,i;if(!this.closed){this.closed=!0;var a=this._parentage;if(a)if(this._parentage=null,Array.isArray(a))try{for(var s=de(a),p=s.next();!p.done;p=s.next()){var c=p.value;c.remove(this)}}catch(A){t={error:A}}finally{try{p&&!p.done&&(r=s.return)&&r.call(s)}finally{if(t)throw t.error}}else a.remove(this);var l=this.initialTeardown;if(k(l))try{l()}catch(A){i=A instanceof zt?A.errors:[A]}var f=this._finalizers;if(f){this._finalizers=null;try{for(var u=de(f),h=u.next();!h.done;h=u.next()){var w=h.value;try{fo(w)}catch(A){i=i!=null?i:[],A instanceof zt?i=q(q([],N(i)),N(A.errors)):i.push(A)}}}catch(A){o={error:A}}finally{try{h&&!h.done&&(n=u.return)&&n.call(u)}finally{if(o)throw o.error}}}if(i)throw new zt(i)}},e.prototype.add=function(t){var r;if(t&&t!==this)if(this.closed)fo(t);else{if(t instanceof e){if(t.closed||t._hasParent(this))return;t._addParent(this)}(this._finalizers=(r=this._finalizers)!==null&&r!==void 0?r:[]).push(t)}},e.prototype._hasParent=function(t){var r=this._parentage;return r===t||Array.isArray(r)&&r.includes(t)},e.prototype._addParent=function(t){var r=this._parentage;this._parentage=Array.isArray(r)?(r.push(t),r):r?[r,t]:t},e.prototype._removeParent=function(t){var 
r=this._parentage;r===t?this._parentage=null:Array.isArray(r)&&qe(r,t)},e.prototype.remove=function(t){var r=this._finalizers;r&&qe(r,t),t instanceof e&&t._removeParent(this)},e.EMPTY=function(){var t=new e;return t.closed=!0,t}(),e}();var Tr=Fe.EMPTY;function qt(e){return e instanceof Fe||e&&"closed"in e&&k(e.remove)&&k(e.add)&&k(e.unsubscribe)}function fo(e){k(e)?e():e.unsubscribe()}var $e={onUnhandledError:null,onStoppedNotification:null,Promise:void 0,useDeprecatedSynchronousErrorHandling:!1,useDeprecatedNextContext:!1};var ut={setTimeout:function(e,t){for(var r=[],o=2;o0},enumerable:!1,configurable:!0}),t.prototype._trySubscribe=function(r){return this._throwIfClosed(),e.prototype._trySubscribe.call(this,r)},t.prototype._subscribe=function(r){return this._throwIfClosed(),this._checkFinalizedStatuses(r),this._innerSubscribe(r)},t.prototype._innerSubscribe=function(r){var o=this,n=this,i=n.hasError,a=n.isStopped,s=n.observers;return i||a?Tr:(this.currentObservers=null,s.push(r),new Fe(function(){o.currentObservers=null,qe(s,r)}))},t.prototype._checkFinalizedStatuses=function(r){var o=this,n=o.hasError,i=o.thrownError,a=o.isStopped;n?r.error(i):a&&r.complete()},t.prototype.asObservable=function(){var r=new F;return r.source=this,r},t.create=function(r,o){return new Eo(r,o)},t}(F);var Eo=function(e){re(t,e);function t(r,o){var n=e.call(this)||this;return n.destination=r,n.source=o,n}return t.prototype.next=function(r){var o,n;(n=(o=this.destination)===null||o===void 0?void 0:o.next)===null||n===void 0||n.call(o,r)},t.prototype.error=function(r){var o,n;(n=(o=this.destination)===null||o===void 0?void 0:o.error)===null||n===void 0||n.call(o,r)},t.prototype.complete=function(){var r,o;(o=(r=this.destination)===null||r===void 0?void 0:r.complete)===null||o===void 0||o.call(r)},t.prototype._subscribe=function(r){var o,n;return(n=(o=this.source)===null||o===void 0?void 0:o.subscribe(r))!==null&&n!==void 0?n:Tr},t}(g);var _r=function(e){re(t,e);function t(r){var o=e.call(this)||this;return o._value=r,o}return Object.defineProperty(t.prototype,"value",{get:function(){return this.getValue()},enumerable:!1,configurable:!0}),t.prototype._subscribe=function(r){var o=e.prototype._subscribe.call(this,r);return!o.closed&&r.next(this._value),o},t.prototype.getValue=function(){var r=this,o=r.hasError,n=r.thrownError,i=r._value;if(o)throw n;return this._throwIfClosed(),i},t.prototype.next=function(r){e.prototype.next.call(this,this._value=r)},t}(g);var Lt={now:function(){return(Lt.delegate||Date).now()},delegate:void 0};var _t=function(e){re(t,e);function t(r,o,n){r===void 0&&(r=1/0),o===void 0&&(o=1/0),n===void 0&&(n=Lt);var i=e.call(this)||this;return i._bufferSize=r,i._windowTime=o,i._timestampProvider=n,i._buffer=[],i._infiniteTimeWindow=!0,i._infiniteTimeWindow=o===1/0,i._bufferSize=Math.max(1,r),i._windowTime=Math.max(1,o),i}return t.prototype.next=function(r){var o=this,n=o.isStopped,i=o._buffer,a=o._infiniteTimeWindow,s=o._timestampProvider,p=o._windowTime;n||(i.push(r),!a&&i.push(s.now()+p)),this._trimBuffer(),e.prototype.next.call(this,r)},t.prototype._subscribe=function(r){this._throwIfClosed(),this._trimBuffer();for(var o=this._innerSubscribe(r),n=this,i=n._infiniteTimeWindow,a=n._buffer,s=a.slice(),p=0;p0?e.prototype.schedule.call(this,r,o):(this.delay=o,this.state=r,this.scheduler.flush(this),this)},t.prototype.execute=function(r,o){return o>0||this.closed?e.prototype.execute.call(this,r,o):this._execute(r,o)},t.prototype.requestAsyncId=function(r,o,n){return n===void 
0&&(n=0),n!=null&&n>0||n==null&&this.delay>0?e.prototype.requestAsyncId.call(this,r,o,n):(r.flush(this),0)},t}(vt);var So=function(e){re(t,e);function t(){return e!==null&&e.apply(this,arguments)||this}return t}(gt);var Hr=new So(To);var Oo=function(e){re(t,e);function t(r,o){var n=e.call(this,r,o)||this;return n.scheduler=r,n.work=o,n}return t.prototype.requestAsyncId=function(r,o,n){return n===void 0&&(n=0),n!==null&&n>0?e.prototype.requestAsyncId.call(this,r,o,n):(r.actions.push(this),r._scheduled||(r._scheduled=bt.requestAnimationFrame(function(){return r.flush(void 0)})))},t.prototype.recycleAsyncId=function(r,o,n){var i;if(n===void 0&&(n=0),n!=null?n>0:this.delay>0)return e.prototype.recycleAsyncId.call(this,r,o,n);var a=r.actions;o!=null&&((i=a[a.length-1])===null||i===void 0?void 0:i.id)!==o&&(bt.cancelAnimationFrame(o),r._scheduled=void 0)},t}(vt);var Mo=function(e){re(t,e);function t(){return e!==null&&e.apply(this,arguments)||this}return t.prototype.flush=function(r){this._active=!0;var o=this._scheduled;this._scheduled=void 0;var n=this.actions,i;r=r||n.shift();do if(i=r.execute(r.state,r.delay))break;while((r=n[0])&&r.id===o&&n.shift());if(this._active=!1,i){for(;(r=n[0])&&r.id===o&&n.shift();)r.unsubscribe();throw i}},t}(gt);var me=new Mo(Oo);var M=new F(function(e){return e.complete()});function Yt(e){return e&&k(e.schedule)}function kr(e){return e[e.length-1]}function Xe(e){return k(kr(e))?e.pop():void 0}function He(e){return Yt(kr(e))?e.pop():void 0}function Bt(e,t){return typeof kr(e)=="number"?e.pop():t}var xt=function(e){return e&&typeof e.length=="number"&&typeof e!="function"};function Gt(e){return k(e==null?void 0:e.then)}function Jt(e){return k(e[ht])}function Xt(e){return Symbol.asyncIterator&&k(e==null?void 0:e[Symbol.asyncIterator])}function Zt(e){return new TypeError("You provided "+(e!==null&&typeof e=="object"?"an invalid object":"'"+e+"'")+" where a stream was expected. 
You can provide an Observable, Promise, ReadableStream, Array, AsyncIterable, or Iterable.")}function Gi(){return typeof Symbol!="function"||!Symbol.iterator?"@@iterator":Symbol.iterator}var er=Gi();function tr(e){return k(e==null?void 0:e[er])}function rr(e){return lo(this,arguments,function(){var r,o,n,i;return Nt(this,function(a){switch(a.label){case 0:r=e.getReader(),a.label=1;case 1:a.trys.push([1,,9,10]),a.label=2;case 2:return[4,nt(r.read())];case 3:return o=a.sent(),n=o.value,i=o.done,i?[4,nt(void 0)]:[3,5];case 4:return[2,a.sent()];case 5:return[4,nt(n)];case 6:return[4,a.sent()];case 7:return a.sent(),[3,2];case 8:return[3,10];case 9:return r.releaseLock(),[7];case 10:return[2]}})})}function or(e){return k(e==null?void 0:e.getReader)}function W(e){if(e instanceof F)return e;if(e!=null){if(Jt(e))return Ji(e);if(xt(e))return Xi(e);if(Gt(e))return Zi(e);if(Xt(e))return Lo(e);if(tr(e))return ea(e);if(or(e))return ta(e)}throw Zt(e)}function Ji(e){return new F(function(t){var r=e[ht]();if(k(r.subscribe))return r.subscribe(t);throw new TypeError("Provided object does not correctly implement Symbol.observable")})}function Xi(e){return new F(function(t){for(var r=0;r=2;return function(o){return o.pipe(e?b(function(n,i){return e(n,i,o)}):le,Te(1),r?Be(t):zo(function(){return new ir}))}}function Fr(e){return e<=0?function(){return M}:y(function(t,r){var o=[];t.subscribe(T(r,function(n){o.push(n),e=2,!0))}function pe(e){e===void 0&&(e={});var t=e.connector,r=t===void 0?function(){return new g}:t,o=e.resetOnError,n=o===void 0?!0:o,i=e.resetOnComplete,a=i===void 0?!0:i,s=e.resetOnRefCountZero,p=s===void 0?!0:s;return function(c){var l,f,u,h=0,w=!1,A=!1,te=function(){f==null||f.unsubscribe(),f=void 0},ie=function(){te(),l=u=void 0,w=A=!1},J=function(){var H=l;ie(),H==null||H.unsubscribe()};return y(function(H,mt){h++,!A&&!w&&te();var ze=u=u!=null?u:r();mt.add(function(){h--,h===0&&!A&&!w&&(f=Wr(J,p))}),ze.subscribe(mt),!l&&h>0&&(l=new at({next:function(Ie){return ze.next(Ie)},error:function(Ie){A=!0,te(),f=Wr(ie,n,Ie),ze.error(Ie)},complete:function(){w=!0,te(),f=Wr(ie,a),ze.complete()}}),W(H).subscribe(l))})(c)}}function Wr(e,t){for(var r=[],o=2;oe.next(document)),e}function $(e,t=document){return Array.from(t.querySelectorAll(e))}function P(e,t=document){let r=fe(e,t);if(typeof r=="undefined")throw new ReferenceError(`Missing element: expected "${e}" to be present`);return r}function fe(e,t=document){return t.querySelector(e)||void 0}function Re(){var e,t,r,o;return(o=(r=(t=(e=document.activeElement)==null?void 0:e.shadowRoot)==null?void 0:t.activeElement)!=null?r:document.activeElement)!=null?o:void 0}var xa=S(d(document.body,"focusin"),d(document.body,"focusout")).pipe(_e(1),Q(void 0),m(()=>Re()||document.body),B(1));function et(e){return xa.pipe(m(t=>e.contains(t)),K())}function kt(e,t){return C(()=>S(d(e,"mouseenter").pipe(m(()=>!0)),d(e,"mouseleave").pipe(m(()=>!1))).pipe(t?Ht(r=>Me(+!r*t)):le,Q(e.matches(":hover"))))}function Bo(e,t){if(typeof t=="string"||typeof t=="number")e.innerHTML+=t.toString();else if(t instanceof Node)e.appendChild(t);else if(Array.isArray(t))for(let r of t)Bo(e,r)}function x(e,t,...r){let o=document.createElement(e);if(t)for(let n of Object.keys(t))typeof t[n]!="undefined"&&(typeof t[n]!="boolean"?o.setAttribute(n,t[n]):o.setAttribute(n,""));for(let n of r)Bo(o,n);return o}function sr(e){if(e>999){let t=+((e-950)%1e3>99);return`${((e+1e-6)/1e3).toFixed(t)}k`}else return e.toString()}function wt(e){let t=x("script",{src:e});return 
C(()=>(document.head.appendChild(t),S(d(t,"load"),d(t,"error").pipe(v(()=>$r(()=>new ReferenceError(`Invalid script: ${e}`))))).pipe(m(()=>{}),L(()=>document.head.removeChild(t)),Te(1))))}var Go=new g,ya=C(()=>typeof ResizeObserver=="undefined"?wt("https://unpkg.com/resize-observer-polyfill"):I(void 0)).pipe(m(()=>new ResizeObserver(e=>e.forEach(t=>Go.next(t)))),v(e=>S(Ke,I(e)).pipe(L(()=>e.disconnect()))),B(1));function ce(e){return{width:e.offsetWidth,height:e.offsetHeight}}function ge(e){let t=e;for(;t.clientWidth===0&&t.parentElement;)t=t.parentElement;return ya.pipe(E(r=>r.observe(t)),v(r=>Go.pipe(b(o=>o.target===t),L(()=>r.unobserve(t)))),m(()=>ce(e)),Q(ce(e)))}function Tt(e){return{width:e.scrollWidth,height:e.scrollHeight}}function cr(e){let t=e.parentElement;for(;t&&(e.scrollWidth<=t.scrollWidth&&e.scrollHeight<=t.scrollHeight);)t=(e=t).parentElement;return t?e:void 0}function Jo(e){let t=[],r=e.parentElement;for(;r;)(e.clientWidth>r.clientWidth||e.clientHeight>r.clientHeight)&&t.push(r),r=(e=r).parentElement;return t.length===0&&t.push(document.documentElement),t}function Ue(e){return{x:e.offsetLeft,y:e.offsetTop}}function Xo(e){let t=e.getBoundingClientRect();return{x:t.x+window.scrollX,y:t.y+window.scrollY}}function Zo(e){return S(d(window,"load"),d(window,"resize")).pipe(Le(0,me),m(()=>Ue(e)),Q(Ue(e)))}function pr(e){return{x:e.scrollLeft,y:e.scrollTop}}function De(e){return S(d(e,"scroll"),d(window,"scroll"),d(window,"resize")).pipe(Le(0,me),m(()=>pr(e)),Q(pr(e)))}var en=new g,Ea=C(()=>I(new IntersectionObserver(e=>{for(let t of e)en.next(t)},{threshold:0}))).pipe(v(e=>S(Ke,I(e)).pipe(L(()=>e.disconnect()))),B(1));function tt(e){return Ea.pipe(E(t=>t.observe(e)),v(t=>en.pipe(b(({target:r})=>r===e),L(()=>t.unobserve(e)),m(({isIntersecting:r})=>r))))}function tn(e,t=16){return De(e).pipe(m(({y:r})=>{let o=ce(e),n=Tt(e);return r>=n.height-o.height-t}),K())}var lr={drawer:P("[data-md-toggle=drawer]"),search:P("[data-md-toggle=search]")};function rn(e){return lr[e].checked}function Je(e,t){lr[e].checked!==t&&lr[e].click()}function Ve(e){let t=lr[e];return d(t,"change").pipe(m(()=>t.checked),Q(t.checked))}function wa(e,t){switch(e.constructor){case HTMLInputElement:return e.type==="radio"?/^Arrow/.test(t):!0;case HTMLSelectElement:case HTMLTextAreaElement:return!0;default:return e.isContentEditable}}function Ta(){return S(d(window,"compositionstart").pipe(m(()=>!0)),d(window,"compositionend").pipe(m(()=>!1))).pipe(Q(!1))}function on(){let e=d(window,"keydown").pipe(b(t=>!(t.metaKey||t.ctrlKey)),m(t=>({mode:rn("search")?"search":"global",type:t.key,claim(){t.preventDefault(),t.stopPropagation()}})),b(({mode:t,type:r})=>{if(t==="global"){let o=Re();if(typeof o!="undefined")return!wa(o,r)}return!0}),pe());return Ta().pipe(v(t=>t?M:e))}function xe(){return new URL(location.href)}function pt(e,t=!1){if(G("navigation.instant")&&!t){let r=x("a",{href:e.href});document.body.appendChild(r),r.click(),r.remove()}else location.href=e.href}function nn(){return new g}function an(){return location.hash.slice(1)}function sn(e){let t=x("a",{href:e});t.addEventListener("click",r=>r.stopPropagation()),t.click()}function Sa(e){return S(d(window,"hashchange"),e).pipe(m(an),Q(an()),b(t=>t.length>0),B(1))}function cn(e){return Sa(e).pipe(m(t=>fe(`[id="${t}"]`)),b(t=>typeof t!="undefined"))}function $t(e){let t=matchMedia(e);return ar(r=>t.addListener(()=>r(t.matches))).pipe(Q(t.matches))}function pn(){let e=matchMedia("print");return 
S(d(window,"beforeprint").pipe(m(()=>!0)),d(window,"afterprint").pipe(m(()=>!1))).pipe(Q(e.matches))}function Nr(e,t){return e.pipe(v(r=>r?t():M))}function zr(e,t){return new F(r=>{let o=new XMLHttpRequest;return o.open("GET",`${e}`),o.responseType="blob",o.addEventListener("load",()=>{o.status>=200&&o.status<300?(r.next(o.response),r.complete()):r.error(new Error(o.statusText))}),o.addEventListener("error",()=>{r.error(new Error("Network error"))}),o.addEventListener("abort",()=>{r.complete()}),typeof(t==null?void 0:t.progress$)!="undefined"&&(o.addEventListener("progress",n=>{var i;if(n.lengthComputable)t.progress$.next(n.loaded/n.total*100);else{let a=(i=o.getResponseHeader("Content-Length"))!=null?i:0;t.progress$.next(n.loaded/+a*100)}}),t.progress$.next(5)),o.send(),()=>o.abort()})}function Ne(e,t){return zr(e,t).pipe(v(r=>r.text()),m(r=>JSON.parse(r)),B(1))}function ln(e,t){let r=new DOMParser;return zr(e,t).pipe(v(o=>o.text()),m(o=>r.parseFromString(o,"text/html")),B(1))}function mn(e,t){let r=new DOMParser;return zr(e,t).pipe(v(o=>o.text()),m(o=>r.parseFromString(o,"text/xml")),B(1))}function fn(){return{x:Math.max(0,scrollX),y:Math.max(0,scrollY)}}function un(){return S(d(window,"scroll",{passive:!0}),d(window,"resize",{passive:!0})).pipe(m(fn),Q(fn()))}function dn(){return{width:innerWidth,height:innerHeight}}function hn(){return d(window,"resize",{passive:!0}).pipe(m(dn),Q(dn()))}function bn(){return z([un(),hn()]).pipe(m(([e,t])=>({offset:e,size:t})),B(1))}function mr(e,{viewport$:t,header$:r}){let o=t.pipe(Z("size")),n=z([o,r]).pipe(m(()=>Ue(e)));return z([r,t,n]).pipe(m(([{height:i},{offset:a,size:s},{x:p,y:c}])=>({offset:{x:a.x-p,y:a.y-c+i},size:s})))}function Oa(e){return d(e,"message",t=>t.data)}function Ma(e){let t=new g;return t.subscribe(r=>e.postMessage(r)),t}function vn(e,t=new Worker(e)){let r=Oa(t),o=Ma(t),n=new g;n.subscribe(o);let i=o.pipe(X(),ne(!0));return n.pipe(X(),Pe(r.pipe(U(i))),pe())}var La=P("#__config"),St=JSON.parse(La.textContent);St.base=`${new URL(St.base,xe())}`;function ye(){return St}function G(e){return St.features.includes(e)}function Ee(e,t){return typeof t!="undefined"?St.translations[e].replace("#",t.toString()):St.translations[e]}function Se(e,t=document){return P(`[data-md-component=${e}]`,t)}function ae(e,t=document){return $(`[data-md-component=${e}]`,t)}function _a(e){let t=P(".md-typeset > :first-child",e);return d(t,"click",{once:!0}).pipe(m(()=>P(".md-typeset",e)),m(r=>({hash:__md_hash(r.innerHTML)})))}function gn(e){if(!G("announce.dismiss")||!e.childElementCount)return M;if(!e.hidden){let t=P(".md-typeset",e);__md_hash(t.innerHTML)===__md_get("__announce")&&(e.hidden=!0)}return C(()=>{let t=new g;return t.subscribe(({hash:r})=>{e.hidden=!0,__md_set("__announce",r)}),_a(e).pipe(E(r=>t.next(r)),L(()=>t.complete()),m(r=>R({ref:e},r)))})}function Aa(e,{target$:t}){return t.pipe(m(r=>({hidden:r!==e})))}function xn(e,t){let r=new g;return r.subscribe(({hidden:o})=>{e.hidden=o}),Aa(e,t).pipe(E(o=>r.next(o)),L(()=>r.complete()),m(o=>R({ref:e},o)))}function Pt(e,t){return t==="inline"?x("div",{class:"md-tooltip md-tooltip--inline",id:e,role:"tooltip"},x("div",{class:"md-tooltip__inner md-typeset"})):x("div",{class:"md-tooltip",id:e,role:"tooltip"},x("div",{class:"md-tooltip__inner md-typeset"}))}function yn(...e){return x("div",{class:"md-tooltip2",role:"tooltip"},x("div",{class:"md-tooltip2__inner md-typeset"},e))}function En(e,t){if(t=t?`${t}_annotation_${e}`:void 0,t){let r=t?`#${t}`:void 0;return 
x("aside",{class:"md-annotation",tabIndex:0},Pt(t),x("a",{href:r,class:"md-annotation__index",tabIndex:-1},x("span",{"data-md-annotation-id":e})))}else return x("aside",{class:"md-annotation",tabIndex:0},Pt(t),x("span",{class:"md-annotation__index",tabIndex:-1},x("span",{"data-md-annotation-id":e})))}function wn(e){return x("button",{class:"md-clipboard md-icon",title:Ee("clipboard.copy"),"data-clipboard-target":`#${e} > code`})}function qr(e,t){let r=t&2,o=t&1,n=Object.keys(e.terms).filter(p=>!e.terms[p]).reduce((p,c)=>[...p,x("del",null,c)," "],[]).slice(0,-1),i=ye(),a=new URL(e.location,i.base);G("search.highlight")&&a.searchParams.set("h",Object.entries(e.terms).filter(([,p])=>p).reduce((p,[c])=>`${p} ${c}`.trim(),""));let{tags:s}=ye();return x("a",{href:`${a}`,class:"md-search-result__link",tabIndex:-1},x("article",{class:"md-search-result__article md-typeset","data-md-score":e.score.toFixed(2)},r>0&&x("div",{class:"md-search-result__icon md-icon"}),r>0&&x("h1",null,e.title),r<=0&&x("h2",null,e.title),o>0&&e.text.length>0&&e.text,e.tags&&e.tags.map(p=>{let c=s?p in s?`md-tag-icon md-tag--${s[p]}`:"md-tag-icon":"";return x("span",{class:`md-tag ${c}`},p)}),o>0&&n.length>0&&x("p",{class:"md-search-result__terms"},Ee("search.result.term.missing"),": ",...n)))}function Tn(e){let t=e[0].score,r=[...e],o=ye(),n=r.findIndex(l=>!`${new URL(l.location,o.base)}`.includes("#")),[i]=r.splice(n,1),a=r.findIndex(l=>l.scoreqr(l,1)),...p.length?[x("details",{class:"md-search-result__more"},x("summary",{tabIndex:-1},x("div",null,p.length>0&&p.length===1?Ee("search.result.more.one"):Ee("search.result.more.other",p.length))),...p.map(l=>qr(l,1)))]:[]];return x("li",{class:"md-search-result__item"},c)}function Sn(e){return x("ul",{class:"md-source__facts"},Object.entries(e).map(([t,r])=>x("li",{class:`md-source__fact md-source__fact--${t}`},typeof r=="number"?sr(r):r)))}function Qr(e){let t=`tabbed-control tabbed-control--${e}`;return x("div",{class:t,hidden:!0},x("button",{class:"tabbed-button",tabIndex:-1,"aria-hidden":"true"}))}function On(e){return x("div",{class:"md-typeset__scrollwrap"},x("div",{class:"md-typeset__table"},e))}function Ca(e){var o;let t=ye(),r=new URL(`../${e.version}/`,t.base);return x("li",{class:"md-version__item"},x("a",{href:`${r}`,class:"md-version__link"},e.title,((o=t.version)==null?void 0:o.alias)&&e.aliases.length>0&&x("span",{class:"md-version__alias"},e.aliases[0])))}function Mn(e,t){var o;let r=ye();return e=e.filter(n=>{var i;return!((i=n.properties)!=null&&i.hidden)}),x("div",{class:"md-version"},x("button",{class:"md-version__current","aria-label":Ee("select.version")},t.title,((o=r.version)==null?void 0:o.alias)&&t.aliases.length>0&&x("span",{class:"md-version__alias"},t.aliases[0])),x("ul",{class:"md-version__list"},e.map(Ca)))}var Ha=0;function ka(e){let t=z([et(e),kt(e)]).pipe(m(([o,n])=>o||n),K()),r=C(()=>Jo(e)).pipe(oe(De),ct(1),m(()=>Xo(e)));return t.pipe(Ae(o=>o),v(()=>z([t,r])),m(([o,n])=>({active:o,offset:n})),pe())}function $a(e,t){let{content$:r,viewport$:o}=t,n=`__tooltip2_${Ha++}`;return C(()=>{let i=new g,a=new _r(!1);i.pipe(X(),ne(!1)).subscribe(a);let s=a.pipe(Ht(c=>Me(+!c*250,Hr)),K(),v(c=>c?r:M),E(c=>c.id=n),pe());z([i.pipe(m(({active:c})=>c)),s.pipe(v(c=>kt(c,250)),Q(!1))]).pipe(m(c=>c.some(l=>l))).subscribe(a);let p=a.pipe(b(c=>c),ee(s,o),m(([c,l,{size:f}])=>{let u=e.getBoundingClientRect(),h=u.width/2;if(l.role==="tooltip")return{x:h,y:8+u.height};if(u.y>=f.height/2){let{height:w}=ce(l);return{x:h,y:-16-w}}else 
return{x:h,y:16+u.height}}));return z([s,i,p]).subscribe(([c,{offset:l},f])=>{c.style.setProperty("--md-tooltip-host-x",`${l.x}px`),c.style.setProperty("--md-tooltip-host-y",`${l.y}px`),c.style.setProperty("--md-tooltip-x",`${f.x}px`),c.style.setProperty("--md-tooltip-y",`${f.y}px`),c.classList.toggle("md-tooltip2--top",f.y<0),c.classList.toggle("md-tooltip2--bottom",f.y>=0)}),a.pipe(b(c=>c),ee(s,(c,l)=>l),b(c=>c.role==="tooltip")).subscribe(c=>{let l=ce(P(":scope > *",c));c.style.setProperty("--md-tooltip-width",`${l.width}px`),c.style.setProperty("--md-tooltip-tail","0px")}),a.pipe(K(),be(me),ee(s)).subscribe(([c,l])=>{l.classList.toggle("md-tooltip2--active",c)}),z([a.pipe(b(c=>c)),s]).subscribe(([c,l])=>{l.role==="dialog"?(e.setAttribute("aria-controls",n),e.setAttribute("aria-haspopup","dialog")):e.setAttribute("aria-describedby",n)}),a.pipe(b(c=>!c)).subscribe(()=>{e.removeAttribute("aria-controls"),e.removeAttribute("aria-describedby"),e.removeAttribute("aria-haspopup")}),ka(e).pipe(E(c=>i.next(c)),L(()=>i.complete()),m(c=>R({ref:e},c)))})}function lt(e,{viewport$:t},r=document.body){return $a(e,{content$:new F(o=>{let n=e.title,i=yn(n);return o.next(i),e.removeAttribute("title"),r.append(i),()=>{i.remove(),e.setAttribute("title",n)}}),viewport$:t})}function Pa(e,t){let r=C(()=>z([Zo(e),De(t)])).pipe(m(([{x:o,y:n},i])=>{let{width:a,height:s}=ce(e);return{x:o-i.x+a/2,y:n-i.y+s/2}}));return et(e).pipe(v(o=>r.pipe(m(n=>({active:o,offset:n})),Te(+!o||1/0))))}function Ln(e,t,{target$:r}){let[o,n]=Array.from(e.children);return C(()=>{let i=new g,a=i.pipe(X(),ne(!0));return i.subscribe({next({offset:s}){e.style.setProperty("--md-tooltip-x",`${s.x}px`),e.style.setProperty("--md-tooltip-y",`${s.y}px`)},complete(){e.style.removeProperty("--md-tooltip-x"),e.style.removeProperty("--md-tooltip-y")}}),tt(e).pipe(U(a)).subscribe(s=>{e.toggleAttribute("data-md-visible",s)}),S(i.pipe(b(({active:s})=>s)),i.pipe(_e(250),b(({active:s})=>!s))).subscribe({next({active:s}){s?e.prepend(o):o.remove()},complete(){e.prepend(o)}}),i.pipe(Le(16,me)).subscribe(({active:s})=>{o.classList.toggle("md-tooltip--active",s)}),i.pipe(ct(125,me),b(()=>!!e.offsetParent),m(()=>e.offsetParent.getBoundingClientRect()),m(({x:s})=>s)).subscribe({next(s){s?e.style.setProperty("--md-tooltip-0",`${-s}px`):e.style.removeProperty("--md-tooltip-0")},complete(){e.style.removeProperty("--md-tooltip-0")}}),d(n,"click").pipe(U(a),b(s=>!(s.metaKey||s.ctrlKey))).subscribe(s=>{s.stopPropagation(),s.preventDefault()}),d(n,"mousedown").pipe(U(a),ee(i)).subscribe(([s,{active:p}])=>{var c;if(s.button!==0||s.metaKey||s.ctrlKey)s.preventDefault();else if(p){s.preventDefault();let l=e.parentElement.closest(".md-annotation");l instanceof HTMLElement?l.focus():(c=Re())==null||c.blur()}}),r.pipe(U(a),b(s=>s===o),Ge(125)).subscribe(()=>e.focus()),Pa(e,t).pipe(E(s=>i.next(s)),L(()=>i.complete()),m(s=>R({ref:e},s)))})}function Ra(e){return e.tagName==="CODE"?$(".c, .c1, .cm",e):[e]}function Ia(e){let t=[];for(let r of Ra(e)){let o=[],n=document.createNodeIterator(r,NodeFilter.SHOW_TEXT);for(let i=n.nextNode();i;i=n.nextNode())o.push(i);for(let i of o){let a;for(;a=/(\(\d+\))(!)?/.exec(i.textContent);){let[,s,p]=a;if(typeof p=="undefined"){let c=i.splitText(a.index);i=c.splitText(s.length),t.push(c)}else{i.textContent=s,t.push(i);break}}}}return t}function _n(e,t){t.append(...Array.from(e.childNodes))}function fr(e,t,{target$:r,print$:o}){let n=t.closest("[id]"),i=n==null?void 0:n.id,a=new Map;for(let s of 
Ia(t)){let[,p]=s.textContent.match(/\((\d+)\)/);fe(`:scope > li:nth-child(${p})`,e)&&(a.set(p,En(p,i)),s.replaceWith(a.get(p)))}return a.size===0?M:C(()=>{let s=new g,p=s.pipe(X(),ne(!0)),c=[];for(let[l,f]of a)c.push([P(".md-typeset",f),P(`:scope > li:nth-child(${l})`,e)]);return o.pipe(U(p)).subscribe(l=>{e.hidden=!l,e.classList.toggle("md-annotation-list",l);for(let[f,u]of c)l?_n(f,u):_n(u,f)}),S(...[...a].map(([,l])=>Ln(l,t,{target$:r}))).pipe(L(()=>s.complete()),pe())})}function An(e){if(e.nextElementSibling){let t=e.nextElementSibling;if(t.tagName==="OL")return t;if(t.tagName==="P"&&!t.children.length)return An(t)}}function Cn(e,t){return C(()=>{let r=An(e);return typeof r!="undefined"?fr(r,e,t):M})}var Hn=Vt(Yr());var Fa=0;function kn(e){if(e.nextElementSibling){let t=e.nextElementSibling;if(t.tagName==="OL")return t;if(t.tagName==="P"&&!t.children.length)return kn(t)}}function ja(e){return ge(e).pipe(m(({width:t})=>({scrollable:Tt(e).width>t})),Z("scrollable"))}function $n(e,t){let{matches:r}=matchMedia("(hover)"),o=C(()=>{let n=new g,i=n.pipe(Fr(1));n.subscribe(({scrollable:c})=>{c&&r?e.setAttribute("tabindex","0"):e.removeAttribute("tabindex")});let a=[];if(Hn.default.isSupported()&&(e.closest(".copy")||G("content.code.copy")&&!e.closest(".no-copy"))){let c=e.closest("pre");c.id=`__code_${Fa++}`;let l=wn(c.id);c.insertBefore(l,e),G("content.tooltips")&&a.push(lt(l,{viewport$}))}let s=e.closest(".highlight");if(s instanceof HTMLElement){let c=kn(s);if(typeof c!="undefined"&&(s.classList.contains("annotate")||G("content.code.annotate"))){let l=fr(c,e,t);a.push(ge(s).pipe(U(i),m(({width:f,height:u})=>f&&u),K(),v(f=>f?l:M)))}}return $(":scope > span[id]",e).length&&e.classList.add("md-code__content"),ja(e).pipe(E(c=>n.next(c)),L(()=>n.complete()),m(c=>R({ref:e},c)),Pe(...a))});return G("content.lazy")?tt(e).pipe(b(n=>n),Te(1),v(()=>o)):o}function Wa(e,{target$:t,print$:r}){let o=!0;return S(t.pipe(m(n=>n.closest("details:not([open])")),b(n=>e===n),m(()=>({action:"open",reveal:!0}))),r.pipe(b(n=>n||!o),E(()=>o=e.open),m(n=>({action:n?"open":"close"}))))}function Pn(e,t){return C(()=>{let r=new g;return r.subscribe(({action:o,reveal:n})=>{e.toggleAttribute("open",o==="open"),n&&e.scrollIntoView()}),Wa(e,t).pipe(E(o=>r.next(o)),L(()=>r.complete()),m(o=>R({ref:e},o)))})}var Rn=".node circle,.node ellipse,.node path,.node polygon,.node rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}marker{fill:var(--md-mermaid-edge-color)!important}.edgeLabel .label rect{fill:#0000}.label{color:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.label foreignObject{line-height:normal;overflow:visible}.label div .edgeLabel{color:var(--md-mermaid-label-fg-color)}.edgeLabel,.edgeLabel rect,.label div .edgeLabel{background-color:var(--md-mermaid-label-bg-color)}.edgeLabel,.edgeLabel rect{fill:var(--md-mermaid-label-bg-color);color:var(--md-mermaid-edge-color)}.edgePath .path,.flowchart-link{stroke:var(--md-mermaid-edge-color);stroke-width:.05rem}.edgePath .arrowheadPath{fill:var(--md-mermaid-edge-color);stroke:none}.cluster rect{fill:var(--md-default-fg-color--lightest);stroke:var(--md-default-fg-color--lighter)}.cluster span{color:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}g #flowchart-circleEnd,g #flowchart-circleStart,g #flowchart-crossEnd,g #flowchart-crossStart,g #flowchart-pointEnd,g #flowchart-pointStart{stroke:none}g.classGroup line,g.classGroup 
rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}g.classGroup text{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.classLabel .box{fill:var(--md-mermaid-label-bg-color);background-color:var(--md-mermaid-label-bg-color);opacity:1}.classLabel .label{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.node .divider{stroke:var(--md-mermaid-node-fg-color)}.relation{stroke:var(--md-mermaid-edge-color)}.cardinality{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.cardinality text{fill:inherit!important}defs #classDiagram-compositionEnd,defs #classDiagram-compositionStart,defs #classDiagram-dependencyEnd,defs #classDiagram-dependencyStart,defs #classDiagram-extensionEnd,defs #classDiagram-extensionStart{fill:var(--md-mermaid-edge-color)!important;stroke:var(--md-mermaid-edge-color)!important}defs #classDiagram-aggregationEnd,defs #classDiagram-aggregationStart{fill:var(--md-mermaid-label-bg-color)!important;stroke:var(--md-mermaid-edge-color)!important}g.stateGroup rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}g.stateGroup .state-title{fill:var(--md-mermaid-label-fg-color)!important;font-family:var(--md-mermaid-font-family)}g.stateGroup .composit{fill:var(--md-mermaid-label-bg-color)}.nodeLabel,.nodeLabel p{color:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}a .nodeLabel{text-decoration:underline}.node circle.state-end,.node circle.state-start,.start-state{fill:var(--md-mermaid-edge-color);stroke:none}.end-state-inner,.end-state-outer{fill:var(--md-mermaid-edge-color)}.end-state-inner,.node circle.state-end{stroke:var(--md-mermaid-label-bg-color)}.transition{stroke:var(--md-mermaid-edge-color)}[id^=state-fork] rect,[id^=state-join] rect{fill:var(--md-mermaid-edge-color)!important;stroke:none!important}.statediagram-cluster.statediagram-cluster .inner{fill:var(--md-default-bg-color)}.statediagram-cluster rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}.statediagram-state rect.divider{fill:var(--md-default-fg-color--lightest);stroke:var(--md-default-fg-color--lighter)}defs #statediagram-barbEnd{stroke:var(--md-mermaid-edge-color)}.attributeBoxEven,.attributeBoxOdd{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}.entityBox{fill:var(--md-mermaid-label-bg-color);stroke:var(--md-mermaid-node-fg-color)}.entityLabel{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.relationshipLabelBox{fill:var(--md-mermaid-label-bg-color);fill-opacity:1;background-color:var(--md-mermaid-label-bg-color);opacity:1}.relationshipLabel{fill:var(--md-mermaid-label-fg-color)}.relationshipLine{stroke:var(--md-mermaid-edge-color)}defs #ONE_OR_MORE_END *,defs #ONE_OR_MORE_START *,defs #ONLY_ONE_END *,defs #ONLY_ONE_START *,defs #ZERO_OR_MORE_END *,defs #ZERO_OR_MORE_START *,defs #ZERO_OR_ONE_END *,defs #ZERO_OR_ONE_START *{stroke:var(--md-mermaid-edge-color)!important}defs #ZERO_OR_MORE_END circle,defs #ZERO_OR_MORE_START circle{fill:var(--md-mermaid-label-bg-color)}.actor{fill:var(--md-mermaid-sequence-actor-bg-color);stroke:var(--md-mermaid-sequence-actor-border-color)}text.actor>tspan{fill:var(--md-mermaid-sequence-actor-fg-color);font-family:var(--md-mermaid-font-family)}line{stroke:var(--md-mermaid-sequence-actor-line-color)}.actor-man circle,.actor-man 
line{fill:var(--md-mermaid-sequence-actorman-bg-color);stroke:var(--md-mermaid-sequence-actorman-line-color)}.messageLine0,.messageLine1{stroke:var(--md-mermaid-sequence-message-line-color)}.note{fill:var(--md-mermaid-sequence-note-bg-color);stroke:var(--md-mermaid-sequence-note-border-color)}.loopText,.loopText>tspan,.messageText,.noteText>tspan{stroke:none;font-family:var(--md-mermaid-font-family)!important}.messageText{fill:var(--md-mermaid-sequence-message-fg-color)}.loopText,.loopText>tspan{fill:var(--md-mermaid-sequence-loop-fg-color)}.noteText>tspan{fill:var(--md-mermaid-sequence-note-fg-color)}#arrowhead path{fill:var(--md-mermaid-sequence-message-line-color);stroke:none}.loopLine{fill:var(--md-mermaid-sequence-loop-bg-color);stroke:var(--md-mermaid-sequence-loop-border-color)}.labelBox{fill:var(--md-mermaid-sequence-label-bg-color);stroke:none}.labelText,.labelText>span{fill:var(--md-mermaid-sequence-label-fg-color);font-family:var(--md-mermaid-font-family)}.sequenceNumber{fill:var(--md-mermaid-sequence-number-fg-color)}rect.rect{fill:var(--md-mermaid-sequence-box-bg-color);stroke:none}rect.rect+text.text{fill:var(--md-mermaid-sequence-box-fg-color)}defs #sequencenumber{fill:var(--md-mermaid-sequence-number-bg-color)!important}";var Br,Da=0;function Va(){return typeof mermaid=="undefined"||mermaid instanceof Element?wt("https://unpkg.com/mermaid@10/dist/mermaid.min.js"):I(void 0)}function In(e){return e.classList.remove("mermaid"),Br||(Br=Va().pipe(E(()=>mermaid.initialize({startOnLoad:!1,themeCSS:Rn,sequence:{actorFontSize:"16px",messageFontSize:"16px",noteFontSize:"16px"}})),m(()=>{}),B(1))),Br.subscribe(()=>ao(this,null,function*(){e.classList.add("mermaid");let t=`__mermaid_${Da++}`,r=x("div",{class:"mermaid"}),o=e.textContent,{svg:n,fn:i}=yield mermaid.render(t,o),a=r.attachShadow({mode:"closed"});a.innerHTML=n,e.replaceWith(r),i==null||i(a)})),Br.pipe(m(()=>({ref:e})))}var Fn=x("table");function jn(e){return e.replaceWith(Fn),Fn.replaceWith(On(e)),I({ref:e})}function Na(e){let t=e.find(r=>r.checked)||e[0];return S(...e.map(r=>d(r,"change").pipe(m(()=>P(`label[for="${r.id}"]`))))).pipe(Q(P(`label[for="${t.id}"]`)),m(r=>({active:r})))}function Wn(e,{viewport$:t,target$:r}){let o=P(".tabbed-labels",e),n=$(":scope > input",e),i=Qr("prev");e.append(i);let a=Qr("next");return e.append(a),C(()=>{let s=new g,p=s.pipe(X(),ne(!0));z([s,ge(e),tt(e)]).pipe(U(p),Le(1,me)).subscribe({next([{active:c},l]){let f=Ue(c),{width:u}=ce(c);e.style.setProperty("--md-indicator-x",`${f.x}px`),e.style.setProperty("--md-indicator-width",`${u}px`);let h=pr(o);(f.x<h.x||f.x+u>h.x+l.width)&&o.scrollTo({left:Math.max(0,f.x-16),behavior:"smooth"})},complete(){e.style.removeProperty("--md-indicator-x"),e.style.removeProperty("--md-indicator-width")}}),z([De(o),ge(o)]).pipe(U(p)).subscribe(([c,l])=>{let f=Tt(o);i.hidden=c.x<16,a.hidden=c.x>f.width-l.width-16}),S(d(i,"click").pipe(m(()=>-1)),d(a,"click").pipe(m(()=>1))).pipe(U(p)).subscribe(c=>{let{width:l}=ce(o);o.scrollBy({left:l*c,behavior:"smooth"})}),r.pipe(U(p),b(c=>n.includes(c))).subscribe(c=>c.click()),o.classList.add("tabbed-labels--linked");for(let c of n){let l=P(`label[for="${c.id}"]`);l.replaceChildren(x("a",{href:`#${l.htmlFor}`,tabIndex:-1},...Array.from(l.childNodes))),d(l.firstElementChild,"click").pipe(U(p),b(f=>!(f.metaKey||f.ctrlKey)),E(f=>{f.preventDefault(),f.stopPropagation()})).subscribe(()=>{history.replaceState({},"",`#${l.htmlFor}`),l.click()})}return G("content.tabs.link")&&s.pipe(Ce(1),ee(t)).subscribe(([{active:c},{offset:l}])=>{let
f=c.innerText.trim();if(c.hasAttribute("data-md-switching"))c.removeAttribute("data-md-switching");else{let u=e.offsetTop-l.y;for(let w of $("[data-tabs]"))for(let A of $(":scope > input",w)){let te=P(`label[for="${A.id}"]`);if(te!==c&&te.innerText.trim()===f){te.setAttribute("data-md-switching",""),A.click();break}}window.scrollTo({top:e.offsetTop-u});let h=__md_get("__tabs")||[];__md_set("__tabs",[...new Set([f,...h])])}}),s.pipe(U(p)).subscribe(()=>{for(let c of $("audio, video",e))c.pause()}),Na(n).pipe(E(c=>s.next(c)),L(()=>s.complete()),m(c=>R({ref:e},c)))}).pipe(Qe(se))}function Un(e,{viewport$:t,target$:r,print$:o}){return S(...$(".annotate:not(.highlight)",e).map(n=>Cn(n,{target$:r,print$:o})),...$("pre:not(.mermaid) > code",e).map(n=>$n(n,{target$:r,print$:o})),...$("pre.mermaid",e).map(n=>In(n)),...$("table:not([class])",e).map(n=>jn(n)),...$("details",e).map(n=>Pn(n,{target$:r,print$:o})),...$("[data-tabs]",e).map(n=>Wn(n,{viewport$:t,target$:r})),...$("[title]",e).filter(()=>G("content.tooltips")).map(n=>lt(n,{viewport$:t})))}function za(e,{alert$:t}){return t.pipe(v(r=>S(I(!0),I(!1).pipe(Ge(2e3))).pipe(m(o=>({message:r,active:o})))))}function Dn(e,t){let r=P(".md-typeset",e);return C(()=>{let o=new g;return o.subscribe(({message:n,active:i})=>{e.classList.toggle("md-dialog--active",i),r.textContent=n}),za(e,t).pipe(E(n=>o.next(n)),L(()=>o.complete()),m(n=>R({ref:e},n)))})}var qa=0;function Qa(e,t){document.body.append(e);let{width:r}=ce(e);e.style.setProperty("--md-tooltip-width",`${r}px`),e.remove();let o=cr(t),n=typeof o!="undefined"?De(o):I({x:0,y:0}),i=S(et(t),kt(t)).pipe(K());return z([i,n]).pipe(m(([a,s])=>{let{x:p,y:c}=Ue(t),l=ce(t),f=t.closest("table");return f&&t.parentElement&&(p+=f.offsetLeft+t.parentElement.offsetLeft,c+=f.offsetTop+t.parentElement.offsetTop),{active:a,offset:{x:p-s.x+l.width/2-r/2,y:c-s.y+l.height+8}}}))}function Vn(e){let t=e.title;if(!t.length)return M;let r=`__tooltip_${qa++}`,o=Pt(r,"inline"),n=P(".md-typeset",o);return n.innerHTML=t,C(()=>{let i=new g;return i.subscribe({next({offset:a}){o.style.setProperty("--md-tooltip-x",`${a.x}px`),o.style.setProperty("--md-tooltip-y",`${a.y}px`)},complete(){o.style.removeProperty("--md-tooltip-x"),o.style.removeProperty("--md-tooltip-y")}}),S(i.pipe(b(({active:a})=>a)),i.pipe(_e(250),b(({active:a})=>!a))).subscribe({next({active:a}){a?(e.insertAdjacentElement("afterend",o),e.setAttribute("aria-describedby",r),e.removeAttribute("title")):(o.remove(),e.removeAttribute("aria-describedby"),e.setAttribute("title",t))},complete(){o.remove(),e.removeAttribute("aria-describedby"),e.setAttribute("title",t)}}),i.pipe(Le(16,me)).subscribe(({active:a})=>{o.classList.toggle("md-tooltip--active",a)}),i.pipe(ct(125,me),b(()=>!!e.offsetParent),m(()=>e.offsetParent.getBoundingClientRect()),m(({x:a})=>a)).subscribe({next(a){a?o.style.setProperty("--md-tooltip-0",`${-a}px`):o.style.removeProperty("--md-tooltip-0")},complete(){o.style.removeProperty("--md-tooltip-0")}}),Qa(o,e).pipe(E(a=>i.next(a)),L(()=>i.complete()),m(a=>R({ref:e},a)))}).pipe(Qe(se))}function Ka({viewport$:e}){if(!G("header.autohide"))return I(!1);let t=e.pipe(m(({offset:{y:n}})=>n),Ye(2,1),m(([n,i])=>[n<i,i]),Z(0)),r=z([e,t]).pipe(b(([{offset:n},[,i]])=>Math.abs(i-n.y)>100),m(([,[n]])=>n),K()),o=Ve("search");return z([e,o]).pipe(m(([{offset:n},i])=>n.y>400&&!i),K(),v(n=>n?r:I(!1)),Q(!1))}function Nn(e,t){return C(()=>z([ge(e),Ka(t)])).pipe(m(([{height:r},o])=>({height:r,hidden:o})),K((r,o)=>r.height===o.height&&r.hidden===o.hidden),B(1))}function zn(e,{header$:t,main$:r}){return C(()=>{let o=new
g,n=o.pipe(X(),ne(!0));o.pipe(Z("active"),We(t)).subscribe(([{active:a},{hidden:s}])=>{e.classList.toggle("md-header--shadow",a&&!s),e.hidden=s});let i=ue($("[title]",e)).pipe(b(()=>G("content.tooltips")),oe(a=>Vn(a)));return r.subscribe(o),t.pipe(U(n),m(a=>R({ref:e},a)),Pe(i.pipe(U(n))))})}function Ya(e,{viewport$:t,header$:r}){return mr(e,{viewport$:t,header$:r}).pipe(m(({offset:{y:o}})=>{let{height:n}=ce(e);return{active:o>=n}}),Z("active"))}function qn(e,t){return C(()=>{let r=new g;r.subscribe({next({active:n}){e.classList.toggle("md-header__title--active",n)},complete(){e.classList.remove("md-header__title--active")}});let o=fe(".md-content h1");return typeof o=="undefined"?M:Ya(o,t).pipe(E(n=>r.next(n)),L(()=>r.complete()),m(n=>R({ref:e},n)))})}function Qn(e,{viewport$:t,header$:r}){let o=r.pipe(m(({height:i})=>i),K()),n=o.pipe(v(()=>ge(e).pipe(m(({height:i})=>({top:e.offsetTop,bottom:e.offsetTop+i})),Z("bottom"))));return z([o,n,t]).pipe(m(([i,{top:a,bottom:s},{offset:{y:p},size:{height:c}}])=>(c=Math.max(0,c-Math.max(0,a-p,i)-Math.max(0,c+p-s)),{offset:a-i,height:c,active:a-i<=p})),K((i,a)=>i.offset===a.offset&&i.height===a.height&&i.active===a.active))}function Ba(e){let t=__md_get("__palette")||{index:e.findIndex(o=>matchMedia(o.getAttribute("data-md-color-media")).matches)},r=Math.max(0,Math.min(t.index,e.length-1));return I(...e).pipe(oe(o=>d(o,"change").pipe(m(()=>o))),Q(e[r]),m(o=>({index:e.indexOf(o),color:{media:o.getAttribute("data-md-color-media"),scheme:o.getAttribute("data-md-color-scheme"),primary:o.getAttribute("data-md-color-primary"),accent:o.getAttribute("data-md-color-accent")}})),B(1))}function Kn(e){let t=$("input",e),r=x("meta",{name:"theme-color"});document.head.appendChild(r);let o=x("meta",{name:"color-scheme"});document.head.appendChild(o);let n=$t("(prefers-color-scheme: light)");return C(()=>{let i=new g;return i.subscribe(a=>{if(document.body.setAttribute("data-md-color-switching",""),a.color.media==="(prefers-color-scheme)"){let s=matchMedia("(prefers-color-scheme: light)"),p=document.querySelector(s.matches?"[data-md-color-media='(prefers-color-scheme: light)']":"[data-md-color-media='(prefers-color-scheme: dark)']");a.color.scheme=p.getAttribute("data-md-color-scheme"),a.color.primary=p.getAttribute("data-md-color-primary"),a.color.accent=p.getAttribute("data-md-color-accent")}for(let[s,p]of Object.entries(a.color))document.body.setAttribute(`data-md-color-${s}`,p);for(let s=0;s<t.length;s++){let p=t[s].nextElementSibling;p instanceof HTMLElement&&(p.hidden=a.index!==s)}__md_set("__palette",a)}),d(e,"keydown").pipe(b(a=>a.key==="Enter"),ee(i,(a,s)=>s)).subscribe(({index:a})=>{a=(a+1)%t.length,t[a].click(),t[a].focus()}),i.pipe(m(()=>{let a=Se("header"),s=window.getComputedStyle(a);return o.content=s.colorScheme,s.backgroundColor.match(/\d+/g).map(p=>(+p).toString(16).padStart(2,"0")).join("")})).subscribe(a=>r.content=`#${a}`),i.pipe(be(se)).subscribe(()=>{document.body.removeAttribute("data-md-color-switching")}),Ba(t).pipe(U(n.pipe(Ce(1))),st(),E(a=>i.next(a)),L(()=>i.complete()),m(a=>R({ref:e},a)))})}function Yn(e,{progress$:t}){return C(()=>{let r=new g;return r.subscribe(({value:o})=>{e.style.setProperty("--md-progress-value",`${o}`)}),t.pipe(E(o=>r.next({value:o})),L(()=>r.complete()),m(o=>({ref:e,value:o})))})}var Gr=Vt(Yr());function Ga(e){e.setAttribute("data-md-copying","");let t=e.closest("[data-copy]"),r=t?t.getAttribute("data-copy"):e.innerText;return e.removeAttribute("data-md-copying"),r.trimEnd()}function Bn({alert$:e}){Gr.default.isSupported()&&new F(t=>{new Gr.default("[data-clipboard-target],
[data-clipboard-text]",{text:r=>r.getAttribute("data-clipboard-text")||Ga(P(r.getAttribute("data-clipboard-target")))}).on("success",r=>t.next(r))}).pipe(E(t=>{t.trigger.focus()}),m(()=>Ee("clipboard.copied"))).subscribe(e)}function Gn(e,t){return e.protocol=t.protocol,e.hostname=t.hostname,e}function Ja(e,t){let r=new Map;for(let o of $("url",e)){let n=P("loc",o),i=[Gn(new URL(n.textContent),t)];r.set(`${i[0]}`,i);for(let a of $("[rel=alternate]",o)){let s=a.getAttribute("href");s!=null&&i.push(Gn(new URL(s),t))}}return r}function ur(e){return mn(new URL("sitemap.xml",e)).pipe(m(t=>Ja(t,new URL(e))),ve(()=>I(new Map)))}function Xa(e,t){if(!(e.target instanceof Element))return M;let r=e.target.closest("a");if(r===null)return M;if(r.target||e.metaKey||e.ctrlKey)return M;let o=new URL(r.href);return o.search=o.hash="",t.has(`${o}`)?(e.preventDefault(),I(new URL(r.href))):M}function Jn(e){let t=new Map;for(let r of $(":scope > *",e.head))t.set(r.outerHTML,r);return t}function Xn(e){for(let t of $("[href], [src]",e))for(let r of["href","src"]){let o=t.getAttribute(r);if(o&&!/^(?:[a-z]+:)?\/\//i.test(o)){t[r]=t[r];break}}return I(e)}function Za(e){for(let o of["[data-md-component=announce]","[data-md-component=container]","[data-md-component=header-topic]","[data-md-component=outdated]","[data-md-component=logo]","[data-md-component=skip]",...G("navigation.tabs.sticky")?["[data-md-component=tabs]"]:[]]){let n=fe(o),i=fe(o,e);typeof n!="undefined"&&typeof i!="undefined"&&n.replaceWith(i)}let t=Jn(document);for(let[o,n]of Jn(e))t.has(o)?t.delete(o):document.head.appendChild(n);for(let o of t.values()){let n=o.getAttribute("name");n!=="theme-color"&&n!=="color-scheme"&&o.remove()}let r=Se("container");return je($("script",r)).pipe(v(o=>{let n=e.createElement("script");if(o.src){for(let i of o.getAttributeNames())n.setAttribute(i,o.getAttribute(i));return o.replaceWith(n),new F(i=>{n.onload=()=>i.complete()})}else return n.textContent=o.textContent,o.replaceWith(n),M}),X(),ne(document))}function Zn({location$:e,viewport$:t,progress$:r}){let o=ye();if(location.protocol==="file:")return M;let n=ur(o.base);I(document).subscribe(Xn);let i=d(document.body,"click").pipe(We(n),v(([p,c])=>Xa(p,c)),pe()),a=d(window,"popstate").pipe(m(xe),pe());i.pipe(ee(t)).subscribe(([p,{offset:c}])=>{history.replaceState(c,""),history.pushState(null,"",p)}),S(i,a).subscribe(e);let s=e.pipe(Z("pathname"),v(p=>ln(p,{progress$:r}).pipe(ve(()=>(pt(p,!0),M)))),v(Xn),v(Za),pe());return S(s.pipe(ee(e,(p,c)=>c)),s.pipe(v(()=>e),Z("pathname"),v(()=>e),Z("hash")),e.pipe(K((p,c)=>p.pathname===c.pathname&&p.hash===c.hash),v(()=>i),E(()=>history.back()))).subscribe(p=>{var c,l;history.state!==null||!p.hash?window.scrollTo(0,(l=(c=history.state)==null?void 0:c.y)!=null?l:0):(history.scrollRestoration="auto",sn(p.hash),history.scrollRestoration="manual")}),e.subscribe(()=>{history.scrollRestoration="manual"}),d(window,"beforeunload").subscribe(()=>{history.scrollRestoration="auto"}),t.pipe(Z("offset"),_e(100)).subscribe(({offset:p})=>{history.replaceState(p,"")}),s}var ri=Vt(ti());function oi(e){let t=e.separator.split("|").map(n=>n.replace(/(\(\?[!=<][^)]+\))/g,"").length===0?"\uFFFD":n).join("|"),r=new RegExp(t,"img"),o=(n,i,a)=>`${i}${a}`;return n=>{n=n.replace(/[\s*+\-:~^]+/g," ").trim();let i=new RegExp(`(^|${e.separator}|)(${n.replace(/[|\\{}()[\]^$+*?.-]/g,"\\$&").replace(r,"|")})`,"img");return a=>(0,ri.default)(a).replace(i,o).replace(/<\/mark>(\s+)]*>/img,"$1")}}function It(e){return e.type===1}function dr(e){return 
e.type===3}function ni(e,t){let r=vn(e);return S(I(location.protocol!=="file:"),Ve("search")).pipe(Ae(o=>o),v(()=>t)).subscribe(({config:o,docs:n})=>r.next({type:0,data:{config:o,docs:n,options:{suggest:G("search.suggest")}}})),r}function ii({document$:e}){let t=ye(),r=Ne(new URL("../versions.json",t.base)).pipe(ve(()=>M)),o=r.pipe(m(n=>{let[,i]=t.base.match(/([^/]+)\/?$/);return n.find(({version:a,aliases:s})=>a===i||s.includes(i))||n[0]}));r.pipe(m(n=>new Map(n.map(i=>[`${new URL(`../${i.version}/`,t.base)}`,i]))),v(n=>d(document.body,"click").pipe(b(i=>!i.metaKey&&!i.ctrlKey),ee(o),v(([i,a])=>{if(i.target instanceof Element){let s=i.target.closest("a");if(s&&!s.target&&n.has(s.href)){let p=s.href;return!i.target.closest(".md-version")&&n.get(p)===a?M:(i.preventDefault(),I(p))}}return M}),v(i=>ur(new URL(i)).pipe(m(a=>{let p=xe().href.replace(t.base,i);return a.has(p.split("#")[0])?new URL(p):new URL(i)})))))).subscribe(n=>pt(n,!0)),z([r,o]).subscribe(([n,i])=>{P(".md-header__topic").appendChild(Mn(n,i))}),e.pipe(v(()=>o)).subscribe(n=>{var a;let i=__md_get("__outdated",sessionStorage);if(i===null){i=!0;let s=((a=t.version)==null?void 0:a.default)||"latest";Array.isArray(s)||(s=[s]);e:for(let p of s)for(let c of n.aliases.concat(n.version))if(new RegExp(p,"i").test(c)){i=!1;break e}__md_set("__outdated",i,sessionStorage)}if(i)for(let s of ae("outdated"))s.hidden=!1})}function ns(e,{worker$:t}){let{searchParams:r}=xe();r.has("q")&&(Je("search",!0),e.value=r.get("q"),e.focus(),Ve("search").pipe(Ae(i=>!i)).subscribe(()=>{let i=xe();i.searchParams.delete("q"),history.replaceState({},"",`${i}`)}));let o=et(e),n=S(t.pipe(Ae(It)),d(e,"keyup"),o).pipe(m(()=>e.value),K());return z([n,o]).pipe(m(([i,a])=>({value:i,focus:a})),B(1))}function ai(e,{worker$:t}){let r=new g,o=r.pipe(X(),ne(!0));z([t.pipe(Ae(It)),r],(i,a)=>a).pipe(Z("value")).subscribe(({value:i})=>t.next({type:2,data:i})),r.pipe(Z("focus")).subscribe(({focus:i})=>{i&&Je("search",i)}),d(e.form,"reset").pipe(U(o)).subscribe(()=>e.focus());let n=P("header [for=__search]");return d(n,"click").subscribe(()=>e.focus()),ns(e,{worker$:t}).pipe(E(i=>r.next(i)),L(()=>r.complete()),m(i=>R({ref:e},i)),B(1))}function si(e,{worker$:t,query$:r}){let o=new g,n=tn(e.parentElement).pipe(b(Boolean)),i=e.parentElement,a=P(":scope > :first-child",e),s=P(":scope > :last-child",e);Ve("search").subscribe(l=>s.setAttribute("role",l?"list":"presentation")),o.pipe(ee(r),Ur(t.pipe(Ae(It)))).subscribe(([{items:l},{value:f}])=>{switch(l.length){case 0:a.textContent=f.length?Ee("search.result.none"):Ee("search.result.placeholder");break;case 1:a.textContent=Ee("search.result.one");break;default:let u=sr(l.length);a.textContent=Ee("search.result.other",u)}});let p=o.pipe(E(()=>s.innerHTML=""),v(({items:l})=>S(I(...l.slice(0,10)),I(...l.slice(10)).pipe(Ye(4),Vr(n),v(([f])=>f)))),m(Tn),pe());return p.subscribe(l=>s.appendChild(l)),p.pipe(oe(l=>{let f=fe("details",l);return typeof f=="undefined"?M:d(f,"toggle").pipe(U(o),m(()=>f))})).subscribe(l=>{l.open===!1&&l.offsetTop<=i.scrollTop&&i.scrollTo({top:l.offsetTop})}),t.pipe(b(dr),m(({data:l})=>l)).pipe(E(l=>o.next(l)),L(()=>o.complete()),m(l=>R({ref:e},l)))}function is(e,{query$:t}){return t.pipe(m(({value:r})=>{let o=xe();return o.hash="",r=r.replace(/\s+/g,"+").replace(/&/g,"%26").replace(/=/g,"%3D"),o.search=`q=${r}`,{url:o}}))}function ci(e,t){let r=new g,o=r.pipe(X(),ne(!0));return 
r.subscribe(({url:n})=>{e.setAttribute("data-clipboard-text",e.href),e.href=`${n}`}),d(e,"click").pipe(U(o)).subscribe(n=>n.preventDefault()),is(e,t).pipe(E(n=>r.next(n)),L(()=>r.complete()),m(n=>R({ref:e},n)))}function pi(e,{worker$:t,keyboard$:r}){let o=new g,n=Se("search-query"),i=S(d(n,"keydown"),d(n,"focus")).pipe(be(se),m(()=>n.value),K());return o.pipe(We(i),m(([{suggest:s},p])=>{let c=p.split(/([\s-]+)/);if(s!=null&&s.length&&c[c.length-1]){let l=s[s.length-1];l.startsWith(c[c.length-1])&&(c[c.length-1]=l)}else c.length=0;return c})).subscribe(s=>e.innerHTML=s.join("").replace(/\s/g,"&nbsp;")),r.pipe(b(({mode:s})=>s==="search")).subscribe(s=>{switch(s.type){case"ArrowRight":e.innerText.length&&n.selectionStart===n.value.length&&(n.value=e.innerText);break}}),t.pipe(b(dr),m(({data:s})=>s)).pipe(E(s=>o.next(s)),L(()=>o.complete()),m(()=>({ref:e})))}function li(e,{index$:t,keyboard$:r}){let o=ye();try{let n=ni(o.search,t),i=Se("search-query",e),a=Se("search-result",e);d(e,"click").pipe(b(({target:p})=>p instanceof Element&&!!p.closest("a"))).subscribe(()=>Je("search",!1)),r.pipe(b(({mode:p})=>p==="search")).subscribe(p=>{let c=Re();switch(p.type){case"Enter":if(c===i){let l=new Map;for(let f of $(":first-child [href]",a)){let u=f.firstElementChild;l.set(f,parseFloat(u.getAttribute("data-md-score")))}if(l.size){let[[f]]=[...l].sort(([,u],[,h])=>h-u);f.click()}p.claim()}break;case"Escape":case"Tab":Je("search",!1),i.blur();break;case"ArrowUp":case"ArrowDown":if(typeof c=="undefined")i.focus();else{let l=[i,...$(":not(details) > [href], summary, details[open] [href]",a)],f=Math.max(0,(Math.max(0,l.indexOf(c))+l.length+(p.type==="ArrowUp"?-1:1))%l.length);l[f].focus()}p.claim();break;default:i!==Re()&&i.focus()}}),r.pipe(b(({mode:p})=>p==="global")).subscribe(p=>{switch(p.type){case"f":case"s":case"/":i.focus(),i.select(),p.claim();break}});let s=ai(i,{worker$:n});return S(s,si(a,{worker$:n,query$:s})).pipe(Pe(...ae("search-share",e).map(p=>ci(p,{query$:s})),...ae("search-suggest",e).map(p=>pi(p,{worker$:n,keyboard$:r}))))}catch(n){return e.hidden=!0,Ke}}function mi(e,{index$:t,location$:r}){return z([t,r.pipe(Q(xe()),b(o=>!!o.searchParams.get("h")))]).pipe(m(([o,n])=>oi(o.config)(n.searchParams.get("h"))),m(o=>{var a;let n=new Map,i=document.createNodeIterator(e,NodeFilter.SHOW_TEXT);for(let s=i.nextNode();s;s=i.nextNode())if((a=s.parentElement)!=null&&a.offsetHeight){let p=s.textContent,c=o(p);c.length>p.length&&n.set(s,c)}for(let[s,p]of n){let{childNodes:c}=x("span",null,p);s.replaceWith(...Array.from(c))}return{ref:e,nodes:n}}))}function as(e,{viewport$:t,main$:r}){let o=e.closest(".md-grid"),n=o.offsetTop-o.parentElement.offsetTop;return z([r,t]).pipe(m(([{offset:i,height:a},{offset:{y:s}}])=>(a=a+Math.min(n,Math.max(0,s-i))-n,{height:a,locked:s>=i+n})),K((i,a)=>i.height===a.height&&i.locked===a.locked))}function Jr(e,o){var n=o,{header$:t}=n,r=io(n,["header$"]);let i=P(".md-sidebar__scrollwrap",e),{y:a}=Ue(i);return C(()=>{let s=new g,p=s.pipe(X(),ne(!0)),c=s.pipe(Le(0,me));return c.pipe(ee(t)).subscribe({next([{height:l},{height:f}]){i.style.height=`${l-2*a}px`,e.style.top=`${f}px`},complete(){i.style.height="",e.style.top=""}}),c.pipe(Ae()).subscribe(()=>{for(let l of $(".md-nav__link--active[href]",e)){if(!l.clientHeight)continue;let f=l.closest(".md-sidebar__scrollwrap");if(typeof f!="undefined"){let u=l.offsetTop-f.offsetTop,{height:h}=ce(f);f.scrollTo({top:u-h/2})}}}),ue($("label[tabindex]",e)).pipe(oe(l=>d(l,"click").pipe(be(se),m(()=>l),U(p)))).subscribe(l=>{let
f=P(`[id="${l.htmlFor}"]`);P(`[aria-labelledby="${l.id}"]`).setAttribute("aria-expanded",`${f.checked}`)}),as(e,r).pipe(E(l=>s.next(l)),L(()=>s.complete()),m(l=>R({ref:e},l)))})}function fi(e,t){if(typeof t!="undefined"){let r=`https://api.github.com/repos/${e}/${t}`;return Ct(Ne(`${r}/releases/latest`).pipe(ve(()=>M),m(o=>({version:o.tag_name})),Be({})),Ne(r).pipe(ve(()=>M),m(o=>({stars:o.stargazers_count,forks:o.forks_count})),Be({}))).pipe(m(([o,n])=>R(R({},o),n)))}else{let r=`https://api.github.com/users/${e}`;return Ne(r).pipe(m(o=>({repositories:o.public_repos})),Be({}))}}function ui(e,t){let r=`https://${e}/api/v4/projects/${encodeURIComponent(t)}`;return Ne(r).pipe(ve(()=>M),m(({star_count:o,forks_count:n})=>({stars:o,forks:n})),Be({}))}function di(e){let t=e.match(/^.+github\.com\/([^/]+)\/?([^/]+)?/i);if(t){let[,r,o]=t;return fi(r,o)}if(t=e.match(/^.+?([^/]*gitlab[^/]+)\/(.+?)\/?$/i),t){let[,r,o]=t;return ui(r,o)}return M}var ss;function cs(e){return ss||(ss=C(()=>{let t=__md_get("__source",sessionStorage);if(t)return I(t);if(ae("consent").length){let o=__md_get("__consent");if(!(o&&o.github))return M}return di(e.href).pipe(E(o=>__md_set("__source",o,sessionStorage)))}).pipe(ve(()=>M),b(t=>Object.keys(t).length>0),m(t=>({facts:t})),B(1)))}function hi(e){let t=P(":scope > :last-child",e);return C(()=>{let r=new g;return r.subscribe(({facts:o})=>{t.appendChild(Sn(o)),t.classList.add("md-source__repository--active")}),cs(e).pipe(E(o=>r.next(o)),L(()=>r.complete()),m(o=>R({ref:e},o)))})}function ps(e,{viewport$:t,header$:r}){return ge(document.body).pipe(v(()=>mr(e,{header$:r,viewport$:t})),m(({offset:{y:o}})=>({hidden:o>=10})),Z("hidden"))}function bi(e,t){return C(()=>{let r=new g;return r.subscribe({next({hidden:o}){e.hidden=o},complete(){e.hidden=!1}}),(G("navigation.tabs.sticky")?I({hidden:!1}):ps(e,t)).pipe(E(o=>r.next(o)),L(()=>r.complete()),m(o=>R({ref:e},o)))})}function ls(e,{viewport$:t,header$:r}){let o=new Map,n=$(".md-nav__link",e);for(let s of n){let p=decodeURIComponent(s.hash.substring(1)),c=fe(`[id="${p}"]`);typeof c!="undefined"&&o.set(s,c)}let i=r.pipe(Z("height"),m(({height:s})=>{let p=Se("main"),c=P(":scope > :first-child",p);return s+.8*(c.offsetTop-p.offsetTop)}),pe());return ge(document.body).pipe(Z("height"),v(s=>C(()=>{let p=[];return I([...o].reduce((c,[l,f])=>{for(;p.length&&o.get(p[p.length-1]).tagName>=f.tagName;)p.pop();let u=f.offsetTop;for(;!u&&f.parentElement;)f=f.parentElement,u=f.offsetTop;let h=f.offsetParent;for(;h;h=h.offsetParent)u+=h.offsetTop;return c.set([...p=[...p,l]].reverse(),u)},new Map))}).pipe(m(p=>new Map([...p].sort(([,c],[,l])=>c-l))),We(i),v(([p,c])=>t.pipe(jr(([l,f],{offset:{y:u},size:h})=>{let w=u+h.height>=Math.floor(s.height);for(;f.length;){let[,A]=f[0];if(A-c<u||w)l=[...l,f.shift()];else break}for(;l.length;){let[,A]=l[l.length-1];if(A-c>=u&&!w)f=[l.pop(),...f];else break}return[l,f]},[[],[...p]]),K((l,f)=>l[0]===f[0]&&l[1]===f[1])))))).pipe(m(([s,p])=>({prev:s.map(([c])=>c),next:p.map(([c])=>c)})),Q({prev:[],next:[]}),Ye(2,1),m(([s,p])=>s.prev.length<p.prev.length?{prev:p.prev.slice(Math.max(0,s.prev.length-1),p.prev.length),next:[]}:{prev:p.prev.slice(-1),next:p.next.slice(0,p.next.length-s.next.length)}))}function vi(e,{viewport$:t,header$:r,main$:o,target$:n}){return C(()=>{let i=new g,a=i.pipe(X(),ne(!0));if(i.subscribe(({prev:s,next:p})=>{for(let[c]of p)c.classList.remove("md-nav__link--passed"),c.classList.remove("md-nav__link--active");for(let[c,[l]]of s.entries())l.classList.add("md-nav__link--passed"),l.classList.toggle("md-nav__link--active",c===s.length-1)}),G("toc.follow")){let s=S(t.pipe(_e(1),m(()=>{})),t.pipe(_e(250),m(()=>"smooth")));i.pipe(b(({prev:p})=>p.length>0),We(o.pipe(be(se))),ee(s)).subscribe(([[{prev:p}],c])=>{let[l]=p[p.length-1];if(l.offsetHeight){let f=cr(l);if(typeof f!="undefined"){let
u=l.offsetTop-f.offsetTop,{height:h}=ce(f);f.scrollTo({top:u-h/2,behavior:c})}}})}return G("navigation.tracking")&&t.pipe(U(a),Z("offset"),_e(250),Ce(1),U(n.pipe(Ce(1))),st({delay:250}),ee(i)).subscribe(([,{prev:s}])=>{let p=xe(),c=s[s.length-1];if(c&&c.length){let[l]=c,{hash:f}=new URL(l.href);p.hash!==f&&(p.hash=f,history.replaceState({},"",`${p}`))}else p.hash="",history.replaceState({},"",`${p}`)}),ls(e,{viewport$:t,header$:r}).pipe(E(s=>i.next(s)),L(()=>i.complete()),m(s=>R({ref:e},s)))})}function ms(e,{viewport$:t,main$:r,target$:o}){let n=t.pipe(m(({offset:{y:a}})=>a),Ye(2,1),m(([a,s])=>a>s&&s>0),K()),i=r.pipe(m(({active:a})=>a));return z([i,n]).pipe(m(([a,s])=>!(a&&s)),K(),U(o.pipe(Ce(1))),ne(!0),st({delay:250}),m(a=>({hidden:a})))}function gi(e,{viewport$:t,header$:r,main$:o,target$:n}){let i=new g,a=i.pipe(X(),ne(!0));return i.subscribe({next({hidden:s}){e.hidden=s,s?(e.setAttribute("tabindex","-1"),e.blur()):e.removeAttribute("tabindex")},complete(){e.style.top="",e.hidden=!0,e.removeAttribute("tabindex")}}),r.pipe(U(a),Z("height")).subscribe(({height:s})=>{e.style.top=`${s+16}px`}),d(e,"click").subscribe(s=>{s.preventDefault(),window.scrollTo({top:0})}),ms(e,{viewport$:t,main$:o,target$:n}).pipe(E(s=>i.next(s)),L(()=>i.complete()),m(s=>R({ref:e},s)))}function xi({document$:e,viewport$:t}){e.pipe(v(()=>$(".md-ellipsis")),oe(r=>tt(r).pipe(U(e.pipe(Ce(1))),b(o=>o),m(()=>r),Te(1))),b(r=>r.offsetWidth<r.scrollWidth),oe(r=>{let o=r.innerText,n=r.closest("a")||r;return n.title=o,lt(n,{viewport$:t}).pipe(U(e.pipe(Ce(1))),L(()=>n.removeAttribute("title")))})).subscribe(),e.pipe(v(()=>$(".md-status")),oe(r=>lt(r,{viewport$:t}))).subscribe()}function yi({document$:e,tablet$:t}){e.pipe(v(()=>$(".md-toggle--indeterminate")),E(r=>{r.indeterminate=!0,r.checked=!1}),oe(r=>d(r,"change").pipe(Dr(()=>r.classList.contains("md-toggle--indeterminate")),m(()=>r))),ee(t)).subscribe(([r,o])=>{r.classList.remove("md-toggle--indeterminate"),o&&(r.checked=!1)})}function fs(){return/(iPad|iPhone|iPod)/.test(navigator.userAgent)}function Ei({document$:e}){e.pipe(v(()=>$("[data-md-scrollfix]")),E(t=>t.removeAttribute("data-md-scrollfix")),b(fs),oe(t=>d(t,"touchstart").pipe(m(()=>t)))).subscribe(t=>{let r=t.scrollTop;r===0?t.scrollTop=1:r+t.offsetHeight===t.scrollHeight&&(t.scrollTop=r-1)})}function wi({viewport$:e,tablet$:t}){z([Ve("search"),t]).pipe(m(([r,o])=>r&&!o),v(r=>I(r).pipe(Ge(r?400:100))),ee(e)).subscribe(([r,{offset:{y:o}}])=>{if(r)document.body.setAttribute("data-md-scrolllock",""),document.body.style.top=`-${o}px`;else{let n=-1*parseInt(document.body.style.top,10);document.body.removeAttribute("data-md-scrolllock"),document.body.style.top="",n&&window.scrollTo(0,n)}})}Object.entries||(Object.entries=function(e){let t=[];for(let r of Object.keys(e))t.push([r,e[r]]);return t});Object.values||(Object.values=function(e){let t=[];for(let r of Object.keys(e))t.push(e[r]);return t});typeof Element!="undefined"&&(Element.prototype.scrollTo||(Element.prototype.scrollTo=function(e,t){typeof e=="object"?(this.scrollLeft=e.left,this.scrollTop=e.top):(this.scrollLeft=e,this.scrollTop=t)}),Element.prototype.replaceWith||(Element.prototype.replaceWith=function(...e){let t=this.parentNode;if(t){e.length===0&&t.removeChild(this);for(let r=e.length-1;r>=0;r--){let o=e[r];typeof o=="string"?o=document.createTextNode(o):o.parentNode&&o.parentNode.removeChild(o),r?t.insertBefore(this.previousSibling,o):t.replaceChild(o,this)}}}));function us(){return location.protocol==="file:"?wt(`${new
URL("search/search_index.js",Xr.base)}`).pipe(m(()=>__index),B(1)):Ne(new URL("search/search_index.json",Xr.base))}document.documentElement.classList.remove("no-js");document.documentElement.classList.add("js");var ot=Yo(),jt=nn(),Ot=cn(jt),Zr=on(),Oe=bn(),hr=$t("(min-width: 960px)"),Si=$t("(min-width: 1220px)"),Oi=pn(),Xr=ye(),Mi=document.forms.namedItem("search")?us():Ke,eo=new g;Bn({alert$:eo});var to=new g;G("navigation.instant")&&Zn({location$:jt,viewport$:Oe,progress$:to}).subscribe(ot);var Ti;((Ti=Xr.version)==null?void 0:Ti.provider)==="mike"&&ii({document$:ot});S(jt,Ot).pipe(Ge(125)).subscribe(()=>{Je("drawer",!1),Je("search",!1)});Zr.pipe(b(({mode:e})=>e==="global")).subscribe(e=>{switch(e.type){case"p":case",":let t=fe("link[rel=prev]");typeof t!="undefined"&&pt(t);break;case"n":case".":let r=fe("link[rel=next]");typeof r!="undefined"&&pt(r);break;case"Enter":let o=Re();o instanceof HTMLLabelElement&&o.click()}});xi({viewport$:Oe,document$:ot});yi({document$:ot,tablet$:hr});Ei({document$:ot});wi({viewport$:Oe,tablet$:hr});var rt=Nn(Se("header"),{viewport$:Oe}),Ft=ot.pipe(m(()=>Se("main")),v(e=>Qn(e,{viewport$:Oe,header$:rt})),B(1)),ds=S(...ae("consent").map(e=>xn(e,{target$:Ot})),...ae("dialog").map(e=>Dn(e,{alert$:eo})),...ae("header").map(e=>zn(e,{viewport$:Oe,header$:rt,main$:Ft})),...ae("palette").map(e=>Kn(e)),...ae("progress").map(e=>Yn(e,{progress$:to})),...ae("search").map(e=>li(e,{index$:Mi,keyboard$:Zr})),...ae("source").map(e=>hi(e))),hs=C(()=>S(...ae("announce").map(e=>gn(e)),...ae("content").map(e=>Un(e,{viewport$:Oe,target$:Ot,print$:Oi})),...ae("content").map(e=>G("search.highlight")?mi(e,{index$:Mi,location$:jt}):M),...ae("header-title").map(e=>qn(e,{viewport$:Oe,header$:rt})),...ae("sidebar").map(e=>e.getAttribute("data-md-type")==="navigation"?Nr(Si,()=>Jr(e,{viewport$:Oe,header$:rt,main$:Ft})):Nr(hr,()=>Jr(e,{viewport$:Oe,header$:rt,main$:Ft}))),...ae("tabs").map(e=>bi(e,{viewport$:Oe,header$:rt})),...ae("toc").map(e=>vi(e,{viewport$:Oe,header$:rt,main$:Ft,target$:Ot})),...ae("top").map(e=>gi(e,{viewport$:Oe,header$:rt,main$:Ft,target$:Ot})))),Li=ot.pipe(v(()=>hs),Pe(ds),B(1));Li.subscribe();window.document$=ot;window.location$=jt;window.target$=Ot;window.keyboard$=Zr;window.viewport$=Oe;window.tablet$=hr;window.screen$=Si;window.print$=Oi;window.alert$=eo;window.progress$=to;window.component$=Li;})(); +//# sourceMappingURL=bundle.ad660dcc.min.js.map + diff --git a/assets/javascripts/bundle.ad660dcc.min.js.map b/assets/javascripts/bundle.ad660dcc.min.js.map new file mode 100644 index 0000000..6d61170 --- /dev/null +++ b/assets/javascripts/bundle.ad660dcc.min.js.map @@ -0,0 +1,7 @@ +{ + "version": 3, + "sources": ["node_modules/focus-visible/dist/focus-visible.js", "node_modules/clipboard/dist/clipboard.js", "node_modules/escape-html/index.js", "src/templates/assets/javascripts/bundle.ts", "node_modules/rxjs/node_modules/tslib/tslib.es6.js", "node_modules/rxjs/src/internal/util/isFunction.ts", "node_modules/rxjs/src/internal/util/createErrorClass.ts", "node_modules/rxjs/src/internal/util/UnsubscriptionError.ts", "node_modules/rxjs/src/internal/util/arrRemove.ts", "node_modules/rxjs/src/internal/Subscription.ts", "node_modules/rxjs/src/internal/config.ts", "node_modules/rxjs/src/internal/scheduler/timeoutProvider.ts", "node_modules/rxjs/src/internal/util/reportUnhandledError.ts", "node_modules/rxjs/src/internal/util/noop.ts", "node_modules/rxjs/src/internal/NotificationFactories.ts", "node_modules/rxjs/src/internal/util/errorContext.ts", 
"node_modules/rxjs/src/internal/Subscriber.ts", "node_modules/rxjs/src/internal/symbol/observable.ts", "node_modules/rxjs/src/internal/util/identity.ts", "node_modules/rxjs/src/internal/util/pipe.ts", "node_modules/rxjs/src/internal/Observable.ts", "node_modules/rxjs/src/internal/util/lift.ts", "node_modules/rxjs/src/internal/operators/OperatorSubscriber.ts", "node_modules/rxjs/src/internal/scheduler/animationFrameProvider.ts", "node_modules/rxjs/src/internal/util/ObjectUnsubscribedError.ts", "node_modules/rxjs/src/internal/Subject.ts", "node_modules/rxjs/src/internal/BehaviorSubject.ts", "node_modules/rxjs/src/internal/scheduler/dateTimestampProvider.ts", "node_modules/rxjs/src/internal/ReplaySubject.ts", "node_modules/rxjs/src/internal/scheduler/Action.ts", "node_modules/rxjs/src/internal/scheduler/intervalProvider.ts", "node_modules/rxjs/src/internal/scheduler/AsyncAction.ts", "node_modules/rxjs/src/internal/Scheduler.ts", "node_modules/rxjs/src/internal/scheduler/AsyncScheduler.ts", "node_modules/rxjs/src/internal/scheduler/async.ts", "node_modules/rxjs/src/internal/scheduler/QueueAction.ts", "node_modules/rxjs/src/internal/scheduler/QueueScheduler.ts", "node_modules/rxjs/src/internal/scheduler/queue.ts", "node_modules/rxjs/src/internal/scheduler/AnimationFrameAction.ts", "node_modules/rxjs/src/internal/scheduler/AnimationFrameScheduler.ts", "node_modules/rxjs/src/internal/scheduler/animationFrame.ts", "node_modules/rxjs/src/internal/observable/empty.ts", "node_modules/rxjs/src/internal/util/isScheduler.ts", "node_modules/rxjs/src/internal/util/args.ts", "node_modules/rxjs/src/internal/util/isArrayLike.ts", "node_modules/rxjs/src/internal/util/isPromise.ts", "node_modules/rxjs/src/internal/util/isInteropObservable.ts", "node_modules/rxjs/src/internal/util/isAsyncIterable.ts", "node_modules/rxjs/src/internal/util/throwUnobservableError.ts", "node_modules/rxjs/src/internal/symbol/iterator.ts", "node_modules/rxjs/src/internal/util/isIterable.ts", "node_modules/rxjs/src/internal/util/isReadableStreamLike.ts", "node_modules/rxjs/src/internal/observable/innerFrom.ts", "node_modules/rxjs/src/internal/util/executeSchedule.ts", "node_modules/rxjs/src/internal/operators/observeOn.ts", "node_modules/rxjs/src/internal/operators/subscribeOn.ts", "node_modules/rxjs/src/internal/scheduled/scheduleObservable.ts", "node_modules/rxjs/src/internal/scheduled/schedulePromise.ts", "node_modules/rxjs/src/internal/scheduled/scheduleArray.ts", "node_modules/rxjs/src/internal/scheduled/scheduleIterable.ts", "node_modules/rxjs/src/internal/scheduled/scheduleAsyncIterable.ts", "node_modules/rxjs/src/internal/scheduled/scheduleReadableStreamLike.ts", "node_modules/rxjs/src/internal/scheduled/scheduled.ts", "node_modules/rxjs/src/internal/observable/from.ts", "node_modules/rxjs/src/internal/observable/of.ts", "node_modules/rxjs/src/internal/observable/throwError.ts", "node_modules/rxjs/src/internal/util/EmptyError.ts", "node_modules/rxjs/src/internal/util/isDate.ts", "node_modules/rxjs/src/internal/operators/map.ts", "node_modules/rxjs/src/internal/util/mapOneOrManyArgs.ts", "node_modules/rxjs/src/internal/util/argsArgArrayOrObject.ts", "node_modules/rxjs/src/internal/util/createObject.ts", "node_modules/rxjs/src/internal/observable/combineLatest.ts", "node_modules/rxjs/src/internal/operators/mergeInternals.ts", "node_modules/rxjs/src/internal/operators/mergeMap.ts", "node_modules/rxjs/src/internal/operators/mergeAll.ts", "node_modules/rxjs/src/internal/operators/concatAll.ts", 
"node_modules/rxjs/src/internal/observable/concat.ts", "node_modules/rxjs/src/internal/observable/defer.ts", "node_modules/rxjs/src/internal/observable/fromEvent.ts", "node_modules/rxjs/src/internal/observable/fromEventPattern.ts", "node_modules/rxjs/src/internal/observable/timer.ts", "node_modules/rxjs/src/internal/observable/merge.ts", "node_modules/rxjs/src/internal/observable/never.ts", "node_modules/rxjs/src/internal/util/argsOrArgArray.ts", "node_modules/rxjs/src/internal/operators/filter.ts", "node_modules/rxjs/src/internal/observable/zip.ts", "node_modules/rxjs/src/internal/operators/audit.ts", "node_modules/rxjs/src/internal/operators/auditTime.ts", "node_modules/rxjs/src/internal/operators/bufferCount.ts", "node_modules/rxjs/src/internal/operators/catchError.ts", "node_modules/rxjs/src/internal/operators/scanInternals.ts", "node_modules/rxjs/src/internal/operators/combineLatest.ts", "node_modules/rxjs/src/internal/operators/combineLatestWith.ts", "node_modules/rxjs/src/internal/operators/debounce.ts", "node_modules/rxjs/src/internal/operators/debounceTime.ts", "node_modules/rxjs/src/internal/operators/defaultIfEmpty.ts", "node_modules/rxjs/src/internal/operators/take.ts", "node_modules/rxjs/src/internal/operators/ignoreElements.ts", "node_modules/rxjs/src/internal/operators/mapTo.ts", "node_modules/rxjs/src/internal/operators/delayWhen.ts", "node_modules/rxjs/src/internal/operators/delay.ts", "node_modules/rxjs/src/internal/operators/distinctUntilChanged.ts", "node_modules/rxjs/src/internal/operators/distinctUntilKeyChanged.ts", "node_modules/rxjs/src/internal/operators/throwIfEmpty.ts", "node_modules/rxjs/src/internal/operators/endWith.ts", "node_modules/rxjs/src/internal/operators/finalize.ts", "node_modules/rxjs/src/internal/operators/first.ts", "node_modules/rxjs/src/internal/operators/takeLast.ts", "node_modules/rxjs/src/internal/operators/merge.ts", "node_modules/rxjs/src/internal/operators/mergeWith.ts", "node_modules/rxjs/src/internal/operators/repeat.ts", "node_modules/rxjs/src/internal/operators/scan.ts", "node_modules/rxjs/src/internal/operators/share.ts", "node_modules/rxjs/src/internal/operators/shareReplay.ts", "node_modules/rxjs/src/internal/operators/skip.ts", "node_modules/rxjs/src/internal/operators/skipUntil.ts", "node_modules/rxjs/src/internal/operators/startWith.ts", "node_modules/rxjs/src/internal/operators/switchMap.ts", "node_modules/rxjs/src/internal/operators/takeUntil.ts", "node_modules/rxjs/src/internal/operators/takeWhile.ts", "node_modules/rxjs/src/internal/operators/tap.ts", "node_modules/rxjs/src/internal/operators/throttle.ts", "node_modules/rxjs/src/internal/operators/throttleTime.ts", "node_modules/rxjs/src/internal/operators/withLatestFrom.ts", "node_modules/rxjs/src/internal/operators/zip.ts", "node_modules/rxjs/src/internal/operators/zipWith.ts", "src/templates/assets/javascripts/browser/document/index.ts", "src/templates/assets/javascripts/browser/element/_/index.ts", "src/templates/assets/javascripts/browser/element/focus/index.ts", "src/templates/assets/javascripts/browser/element/hover/index.ts", "src/templates/assets/javascripts/utilities/h/index.ts", "src/templates/assets/javascripts/utilities/round/index.ts", "src/templates/assets/javascripts/browser/script/index.ts", "src/templates/assets/javascripts/browser/element/size/_/index.ts", "src/templates/assets/javascripts/browser/element/size/content/index.ts", "src/templates/assets/javascripts/browser/element/offset/_/index.ts", 
"src/templates/assets/javascripts/browser/element/offset/content/index.ts", "src/templates/assets/javascripts/browser/element/visibility/index.ts", "src/templates/assets/javascripts/browser/toggle/index.ts", "src/templates/assets/javascripts/browser/keyboard/index.ts", "src/templates/assets/javascripts/browser/location/_/index.ts", "src/templates/assets/javascripts/browser/location/hash/index.ts", "src/templates/assets/javascripts/browser/media/index.ts", "src/templates/assets/javascripts/browser/request/index.ts", "src/templates/assets/javascripts/browser/viewport/offset/index.ts", "src/templates/assets/javascripts/browser/viewport/size/index.ts", "src/templates/assets/javascripts/browser/viewport/_/index.ts", "src/templates/assets/javascripts/browser/viewport/at/index.ts", "src/templates/assets/javascripts/browser/worker/index.ts", "src/templates/assets/javascripts/_/index.ts", "src/templates/assets/javascripts/components/_/index.ts", "src/templates/assets/javascripts/components/announce/index.ts", "src/templates/assets/javascripts/components/consent/index.ts", "src/templates/assets/javascripts/templates/tooltip/index.tsx", "src/templates/assets/javascripts/templates/annotation/index.tsx", "src/templates/assets/javascripts/templates/clipboard/index.tsx", "src/templates/assets/javascripts/templates/search/index.tsx", "src/templates/assets/javascripts/templates/source/index.tsx", "src/templates/assets/javascripts/templates/tabbed/index.tsx", "src/templates/assets/javascripts/templates/table/index.tsx", "src/templates/assets/javascripts/templates/version/index.tsx", "src/templates/assets/javascripts/components/tooltip2/index.ts", "src/templates/assets/javascripts/components/content/annotation/_/index.ts", "src/templates/assets/javascripts/components/content/annotation/list/index.ts", "src/templates/assets/javascripts/components/content/annotation/block/index.ts", "src/templates/assets/javascripts/components/content/code/_/index.ts", "src/templates/assets/javascripts/components/content/details/index.ts", "src/templates/assets/javascripts/components/content/mermaid/index.css", "src/templates/assets/javascripts/components/content/mermaid/index.ts", "src/templates/assets/javascripts/components/content/table/index.ts", "src/templates/assets/javascripts/components/content/tabs/index.ts", "src/templates/assets/javascripts/components/content/_/index.ts", "src/templates/assets/javascripts/components/dialog/index.ts", "src/templates/assets/javascripts/components/tooltip/index.ts", "src/templates/assets/javascripts/components/header/_/index.ts", "src/templates/assets/javascripts/components/header/title/index.ts", "src/templates/assets/javascripts/components/main/index.ts", "src/templates/assets/javascripts/components/palette/index.ts", "src/templates/assets/javascripts/components/progress/index.ts", "src/templates/assets/javascripts/integrations/clipboard/index.ts", "src/templates/assets/javascripts/integrations/sitemap/index.ts", "src/templates/assets/javascripts/integrations/instant/index.ts", "src/templates/assets/javascripts/integrations/search/highlighter/index.ts", "src/templates/assets/javascripts/integrations/search/worker/message/index.ts", "src/templates/assets/javascripts/integrations/search/worker/_/index.ts", "src/templates/assets/javascripts/integrations/version/index.ts", "src/templates/assets/javascripts/components/search/query/index.ts", "src/templates/assets/javascripts/components/search/result/index.ts", "src/templates/assets/javascripts/components/search/share/index.ts", 
"src/templates/assets/javascripts/components/search/suggest/index.ts", "src/templates/assets/javascripts/components/search/_/index.ts", "src/templates/assets/javascripts/components/search/highlight/index.ts", "src/templates/assets/javascripts/components/sidebar/index.ts", "src/templates/assets/javascripts/components/source/facts/github/index.ts", "src/templates/assets/javascripts/components/source/facts/gitlab/index.ts", "src/templates/assets/javascripts/components/source/facts/_/index.ts", "src/templates/assets/javascripts/components/source/_/index.ts", "src/templates/assets/javascripts/components/tabs/index.ts", "src/templates/assets/javascripts/components/toc/index.ts", "src/templates/assets/javascripts/components/top/index.ts", "src/templates/assets/javascripts/patches/ellipsis/index.ts", "src/templates/assets/javascripts/patches/indeterminate/index.ts", "src/templates/assets/javascripts/patches/scrollfix/index.ts", "src/templates/assets/javascripts/patches/scrolllock/index.ts", "src/templates/assets/javascripts/polyfills/index.ts"], + "sourcesContent": ["(function (global, factory) {\n typeof exports === 'object' && typeof module !== 'undefined' ? factory() :\n typeof define === 'function' && define.amd ? define(factory) :\n (factory());\n}(this, (function () { 'use strict';\n\n /**\n * Applies the :focus-visible polyfill at the given scope.\n * A scope in this case is either the top-level Document or a Shadow Root.\n *\n * @param {(Document|ShadowRoot)} scope\n * @see https://github.com/WICG/focus-visible\n */\n function applyFocusVisiblePolyfill(scope) {\n var hadKeyboardEvent = true;\n var hadFocusVisibleRecently = false;\n var hadFocusVisibleRecentlyTimeout = null;\n\n var inputTypesAllowlist = {\n text: true,\n search: true,\n url: true,\n tel: true,\n email: true,\n password: true,\n number: true,\n date: true,\n month: true,\n week: true,\n time: true,\n datetime: true,\n 'datetime-local': true\n };\n\n /**\n * Helper function for legacy browsers and iframes which sometimes focus\n * elements like document, body, and non-interactive SVG.\n * @param {Element} el\n */\n function isValidFocusTarget(el) {\n if (\n el &&\n el !== document &&\n el.nodeName !== 'HTML' &&\n el.nodeName !== 'BODY' &&\n 'classList' in el &&\n 'contains' in el.classList\n ) {\n return true;\n }\n return false;\n }\n\n /**\n * Computes whether the given element should automatically trigger the\n * `focus-visible` class being added, i.e. 
whether it should always match\n * `:focus-visible` when focused.\n * @param {Element} el\n * @return {boolean}\n */\n function focusTriggersKeyboardModality(el) {\n var type = el.type;\n var tagName = el.tagName;\n\n if (tagName === 'INPUT' && inputTypesAllowlist[type] && !el.readOnly) {\n return true;\n }\n\n if (tagName === 'TEXTAREA' && !el.readOnly) {\n return true;\n }\n\n if (el.isContentEditable) {\n return true;\n }\n\n return false;\n }\n\n /**\n * Add the `focus-visible` class to the given element if it was not added by\n * the author.\n * @param {Element} el\n */\n function addFocusVisibleClass(el) {\n if (el.classList.contains('focus-visible')) {\n return;\n }\n el.classList.add('focus-visible');\n el.setAttribute('data-focus-visible-added', '');\n }\n\n /**\n * Remove the `focus-visible` class from the given element if it was not\n * originally added by the author.\n * @param {Element} el\n */\n function removeFocusVisibleClass(el) {\n if (!el.hasAttribute('data-focus-visible-added')) {\n return;\n }\n el.classList.remove('focus-visible');\n el.removeAttribute('data-focus-visible-added');\n }\n\n /**\n * If the most recent user interaction was via the keyboard;\n * and the key press did not include a meta, alt/option, or control key;\n * then the modality is keyboard. Otherwise, the modality is not keyboard.\n * Apply `focus-visible` to any current active element and keep track\n * of our keyboard modality state with `hadKeyboardEvent`.\n * @param {KeyboardEvent} e\n */\n function onKeyDown(e) {\n if (e.metaKey || e.altKey || e.ctrlKey) {\n return;\n }\n\n if (isValidFocusTarget(scope.activeElement)) {\n addFocusVisibleClass(scope.activeElement);\n }\n\n hadKeyboardEvent = true;\n }\n\n /**\n * If at any point a user clicks with a pointing device, ensure that we change\n * the modality away from keyboard.\n * This avoids the situation where a user presses a key on an already focused\n * element, and then clicks on a different element, focusing it with a\n * pointing device, while we still think we're in keyboard modality.\n * @param {Event} e\n */\n function onPointerDown(e) {\n hadKeyboardEvent = false;\n }\n\n /**\n * On `focus`, add the `focus-visible` class to the target if:\n * - the target received focus as a result of keyboard navigation, or\n * - the event target is an element that will likely require interaction\n * via the keyboard (e.g. 
a text box)\n * @param {Event} e\n */\n function onFocus(e) {\n // Prevent IE from focusing the document or HTML element.\n if (!isValidFocusTarget(e.target)) {\n return;\n }\n\n if (hadKeyboardEvent || focusTriggersKeyboardModality(e.target)) {\n addFocusVisibleClass(e.target);\n }\n }\n\n /**\n * On `blur`, remove the `focus-visible` class from the target.\n * @param {Event} e\n */\n function onBlur(e) {\n if (!isValidFocusTarget(e.target)) {\n return;\n }\n\n if (\n e.target.classList.contains('focus-visible') ||\n e.target.hasAttribute('data-focus-visible-added')\n ) {\n // To detect a tab/window switch, we look for a blur event followed\n // rapidly by a visibility change.\n // If we don't see a visibility change within 100ms, it's probably a\n // regular focus change.\n hadFocusVisibleRecently = true;\n window.clearTimeout(hadFocusVisibleRecentlyTimeout);\n hadFocusVisibleRecentlyTimeout = window.setTimeout(function() {\n hadFocusVisibleRecently = false;\n }, 100);\n removeFocusVisibleClass(e.target);\n }\n }\n\n /**\n * If the user changes tabs, keep track of whether or not the previously\n * focused element had .focus-visible.\n * @param {Event} e\n */\n function onVisibilityChange(e) {\n if (document.visibilityState === 'hidden') {\n // If the tab becomes active again, the browser will handle calling focus\n // on the element (Safari actually calls it twice).\n // If this tab change caused a blur on an element with focus-visible,\n // re-apply the class when the user switches back to the tab.\n if (hadFocusVisibleRecently) {\n hadKeyboardEvent = true;\n }\n addInitialPointerMoveListeners();\n }\n }\n\n /**\n * Add a group of listeners to detect usage of any pointing devices.\n * These listeners will be added when the polyfill first loads, and anytime\n * the window is blurred, so that they are active when the window regains\n * focus.\n */\n function addInitialPointerMoveListeners() {\n document.addEventListener('mousemove', onInitialPointerMove);\n document.addEventListener('mousedown', onInitialPointerMove);\n document.addEventListener('mouseup', onInitialPointerMove);\n document.addEventListener('pointermove', onInitialPointerMove);\n document.addEventListener('pointerdown', onInitialPointerMove);\n document.addEventListener('pointerup', onInitialPointerMove);\n document.addEventListener('touchmove', onInitialPointerMove);\n document.addEventListener('touchstart', onInitialPointerMove);\n document.addEventListener('touchend', onInitialPointerMove);\n }\n\n function removeInitialPointerMoveListeners() {\n document.removeEventListener('mousemove', onInitialPointerMove);\n document.removeEventListener('mousedown', onInitialPointerMove);\n document.removeEventListener('mouseup', onInitialPointerMove);\n document.removeEventListener('pointermove', onInitialPointerMove);\n document.removeEventListener('pointerdown', onInitialPointerMove);\n document.removeEventListener('pointerup', onInitialPointerMove);\n document.removeEventListener('touchmove', onInitialPointerMove);\n document.removeEventListener('touchstart', onInitialPointerMove);\n document.removeEventListener('touchend', onInitialPointerMove);\n }\n\n /**\n * When the polfyill first loads, assume the user is in keyboard modality.\n * If any event is received from a pointing device (e.g. 
mouse, pointer,\n * touch), turn off keyboard modality.\n * This accounts for situations where focus enters the page from the URL bar.\n * @param {Event} e\n */\n function onInitialPointerMove(e) {\n // Work around a Safari quirk that fires a mousemove on whenever the\n // window blurs, even if you're tabbing out of the page. \u00AF\\_(\u30C4)_/\u00AF\n if (e.target.nodeName && e.target.nodeName.toLowerCase() === 'html') {\n return;\n }\n\n hadKeyboardEvent = false;\n removeInitialPointerMoveListeners();\n }\n\n // For some kinds of state, we are interested in changes at the global scope\n // only. For example, global pointer input, global key presses and global\n // visibility change should affect the state at every scope:\n document.addEventListener('keydown', onKeyDown, true);\n document.addEventListener('mousedown', onPointerDown, true);\n document.addEventListener('pointerdown', onPointerDown, true);\n document.addEventListener('touchstart', onPointerDown, true);\n document.addEventListener('visibilitychange', onVisibilityChange, true);\n\n addInitialPointerMoveListeners();\n\n // For focus and blur, we specifically care about state changes in the local\n // scope. This is because focus / blur events that originate from within a\n // shadow root are not re-dispatched from the host element if it was already\n // the active element in its own scope:\n scope.addEventListener('focus', onFocus, true);\n scope.addEventListener('blur', onBlur, true);\n\n // We detect that a node is a ShadowRoot by ensuring that it is a\n // DocumentFragment and also has a host property. This check covers native\n // implementation and polyfill implementation transparently. If we only cared\n // about the native implementation, we could just check if the scope was\n // an instance of a ShadowRoot.\n if (scope.nodeType === Node.DOCUMENT_FRAGMENT_NODE && scope.host) {\n // Since a ShadowRoot is a special kind of DocumentFragment, it does not\n // have a root element to add a class to. So, we add this attribute to the\n // host element instead:\n scope.host.setAttribute('data-js-focus-visible', '');\n } else if (scope.nodeType === Node.DOCUMENT_NODE) {\n document.documentElement.classList.add('js-focus-visible');\n document.documentElement.setAttribute('data-js-focus-visible', '');\n }\n }\n\n // It is important to wrap all references to global window and document in\n // these checks to support server-side rendering use cases\n // @see https://github.com/WICG/focus-visible/issues/199\n if (typeof window !== 'undefined' && typeof document !== 'undefined') {\n // Make the polyfill helper globally available. 
This can be used as a signal\n // to interested libraries that wish to coordinate with the polyfill for e.g.,\n // applying the polyfill to a shadow root:\n window.applyFocusVisiblePolyfill = applyFocusVisiblePolyfill;\n\n // Notify interested libraries of the polyfill's presence, in case the\n // polyfill was loaded lazily:\n var event;\n\n try {\n event = new CustomEvent('focus-visible-polyfill-ready');\n } catch (error) {\n // IE11 does not support using CustomEvent as a constructor directly:\n event = document.createEvent('CustomEvent');\n event.initCustomEvent('focus-visible-polyfill-ready', false, false, {});\n }\n\n window.dispatchEvent(event);\n }\n\n if (typeof document !== 'undefined') {\n // Apply the polyfill to the global document, so that no JavaScript\n // coordination is required to use the polyfill in the top-level document:\n applyFocusVisiblePolyfill(document);\n }\n\n})));\n", "/*!\n * clipboard.js v2.0.11\n * https://clipboardjs.com/\n *\n * Licensed MIT \u00A9 Zeno Rocha\n */\n(function webpackUniversalModuleDefinition(root, factory) {\n\tif(typeof exports === 'object' && typeof module === 'object')\n\t\tmodule.exports = factory();\n\telse if(typeof define === 'function' && define.amd)\n\t\tdefine([], factory);\n\telse if(typeof exports === 'object')\n\t\texports[\"ClipboardJS\"] = factory();\n\telse\n\t\troot[\"ClipboardJS\"] = factory();\n})(this, function() {\nreturn /******/ (function() { // webpackBootstrap\n/******/ \tvar __webpack_modules__ = ({\n\n/***/ 686:\n/***/ (function(__unused_webpack_module, __webpack_exports__, __webpack_require__) {\n\n\"use strict\";\n\n// EXPORTS\n__webpack_require__.d(__webpack_exports__, {\n \"default\": function() { return /* binding */ clipboard; }\n});\n\n// EXTERNAL MODULE: ./node_modules/tiny-emitter/index.js\nvar tiny_emitter = __webpack_require__(279);\nvar tiny_emitter_default = /*#__PURE__*/__webpack_require__.n(tiny_emitter);\n// EXTERNAL MODULE: ./node_modules/good-listener/src/listen.js\nvar listen = __webpack_require__(370);\nvar listen_default = /*#__PURE__*/__webpack_require__.n(listen);\n// EXTERNAL MODULE: ./node_modules/select/src/select.js\nvar src_select = __webpack_require__(817);\nvar select_default = /*#__PURE__*/__webpack_require__.n(src_select);\n;// CONCATENATED MODULE: ./src/common/command.js\n/**\n * Executes a given operation type.\n * @param {String} type\n * @return {Boolean}\n */\nfunction command(type) {\n try {\n return document.execCommand(type);\n } catch (err) {\n return false;\n }\n}\n;// CONCATENATED MODULE: ./src/actions/cut.js\n\n\n/**\n * Cut action wrapper.\n * @param {String|HTMLElement} target\n * @return {String}\n */\n\nvar ClipboardActionCut = function ClipboardActionCut(target) {\n var selectedText = select_default()(target);\n command('cut');\n return selectedText;\n};\n\n/* harmony default export */ var actions_cut = (ClipboardActionCut);\n;// CONCATENATED MODULE: ./src/common/create-fake-element.js\n/**\n * Creates a fake textarea element with a value.\n * @param {String} value\n * @return {HTMLElement}\n */\nfunction createFakeElement(value) {\n var isRTL = document.documentElement.getAttribute('dir') === 'rtl';\n var fakeElement = document.createElement('textarea'); // Prevent zooming on iOS\n\n fakeElement.style.fontSize = '12pt'; // Reset box model\n\n fakeElement.style.border = '0';\n fakeElement.style.padding = '0';\n fakeElement.style.margin = '0'; // Move element out of screen horizontally\n\n fakeElement.style.position = 'absolute';\n fakeElement.style[isRTL ? 
'right' : 'left'] = '-9999px'; // Move element to the same position vertically\n\n var yPosition = window.pageYOffset || document.documentElement.scrollTop;\n fakeElement.style.top = \"\".concat(yPosition, \"px\");\n fakeElement.setAttribute('readonly', '');\n fakeElement.value = value;\n return fakeElement;\n}\n;// CONCATENATED MODULE: ./src/actions/copy.js\n\n\n\n/**\n * Create fake copy action wrapper using a fake element.\n * @param {String} target\n * @param {Object} options\n * @return {String}\n */\n\nvar fakeCopyAction = function fakeCopyAction(value, options) {\n var fakeElement = createFakeElement(value);\n options.container.appendChild(fakeElement);\n var selectedText = select_default()(fakeElement);\n command('copy');\n fakeElement.remove();\n return selectedText;\n};\n/**\n * Copy action wrapper.\n * @param {String|HTMLElement} target\n * @param {Object} options\n * @return {String}\n */\n\n\nvar ClipboardActionCopy = function ClipboardActionCopy(target) {\n var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {\n container: document.body\n };\n var selectedText = '';\n\n if (typeof target === 'string') {\n selectedText = fakeCopyAction(target, options);\n } else if (target instanceof HTMLInputElement && !['text', 'search', 'url', 'tel', 'password'].includes(target === null || target === void 0 ? void 0 : target.type)) {\n // If input type doesn't support `setSelectionRange`. Simulate it. https://developer.mozilla.org/en-US/docs/Web/API/HTMLInputElement/setSelectionRange\n selectedText = fakeCopyAction(target.value, options);\n } else {\n selectedText = select_default()(target);\n command('copy');\n }\n\n return selectedText;\n};\n\n/* harmony default export */ var actions_copy = (ClipboardActionCopy);\n;// CONCATENATED MODULE: ./src/actions/default.js\nfunction _typeof(obj) { \"@babel/helpers - typeof\"; if (typeof Symbol === \"function\" && typeof Symbol.iterator === \"symbol\") { _typeof = function _typeof(obj) { return typeof obj; }; } else { _typeof = function _typeof(obj) { return obj && typeof Symbol === \"function\" && obj.constructor === Symbol && obj !== Symbol.prototype ? \"symbol\" : typeof obj; }; } return _typeof(obj); }\n\n\n\n/**\n * Inner function which performs selection from either `text` or `target`\n * properties and then executes copy or cut operations.\n * @param {Object} options\n */\n\nvar ClipboardActionDefault = function ClipboardActionDefault() {\n var options = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {};\n // Defines base properties passed from constructor.\n var _options$action = options.action,\n action = _options$action === void 0 ? 'copy' : _options$action,\n container = options.container,\n target = options.target,\n text = options.text; // Sets the `action` to be performed which can be either 'copy' or 'cut'.\n\n if (action !== 'copy' && action !== 'cut') {\n throw new Error('Invalid \"action\" value, use either \"copy\" or \"cut\"');\n } // Sets the `target` property using an element that will be have its content copied.\n\n\n if (target !== undefined) {\n if (target && _typeof(target) === 'object' && target.nodeType === 1) {\n if (action === 'copy' && target.hasAttribute('disabled')) {\n throw new Error('Invalid \"target\" attribute. Please use \"readonly\" instead of \"disabled\" attribute');\n }\n\n if (action === 'cut' && (target.hasAttribute('readonly') || target.hasAttribute('disabled'))) {\n throw new Error('Invalid \"target\" attribute. 
You can\\'t cut text from elements with \"readonly\" or \"disabled\" attributes');\n }\n } else {\n throw new Error('Invalid \"target\" value, use a valid Element');\n }\n } // Define selection strategy based on `text` property.\n\n\n if (text) {\n return actions_copy(text, {\n container: container\n });\n } // Defines which selection strategy based on `target` property.\n\n\n if (target) {\n return action === 'cut' ? actions_cut(target) : actions_copy(target, {\n container: container\n });\n }\n};\n\n/* harmony default export */ var actions_default = (ClipboardActionDefault);\n;// CONCATENATED MODULE: ./src/clipboard.js\nfunction clipboard_typeof(obj) { \"@babel/helpers - typeof\"; if (typeof Symbol === \"function\" && typeof Symbol.iterator === \"symbol\") { clipboard_typeof = function _typeof(obj) { return typeof obj; }; } else { clipboard_typeof = function _typeof(obj) { return obj && typeof Symbol === \"function\" && obj.constructor === Symbol && obj !== Symbol.prototype ? \"symbol\" : typeof obj; }; } return clipboard_typeof(obj); }\n\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nfunction _defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } }\n\nfunction _createClass(Constructor, protoProps, staticProps) { if (protoProps) _defineProperties(Constructor.prototype, protoProps); if (staticProps) _defineProperties(Constructor, staticProps); return Constructor; }\n\nfunction _inherits(subClass, superClass) { if (typeof superClass !== \"function\" && superClass !== null) { throw new TypeError(\"Super expression must either be null or a function\"); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, writable: true, configurable: true } }); if (superClass) _setPrototypeOf(subClass, superClass); }\n\nfunction _setPrototypeOf(o, p) { _setPrototypeOf = Object.setPrototypeOf || function _setPrototypeOf(o, p) { o.__proto__ = p; return o; }; return _setPrototypeOf(o, p); }\n\nfunction _createSuper(Derived) { var hasNativeReflectConstruct = _isNativeReflectConstruct(); return function _createSuperInternal() { var Super = _getPrototypeOf(Derived), result; if (hasNativeReflectConstruct) { var NewTarget = _getPrototypeOf(this).constructor; result = Reflect.construct(Super, arguments, NewTarget); } else { result = Super.apply(this, arguments); } return _possibleConstructorReturn(this, result); }; }\n\nfunction _possibleConstructorReturn(self, call) { if (call && (clipboard_typeof(call) === \"object\" || typeof call === \"function\")) { return call; } return _assertThisInitialized(self); }\n\nfunction _assertThisInitialized(self) { if (self === void 0) { throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\"); } return self; }\n\nfunction _isNativeReflectConstruct() { if (typeof Reflect === \"undefined\" || !Reflect.construct) return false; if (Reflect.construct.sham) return false; if (typeof Proxy === \"function\") return true; try { Date.prototype.toString.call(Reflect.construct(Date, [], function () {})); return true; } catch (e) { return false; } }\n\nfunction _getPrototypeOf(o) { _getPrototypeOf = Object.setPrototypeOf ? 
Object.getPrototypeOf : function _getPrototypeOf(o) { return o.__proto__ || Object.getPrototypeOf(o); }; return _getPrototypeOf(o); }\n\n\n\n\n\n\n/**\n * Helper function to retrieve attribute value.\n * @param {String} suffix\n * @param {Element} element\n */\n\nfunction getAttributeValue(suffix, element) {\n var attribute = \"data-clipboard-\".concat(suffix);\n\n if (!element.hasAttribute(attribute)) {\n return;\n }\n\n return element.getAttribute(attribute);\n}\n/**\n * Base class which takes one or more elements, adds event listeners to them,\n * and instantiates a new `ClipboardAction` on each click.\n */\n\n\nvar Clipboard = /*#__PURE__*/function (_Emitter) {\n _inherits(Clipboard, _Emitter);\n\n var _super = _createSuper(Clipboard);\n\n /**\n * @param {String|HTMLElement|HTMLCollection|NodeList} trigger\n * @param {Object} options\n */\n function Clipboard(trigger, options) {\n var _this;\n\n _classCallCheck(this, Clipboard);\n\n _this = _super.call(this);\n\n _this.resolveOptions(options);\n\n _this.listenClick(trigger);\n\n return _this;\n }\n /**\n * Defines if attributes would be resolved using internal setter functions\n * or custom functions that were passed in the constructor.\n * @param {Object} options\n */\n\n\n _createClass(Clipboard, [{\n key: \"resolveOptions\",\n value: function resolveOptions() {\n var options = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {};\n this.action = typeof options.action === 'function' ? options.action : this.defaultAction;\n this.target = typeof options.target === 'function' ? options.target : this.defaultTarget;\n this.text = typeof options.text === 'function' ? options.text : this.defaultText;\n this.container = clipboard_typeof(options.container) === 'object' ? options.container : document.body;\n }\n /**\n * Adds a click event listener to the passed trigger.\n * @param {String|HTMLElement|HTMLCollection|NodeList} trigger\n */\n\n }, {\n key: \"listenClick\",\n value: function listenClick(trigger) {\n var _this2 = this;\n\n this.listener = listen_default()(trigger, 'click', function (e) {\n return _this2.onClick(e);\n });\n }\n /**\n * Defines a new `ClipboardAction` on each click event.\n * @param {Event} e\n */\n\n }, {\n key: \"onClick\",\n value: function onClick(e) {\n var trigger = e.delegateTarget || e.currentTarget;\n var action = this.action(trigger) || 'copy';\n var text = actions_default({\n action: action,\n container: this.container,\n target: this.target(trigger),\n text: this.text(trigger)\n }); // Fires an event based on the copy operation result.\n\n this.emit(text ? 
'success' : 'error', {\n action: action,\n text: text,\n trigger: trigger,\n clearSelection: function clearSelection() {\n if (trigger) {\n trigger.focus();\n }\n\n window.getSelection().removeAllRanges();\n }\n });\n }\n /**\n * Default `action` lookup function.\n * @param {Element} trigger\n */\n\n }, {\n key: \"defaultAction\",\n value: function defaultAction(trigger) {\n return getAttributeValue('action', trigger);\n }\n /**\n * Default `target` lookup function.\n * @param {Element} trigger\n */\n\n }, {\n key: \"defaultTarget\",\n value: function defaultTarget(trigger) {\n var selector = getAttributeValue('target', trigger);\n\n if (selector) {\n return document.querySelector(selector);\n }\n }\n /**\n * Allow fire programmatically a copy action\n * @param {String|HTMLElement} target\n * @param {Object} options\n * @returns Text copied.\n */\n\n }, {\n key: \"defaultText\",\n\n /**\n * Default `text` lookup function.\n * @param {Element} trigger\n */\n value: function defaultText(trigger) {\n return getAttributeValue('text', trigger);\n }\n /**\n * Destroy lifecycle.\n */\n\n }, {\n key: \"destroy\",\n value: function destroy() {\n this.listener.destroy();\n }\n }], [{\n key: \"copy\",\n value: function copy(target) {\n var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {\n container: document.body\n };\n return actions_copy(target, options);\n }\n /**\n * Allow fire programmatically a cut action\n * @param {String|HTMLElement} target\n * @returns Text cutted.\n */\n\n }, {\n key: \"cut\",\n value: function cut(target) {\n return actions_cut(target);\n }\n /**\n * Returns the support of the given action, or all actions if no action is\n * given.\n * @param {String} [action]\n */\n\n }, {\n key: \"isSupported\",\n value: function isSupported() {\n var action = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : ['copy', 'cut'];\n var actions = typeof action === 'string' ? 
[action] : action;\n var support = !!document.queryCommandSupported;\n actions.forEach(function (action) {\n support = support && !!document.queryCommandSupported(action);\n });\n return support;\n }\n }]);\n\n return Clipboard;\n}((tiny_emitter_default()));\n\n/* harmony default export */ var clipboard = (Clipboard);\n\n/***/ }),\n\n/***/ 828:\n/***/ (function(module) {\n\nvar DOCUMENT_NODE_TYPE = 9;\n\n/**\n * A polyfill for Element.matches()\n */\nif (typeof Element !== 'undefined' && !Element.prototype.matches) {\n var proto = Element.prototype;\n\n proto.matches = proto.matchesSelector ||\n proto.mozMatchesSelector ||\n proto.msMatchesSelector ||\n proto.oMatchesSelector ||\n proto.webkitMatchesSelector;\n}\n\n/**\n * Finds the closest parent that matches a selector.\n *\n * @param {Element} element\n * @param {String} selector\n * @return {Function}\n */\nfunction closest (element, selector) {\n while (element && element.nodeType !== DOCUMENT_NODE_TYPE) {\n if (typeof element.matches === 'function' &&\n element.matches(selector)) {\n return element;\n }\n element = element.parentNode;\n }\n}\n\nmodule.exports = closest;\n\n\n/***/ }),\n\n/***/ 438:\n/***/ (function(module, __unused_webpack_exports, __webpack_require__) {\n\nvar closest = __webpack_require__(828);\n\n/**\n * Delegates event to a selector.\n *\n * @param {Element} element\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @param {Boolean} useCapture\n * @return {Object}\n */\nfunction _delegate(element, selector, type, callback, useCapture) {\n var listenerFn = listener.apply(this, arguments);\n\n element.addEventListener(type, listenerFn, useCapture);\n\n return {\n destroy: function() {\n element.removeEventListener(type, listenerFn, useCapture);\n }\n }\n}\n\n/**\n * Delegates event to a selector.\n *\n * @param {Element|String|Array} [elements]\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @param {Boolean} useCapture\n * @return {Object}\n */\nfunction delegate(elements, selector, type, callback, useCapture) {\n // Handle the regular Element usage\n if (typeof elements.addEventListener === 'function') {\n return _delegate.apply(null, arguments);\n }\n\n // Handle Element-less usage, it defaults to global delegation\n if (typeof type === 'function') {\n // Use `document` as the first parameter, then apply arguments\n // This is a short way to .unshift `arguments` without running into deoptimizations\n return _delegate.bind(null, document).apply(null, arguments);\n }\n\n // Handle Selector-based usage\n if (typeof elements === 'string') {\n elements = document.querySelectorAll(elements);\n }\n\n // Handle Array-like based usage\n return Array.prototype.map.call(elements, function (element) {\n return _delegate(element, selector, type, callback, useCapture);\n });\n}\n\n/**\n * Finds closest match and invokes callback.\n *\n * @param {Element} element\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @return {Function}\n */\nfunction listener(element, selector, type, callback) {\n return function(e) {\n e.delegateTarget = closest(e.target, selector);\n\n if (e.delegateTarget) {\n callback.call(element, e);\n }\n }\n}\n\nmodule.exports = delegate;\n\n\n/***/ }),\n\n/***/ 879:\n/***/ (function(__unused_webpack_module, exports) {\n\n/**\n * Check if argument is a HTML element.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.node = function(value) {\n return value !== undefined\n && 
value instanceof HTMLElement\n && value.nodeType === 1;\n};\n\n/**\n * Check if argument is a list of HTML elements.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.nodeList = function(value) {\n var type = Object.prototype.toString.call(value);\n\n return value !== undefined\n && (type === '[object NodeList]' || type === '[object HTMLCollection]')\n && ('length' in value)\n && (value.length === 0 || exports.node(value[0]));\n};\n\n/**\n * Check if argument is a string.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.string = function(value) {\n return typeof value === 'string'\n || value instanceof String;\n};\n\n/**\n * Check if argument is a function.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.fn = function(value) {\n var type = Object.prototype.toString.call(value);\n\n return type === '[object Function]';\n};\n\n\n/***/ }),\n\n/***/ 370:\n/***/ (function(module, __unused_webpack_exports, __webpack_require__) {\n\nvar is = __webpack_require__(879);\nvar delegate = __webpack_require__(438);\n\n/**\n * Validates all params and calls the right\n * listener function based on its target type.\n *\n * @param {String|HTMLElement|HTMLCollection|NodeList} target\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listen(target, type, callback) {\n if (!target && !type && !callback) {\n throw new Error('Missing required arguments');\n }\n\n if (!is.string(type)) {\n throw new TypeError('Second argument must be a String');\n }\n\n if (!is.fn(callback)) {\n throw new TypeError('Third argument must be a Function');\n }\n\n if (is.node(target)) {\n return listenNode(target, type, callback);\n }\n else if (is.nodeList(target)) {\n return listenNodeList(target, type, callback);\n }\n else if (is.string(target)) {\n return listenSelector(target, type, callback);\n }\n else {\n throw new TypeError('First argument must be a String, HTMLElement, HTMLCollection, or NodeList');\n }\n}\n\n/**\n * Adds an event listener to a HTML element\n * and returns a remove listener function.\n *\n * @param {HTMLElement} node\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenNode(node, type, callback) {\n node.addEventListener(type, callback);\n\n return {\n destroy: function() {\n node.removeEventListener(type, callback);\n }\n }\n}\n\n/**\n * Add an event listener to a list of HTML elements\n * and returns a remove listener function.\n *\n * @param {NodeList|HTMLCollection} nodeList\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenNodeList(nodeList, type, callback) {\n Array.prototype.forEach.call(nodeList, function(node) {\n node.addEventListener(type, callback);\n });\n\n return {\n destroy: function() {\n Array.prototype.forEach.call(nodeList, function(node) {\n node.removeEventListener(type, callback);\n });\n }\n }\n}\n\n/**\n * Add an event listener to a selector\n * and returns a remove listener function.\n *\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenSelector(selector, type, callback) {\n return delegate(document.body, selector, type, callback);\n}\n\nmodule.exports = listen;\n\n\n/***/ }),\n\n/***/ 817:\n/***/ (function(module) {\n\nfunction select(element) {\n var selectedText;\n\n if (element.nodeName === 'SELECT') {\n element.focus();\n\n selectedText = element.value;\n }\n else if (element.nodeName === 'INPUT' || element.nodeName 
=== 'TEXTAREA') {\n var isReadOnly = element.hasAttribute('readonly');\n\n if (!isReadOnly) {\n element.setAttribute('readonly', '');\n }\n\n element.select();\n element.setSelectionRange(0, element.value.length);\n\n if (!isReadOnly) {\n element.removeAttribute('readonly');\n }\n\n selectedText = element.value;\n }\n else {\n if (element.hasAttribute('contenteditable')) {\n element.focus();\n }\n\n var selection = window.getSelection();\n var range = document.createRange();\n\n range.selectNodeContents(element);\n selection.removeAllRanges();\n selection.addRange(range);\n\n selectedText = selection.toString();\n }\n\n return selectedText;\n}\n\nmodule.exports = select;\n\n\n/***/ }),\n\n/***/ 279:\n/***/ (function(module) {\n\nfunction E () {\n // Keep this empty so it's easier to inherit from\n // (via https://github.com/lipsmack from https://github.com/scottcorgan/tiny-emitter/issues/3)\n}\n\nE.prototype = {\n on: function (name, callback, ctx) {\n var e = this.e || (this.e = {});\n\n (e[name] || (e[name] = [])).push({\n fn: callback,\n ctx: ctx\n });\n\n return this;\n },\n\n once: function (name, callback, ctx) {\n var self = this;\n function listener () {\n self.off(name, listener);\n callback.apply(ctx, arguments);\n };\n\n listener._ = callback\n return this.on(name, listener, ctx);\n },\n\n emit: function (name) {\n var data = [].slice.call(arguments, 1);\n var evtArr = ((this.e || (this.e = {}))[name] || []).slice();\n var i = 0;\n var len = evtArr.length;\n\n for (i; i < len; i++) {\n evtArr[i].fn.apply(evtArr[i].ctx, data);\n }\n\n return this;\n },\n\n off: function (name, callback) {\n var e = this.e || (this.e = {});\n var evts = e[name];\n var liveEvents = [];\n\n if (evts && callback) {\n for (var i = 0, len = evts.length; i < len; i++) {\n if (evts[i].fn !== callback && evts[i].fn._ !== callback)\n liveEvents.push(evts[i]);\n }\n }\n\n // Remove event from queue to prevent memory leak\n // Suggested by https://github.com/lazd\n // Ref: https://github.com/scottcorgan/tiny-emitter/commit/c6ebfaa9bc973b33d110a84a307742b7cf94c953#commitcomment-5024910\n\n (liveEvents.length)\n ? 
e[name] = liveEvents\n : delete e[name];\n\n return this;\n }\n};\n\nmodule.exports = E;\nmodule.exports.TinyEmitter = E;\n\n\n/***/ })\n\n/******/ \t});\n/************************************************************************/\n/******/ \t// The module cache\n/******/ \tvar __webpack_module_cache__ = {};\n/******/ \t\n/******/ \t// The require function\n/******/ \tfunction __webpack_require__(moduleId) {\n/******/ \t\t// Check if module is in cache\n/******/ \t\tif(__webpack_module_cache__[moduleId]) {\n/******/ \t\t\treturn __webpack_module_cache__[moduleId].exports;\n/******/ \t\t}\n/******/ \t\t// Create a new module (and put it into the cache)\n/******/ \t\tvar module = __webpack_module_cache__[moduleId] = {\n/******/ \t\t\t// no module.id needed\n/******/ \t\t\t// no module.loaded needed\n/******/ \t\t\texports: {}\n/******/ \t\t};\n/******/ \t\n/******/ \t\t// Execute the module function\n/******/ \t\t__webpack_modules__[moduleId](module, module.exports, __webpack_require__);\n/******/ \t\n/******/ \t\t// Return the exports of the module\n/******/ \t\treturn module.exports;\n/******/ \t}\n/******/ \t\n/************************************************************************/\n/******/ \t/* webpack/runtime/compat get default export */\n/******/ \t!function() {\n/******/ \t\t// getDefaultExport function for compatibility with non-harmony modules\n/******/ \t\t__webpack_require__.n = function(module) {\n/******/ \t\t\tvar getter = module && module.__esModule ?\n/******/ \t\t\t\tfunction() { return module['default']; } :\n/******/ \t\t\t\tfunction() { return module; };\n/******/ \t\t\t__webpack_require__.d(getter, { a: getter });\n/******/ \t\t\treturn getter;\n/******/ \t\t};\n/******/ \t}();\n/******/ \t\n/******/ \t/* webpack/runtime/define property getters */\n/******/ \t!function() {\n/******/ \t\t// define getter functions for harmony exports\n/******/ \t\t__webpack_require__.d = function(exports, definition) {\n/******/ \t\t\tfor(var key in definition) {\n/******/ \t\t\t\tif(__webpack_require__.o(definition, key) && !__webpack_require__.o(exports, key)) {\n/******/ \t\t\t\t\tObject.defineProperty(exports, key, { enumerable: true, get: definition[key] });\n/******/ \t\t\t\t}\n/******/ \t\t\t}\n/******/ \t\t};\n/******/ \t}();\n/******/ \t\n/******/ \t/* webpack/runtime/hasOwnProperty shorthand */\n/******/ \t!function() {\n/******/ \t\t__webpack_require__.o = function(obj, prop) { return Object.prototype.hasOwnProperty.call(obj, prop); }\n/******/ \t}();\n/******/ \t\n/************************************************************************/\n/******/ \t// module exports must be returned from runtime so entry inlining is disabled\n/******/ \t// startup\n/******/ \t// Load entry module and return exports\n/******/ \treturn __webpack_require__(686);\n/******/ })()\n.default;\n});", "/*!\n * escape-html\n * Copyright(c) 2012-2013 TJ Holowaychuk\n * Copyright(c) 2015 Andreas Lubbe\n * Copyright(c) 2015 Tiancheng \"Timothy\" Gu\n * MIT Licensed\n */\n\n'use strict';\n\n/**\n * Module variables.\n * @private\n */\n\nvar matchHtmlRegExp = /[\"'&<>]/;\n\n/**\n * Module exports.\n * @public\n */\n\nmodule.exports = escapeHtml;\n\n/**\n * Escape special characters in the given string of html.\n *\n * @param {string} string The string to escape for inserting into HTML\n * @return {string}\n * @public\n */\n\nfunction escapeHtml(string) {\n var str = '' + string;\n var match = matchHtmlRegExp.exec(str);\n\n if (!match) {\n return str;\n }\n\n var escape;\n var html = '';\n var index = 0;\n 
var lastIndex = 0;\n\n for (index = match.index; index < str.length; index++) {\n switch (str.charCodeAt(index)) {\n case 34: // \"\n escape = '"';\n break;\n case 38: // &\n escape = '&';\n break;\n case 39: // '\n escape = ''';\n break;\n case 60: // <\n escape = '<';\n break;\n case 62: // >\n escape = '>';\n break;\n default:\n continue;\n }\n\n if (lastIndex !== index) {\n html += str.substring(lastIndex, index);\n }\n\n lastIndex = index + 1;\n html += escape;\n }\n\n return lastIndex !== index\n ? html + str.substring(lastIndex, index)\n : html;\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport \"focus-visible\"\n\nimport {\n EMPTY,\n NEVER,\n Observable,\n Subject,\n defer,\n delay,\n filter,\n map,\n merge,\n mergeWith,\n shareReplay,\n switchMap\n} from \"rxjs\"\n\nimport { configuration, feature } from \"./_\"\nimport {\n at,\n getActiveElement,\n getOptionalElement,\n requestJSON,\n setLocation,\n setToggle,\n watchDocument,\n watchKeyboard,\n watchLocation,\n watchLocationTarget,\n watchMedia,\n watchPrint,\n watchScript,\n watchViewport\n} from \"./browser\"\nimport {\n getComponentElement,\n getComponentElements,\n mountAnnounce,\n mountBackToTop,\n mountConsent,\n mountContent,\n mountDialog,\n mountHeader,\n mountHeaderTitle,\n mountPalette,\n mountProgress,\n mountSearch,\n mountSearchHiglight,\n mountSidebar,\n mountSource,\n mountTableOfContents,\n mountTabs,\n watchHeader,\n watchMain\n} from \"./components\"\nimport {\n SearchIndex,\n setupClipboardJS,\n setupInstantNavigation,\n setupVersionSelector\n} from \"./integrations\"\nimport {\n patchEllipsis,\n patchIndeterminate,\n patchScrollfix,\n patchScrolllock\n} from \"./patches\"\nimport \"./polyfills\"\n\n/* ----------------------------------------------------------------------------\n * Functions - @todo refactor\n * ------------------------------------------------------------------------- */\n\n/**\n * Fetch search index\n *\n * @returns Search index observable\n */\nfunction fetchSearchIndex(): Observable {\n if (location.protocol === \"file:\") {\n return watchScript(\n `${new URL(\"search/search_index.js\", config.base)}`\n )\n .pipe(\n // @ts-ignore - @todo fix typings\n map(() => __index),\n shareReplay(1)\n )\n } else {\n return requestJSON(\n new URL(\"search/search_index.json\", config.base)\n )\n }\n}\n\n/* ----------------------------------------------------------------------------\n * Application\n * 
------------------------------------------------------------------------- */\n\n/* Yay, JavaScript is available */\ndocument.documentElement.classList.remove(\"no-js\")\ndocument.documentElement.classList.add(\"js\")\n\n/* Set up navigation observables and subjects */\nconst document$ = watchDocument()\nconst location$ = watchLocation()\nconst target$ = watchLocationTarget(location$)\nconst keyboard$ = watchKeyboard()\n\n/* Set up media observables */\nconst viewport$ = watchViewport()\nconst tablet$ = watchMedia(\"(min-width: 960px)\")\nconst screen$ = watchMedia(\"(min-width: 1220px)\")\nconst print$ = watchPrint()\n\n/* Retrieve search index, if search is enabled */\nconst config = configuration()\nconst index$ = document.forms.namedItem(\"search\")\n ? fetchSearchIndex()\n : NEVER\n\n/* Set up Clipboard.js integration */\nconst alert$ = new Subject()\nsetupClipboardJS({ alert$ })\n\n/* Set up progress indicator */\nconst progress$ = new Subject()\n\n/* Set up instant navigation, if enabled */\nif (feature(\"navigation.instant\"))\n setupInstantNavigation({ location$, viewport$, progress$ })\n .subscribe(document$)\n\n/* Set up version selector */\nif (config.version?.provider === \"mike\")\n setupVersionSelector({ document$ })\n\n/* Always close drawer and search on navigation */\nmerge(location$, target$)\n .pipe(\n delay(125)\n )\n .subscribe(() => {\n setToggle(\"drawer\", false)\n setToggle(\"search\", false)\n })\n\n/* Set up global keyboard handlers */\nkeyboard$\n .pipe(\n filter(({ mode }) => mode === \"global\")\n )\n .subscribe(key => {\n switch (key.type) {\n\n /* Go to previous page */\n case \"p\":\n case \",\":\n const prev = getOptionalElement(\"link[rel=prev]\")\n if (typeof prev !== \"undefined\")\n setLocation(prev)\n break\n\n /* Go to next page */\n case \"n\":\n case \".\":\n const next = getOptionalElement(\"link[rel=next]\")\n if (typeof next !== \"undefined\")\n setLocation(next)\n break\n\n /* Expand navigation, see https://bit.ly/3ZjG5io */\n case \"Enter\":\n const active = getActiveElement()\n if (active instanceof HTMLLabelElement)\n active.click()\n }\n })\n\n/* Set up patches */\npatchEllipsis({ viewport$, document$ })\npatchIndeterminate({ document$, tablet$ })\npatchScrollfix({ document$ })\npatchScrolllock({ viewport$, tablet$ })\n\n/* Set up header and main area observable */\nconst header$ = watchHeader(getComponentElement(\"header\"), { viewport$ })\nconst main$ = document$\n .pipe(\n map(() => getComponentElement(\"main\")),\n switchMap(el => watchMain(el, { viewport$, header$ })),\n shareReplay(1)\n )\n\n/* Set up control component observables */\nconst control$ = merge(\n\n /* Consent */\n ...getComponentElements(\"consent\")\n .map(el => mountConsent(el, { target$ })),\n\n /* Dialog */\n ...getComponentElements(\"dialog\")\n .map(el => mountDialog(el, { alert$ })),\n\n /* Header */\n ...getComponentElements(\"header\")\n .map(el => mountHeader(el, { viewport$, header$, main$ })),\n\n /* Color palette */\n ...getComponentElements(\"palette\")\n .map(el => mountPalette(el)),\n\n /* Progress bar */\n ...getComponentElements(\"progress\")\n .map(el => mountProgress(el, { progress$ })),\n\n /* Search */\n ...getComponentElements(\"search\")\n .map(el => mountSearch(el, { index$, keyboard$ })),\n\n /* Repository information */\n ...getComponentElements(\"source\")\n .map(el => mountSource(el))\n)\n\n/* Set up content component observables */\nconst content$ = defer(() => merge(\n\n /* Announcement bar */\n ...getComponentElements(\"announce\")\n 
.map(el => mountAnnounce(el)),\n\n /* Content */\n ...getComponentElements(\"content\")\n .map(el => mountContent(el, { viewport$, target$, print$ })),\n\n /* Search highlighting */\n ...getComponentElements(\"content\")\n .map(el => feature(\"search.highlight\")\n ? mountSearchHiglight(el, { index$, location$ })\n : EMPTY\n ),\n\n /* Header title */\n ...getComponentElements(\"header-title\")\n .map(el => mountHeaderTitle(el, { viewport$, header$ })),\n\n /* Sidebar */\n ...getComponentElements(\"sidebar\")\n .map(el => el.getAttribute(\"data-md-type\") === \"navigation\"\n ? at(screen$, () => mountSidebar(el, { viewport$, header$, main$ }))\n : at(tablet$, () => mountSidebar(el, { viewport$, header$, main$ }))\n ),\n\n /* Navigation tabs */\n ...getComponentElements(\"tabs\")\n .map(el => mountTabs(el, { viewport$, header$ })),\n\n /* Table of contents */\n ...getComponentElements(\"toc\")\n .map(el => mountTableOfContents(el, {\n viewport$, header$, main$, target$\n })),\n\n /* Back-to-top button */\n ...getComponentElements(\"top\")\n .map(el => mountBackToTop(el, { viewport$, header$, main$, target$ }))\n))\n\n/* Set up component observables */\nconst component$ = document$\n .pipe(\n switchMap(() => content$),\n mergeWith(control$),\n shareReplay(1)\n )\n\n/* Subscribe to all components */\ncomponent$.subscribe()\n\n/* ----------------------------------------------------------------------------\n * Exports\n * ------------------------------------------------------------------------- */\n\nwindow.document$ = document$ /* Document observable */\nwindow.location$ = location$ /* Location subject */\nwindow.target$ = target$ /* Location target observable */\nwindow.keyboard$ = keyboard$ /* Keyboard observable */\nwindow.viewport$ = viewport$ /* Viewport observable */\nwindow.tablet$ = tablet$ /* Media tablet observable */\nwindow.screen$ = screen$ /* Media screen observable */\nwindow.print$ = print$ /* Media print observable */\nwindow.alert$ = alert$ /* Alert subject */\nwindow.progress$ = progress$ /* Progress indicator subject */\nwindow.component$ = component$ /* Component observable */\n", "/*! *****************************************************************************\r\nCopyright (c) Microsoft Corporation.\r\n\r\nPermission to use, copy, modify, and/or distribute this software for any\r\npurpose with or without fee is hereby granted.\r\n\r\nTHE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH\r\nREGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY\r\nAND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,\r\nINDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM\r\nLOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR\r\nOTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR\r\nPERFORMANCE OF THIS SOFTWARE.\r\n***************************************************************************** */\r\n/* global Reflect, Promise */\r\n\r\nvar extendStatics = function(d, b) {\r\n extendStatics = Object.setPrototypeOf ||\r\n ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||\r\n function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };\r\n return extendStatics(d, b);\r\n};\r\n\r\nexport function __extends(d, b) {\r\n if (typeof b !== \"function\" && b !== null)\r\n throw new TypeError(\"Class extends value \" + String(b) + \" is not a constructor or null\");\r\n extendStatics(d, b);\r\n function __() { this.constructor = d; }\r\n d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());\r\n}\r\n\r\nexport var __assign = function() {\r\n __assign = Object.assign || function __assign(t) {\r\n for (var s, i = 1, n = arguments.length; i < n; i++) {\r\n s = arguments[i];\r\n for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p)) t[p] = s[p];\r\n }\r\n return t;\r\n }\r\n return __assign.apply(this, arguments);\r\n}\r\n\r\nexport function __rest(s, e) {\r\n var t = {};\r\n for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p) && e.indexOf(p) < 0)\r\n t[p] = s[p];\r\n if (s != null && typeof Object.getOwnPropertySymbols === \"function\")\r\n for (var i = 0, p = Object.getOwnPropertySymbols(s); i < p.length; i++) {\r\n if (e.indexOf(p[i]) < 0 && Object.prototype.propertyIsEnumerable.call(s, p[i]))\r\n t[p[i]] = s[p[i]];\r\n }\r\n return t;\r\n}\r\n\r\nexport function __decorate(decorators, target, key, desc) {\r\n var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;\r\n if (typeof Reflect === \"object\" && typeof Reflect.decorate === \"function\") r = Reflect.decorate(decorators, target, key, desc);\r\n else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;\r\n return c > 3 && r && Object.defineProperty(target, key, r), r;\r\n}\r\n\r\nexport function __param(paramIndex, decorator) {\r\n return function (target, key) { decorator(target, key, paramIndex); }\r\n}\r\n\r\nexport function __metadata(metadataKey, metadataValue) {\r\n if (typeof Reflect === \"object\" && typeof Reflect.metadata === \"function\") return Reflect.metadata(metadataKey, metadataValue);\r\n}\r\n\r\nexport function __awaiter(thisArg, _arguments, P, generator) {\r\n function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }\r\n return new (P || (P = Promise))(function (resolve, reject) {\r\n function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }\r\n function rejected(value) { try { step(generator[\"throw\"](value)); } catch (e) { reject(e); } }\r\n function step(result) { result.done ? 
resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }\r\n step((generator = generator.apply(thisArg, _arguments || [])).next());\r\n });\r\n}\r\n\r\nexport function __generator(thisArg, body) {\r\n var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g;\r\n return g = { next: verb(0), \"throw\": verb(1), \"return\": verb(2) }, typeof Symbol === \"function\" && (g[Symbol.iterator] = function() { return this; }), g;\r\n function verb(n) { return function (v) { return step([n, v]); }; }\r\n function step(op) {\r\n if (f) throw new TypeError(\"Generator is already executing.\");\r\n while (_) try {\r\n if (f = 1, y && (t = op[0] & 2 ? y[\"return\"] : op[0] ? y[\"throw\"] || ((t = y[\"return\"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;\r\n if (y = 0, t) op = [op[0] & 2, t.value];\r\n switch (op[0]) {\r\n case 0: case 1: t = op; break;\r\n case 4: _.label++; return { value: op[1], done: false };\r\n case 5: _.label++; y = op[1]; op = [0]; continue;\r\n case 7: op = _.ops.pop(); _.trys.pop(); continue;\r\n default:\r\n if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; }\r\n if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; }\r\n if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; }\r\n if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; }\r\n if (t[2]) _.ops.pop();\r\n _.trys.pop(); continue;\r\n }\r\n op = body.call(thisArg, _);\r\n } catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }\r\n if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };\r\n }\r\n}\r\n\r\nexport var __createBinding = Object.create ? (function(o, m, k, k2) {\r\n if (k2 === undefined) k2 = k;\r\n Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } });\r\n}) : (function(o, m, k, k2) {\r\n if (k2 === undefined) k2 = k;\r\n o[k2] = m[k];\r\n});\r\n\r\nexport function __exportStar(m, o) {\r\n for (var p in m) if (p !== \"default\" && !Object.prototype.hasOwnProperty.call(o, p)) __createBinding(o, m, p);\r\n}\r\n\r\nexport function __values(o) {\r\n var s = typeof Symbol === \"function\" && Symbol.iterator, m = s && o[s], i = 0;\r\n if (m) return m.call(o);\r\n if (o && typeof o.length === \"number\") return {\r\n next: function () {\r\n if (o && i >= o.length) o = void 0;\r\n return { value: o && o[i++], done: !o };\r\n }\r\n };\r\n throw new TypeError(s ? 
\"Object is not iterable.\" : \"Symbol.iterator is not defined.\");\r\n}\r\n\r\nexport function __read(o, n) {\r\n var m = typeof Symbol === \"function\" && o[Symbol.iterator];\r\n if (!m) return o;\r\n var i = m.call(o), r, ar = [], e;\r\n try {\r\n while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value);\r\n }\r\n catch (error) { e = { error: error }; }\r\n finally {\r\n try {\r\n if (r && !r.done && (m = i[\"return\"])) m.call(i);\r\n }\r\n finally { if (e) throw e.error; }\r\n }\r\n return ar;\r\n}\r\n\r\n/** @deprecated */\r\nexport function __spread() {\r\n for (var ar = [], i = 0; i < arguments.length; i++)\r\n ar = ar.concat(__read(arguments[i]));\r\n return ar;\r\n}\r\n\r\n/** @deprecated */\r\nexport function __spreadArrays() {\r\n for (var s = 0, i = 0, il = arguments.length; i < il; i++) s += arguments[i].length;\r\n for (var r = Array(s), k = 0, i = 0; i < il; i++)\r\n for (var a = arguments[i], j = 0, jl = a.length; j < jl; j++, k++)\r\n r[k] = a[j];\r\n return r;\r\n}\r\n\r\nexport function __spreadArray(to, from, pack) {\r\n if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {\r\n if (ar || !(i in from)) {\r\n if (!ar) ar = Array.prototype.slice.call(from, 0, i);\r\n ar[i] = from[i];\r\n }\r\n }\r\n return to.concat(ar || Array.prototype.slice.call(from));\r\n}\r\n\r\nexport function __await(v) {\r\n return this instanceof __await ? (this.v = v, this) : new __await(v);\r\n}\r\n\r\nexport function __asyncGenerator(thisArg, _arguments, generator) {\r\n if (!Symbol.asyncIterator) throw new TypeError(\"Symbol.asyncIterator is not defined.\");\r\n var g = generator.apply(thisArg, _arguments || []), i, q = [];\r\n return i = {}, verb(\"next\"), verb(\"throw\"), verb(\"return\"), i[Symbol.asyncIterator] = function () { return this; }, i;\r\n function verb(n) { if (g[n]) i[n] = function (v) { return new Promise(function (a, b) { q.push([n, v, a, b]) > 1 || resume(n, v); }); }; }\r\n function resume(n, v) { try { step(g[n](v)); } catch (e) { settle(q[0][3], e); } }\r\n function step(r) { r.value instanceof __await ? Promise.resolve(r.value.v).then(fulfill, reject) : settle(q[0][2], r); }\r\n function fulfill(value) { resume(\"next\", value); }\r\n function reject(value) { resume(\"throw\", value); }\r\n function settle(f, v) { if (f(v), q.shift(), q.length) resume(q[0][0], q[0][1]); }\r\n}\r\n\r\nexport function __asyncDelegator(o) {\r\n var i, p;\r\n return i = {}, verb(\"next\"), verb(\"throw\", function (e) { throw e; }), verb(\"return\"), i[Symbol.iterator] = function () { return this; }, i;\r\n function verb(n, f) { i[n] = o[n] ? function (v) { return (p = !p) ? { value: __await(o[n](v)), done: n === \"return\" } : f ? f(v) : v; } : f; }\r\n}\r\n\r\nexport function __asyncValues(o) {\r\n if (!Symbol.asyncIterator) throw new TypeError(\"Symbol.asyncIterator is not defined.\");\r\n var m = o[Symbol.asyncIterator], i;\r\n return m ? m.call(o) : (o = typeof __values === \"function\" ? 
__values(o) : o[Symbol.iterator](), i = {}, verb(\"next\"), verb(\"throw\"), verb(\"return\"), i[Symbol.asyncIterator] = function () { return this; }, i);\r\n function verb(n) { i[n] = o[n] && function (v) { return new Promise(function (resolve, reject) { v = o[n](v), settle(resolve, reject, v.done, v.value); }); }; }\r\n function settle(resolve, reject, d, v) { Promise.resolve(v).then(function(v) { resolve({ value: v, done: d }); }, reject); }\r\n}\r\n\r\nexport function __makeTemplateObject(cooked, raw) {\r\n if (Object.defineProperty) { Object.defineProperty(cooked, \"raw\", { value: raw }); } else { cooked.raw = raw; }\r\n return cooked;\r\n};\r\n\r\nvar __setModuleDefault = Object.create ? (function(o, v) {\r\n Object.defineProperty(o, \"default\", { enumerable: true, value: v });\r\n}) : function(o, v) {\r\n o[\"default\"] = v;\r\n};\r\n\r\nexport function __importStar(mod) {\r\n if (mod && mod.__esModule) return mod;\r\n var result = {};\r\n if (mod != null) for (var k in mod) if (k !== \"default\" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);\r\n __setModuleDefault(result, mod);\r\n return result;\r\n}\r\n\r\nexport function __importDefault(mod) {\r\n return (mod && mod.__esModule) ? mod : { default: mod };\r\n}\r\n\r\nexport function __classPrivateFieldGet(receiver, state, kind, f) {\r\n if (kind === \"a\" && !f) throw new TypeError(\"Private accessor was defined without a getter\");\r\n if (typeof state === \"function\" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError(\"Cannot read private member from an object whose class did not declare it\");\r\n return kind === \"m\" ? f : kind === \"a\" ? f.call(receiver) : f ? f.value : state.get(receiver);\r\n}\r\n\r\nexport function __classPrivateFieldSet(receiver, state, value, kind, f) {\r\n if (kind === \"m\") throw new TypeError(\"Private method is not writable\");\r\n if (kind === \"a\" && !f) throw new TypeError(\"Private accessor was defined without a setter\");\r\n if (typeof state === \"function\" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError(\"Cannot write private member to an object whose class did not declare it\");\r\n return (kind === \"a\" ? f.call(receiver, value) : f ? f.value = value : state.set(receiver, value)), value;\r\n}\r\n", "/**\n * Returns true if the object is a function.\n * @param value The value to check\n */\nexport function isFunction(value: any): value is (...args: any[]) => any {\n return typeof value === 'function';\n}\n", "/**\n * Used to create Error subclasses until the community moves away from ES5.\n *\n * This is because compiling from TypeScript down to ES5 has issues with subclassing Errors\n * as well as other built-in types: https://github.com/Microsoft/TypeScript/issues/12123\n *\n * @param createImpl A factory function to create the actual constructor implementation. 
The returned\n * function should be a named function that calls `_super` internally.\n */\nexport function createErrorClass(createImpl: (_super: any) => any): T {\n const _super = (instance: any) => {\n Error.call(instance);\n instance.stack = new Error().stack;\n };\n\n const ctorFunc = createImpl(_super);\n ctorFunc.prototype = Object.create(Error.prototype);\n ctorFunc.prototype.constructor = ctorFunc;\n return ctorFunc;\n}\n", "import { createErrorClass } from './createErrorClass';\n\nexport interface UnsubscriptionError extends Error {\n readonly errors: any[];\n}\n\nexport interface UnsubscriptionErrorCtor {\n /**\n * @deprecated Internal implementation detail. Do not construct error instances.\n * Cannot be tagged as internal: https://github.com/ReactiveX/rxjs/issues/6269\n */\n new (errors: any[]): UnsubscriptionError;\n}\n\n/**\n * An error thrown when one or more errors have occurred during the\n * `unsubscribe` of a {@link Subscription}.\n */\nexport const UnsubscriptionError: UnsubscriptionErrorCtor = createErrorClass(\n (_super) =>\n function UnsubscriptionErrorImpl(this: any, errors: (Error | string)[]) {\n _super(this);\n this.message = errors\n ? `${errors.length} errors occurred during unsubscription:\n${errors.map((err, i) => `${i + 1}) ${err.toString()}`).join('\\n ')}`\n : '';\n this.name = 'UnsubscriptionError';\n this.errors = errors;\n }\n);\n", "/**\n * Removes an item from an array, mutating it.\n * @param arr The array to remove the item from\n * @param item The item to remove\n */\nexport function arrRemove(arr: T[] | undefined | null, item: T) {\n if (arr) {\n const index = arr.indexOf(item);\n 0 <= index && arr.splice(index, 1);\n }\n}\n", "import { isFunction } from './util/isFunction';\nimport { UnsubscriptionError } from './util/UnsubscriptionError';\nimport { SubscriptionLike, TeardownLogic, Unsubscribable } from './types';\nimport { arrRemove } from './util/arrRemove';\n\n/**\n * Represents a disposable resource, such as the execution of an Observable. A\n * Subscription has one important method, `unsubscribe`, that takes no argument\n * and just disposes the resource held by the subscription.\n *\n * Additionally, subscriptions may be grouped together through the `add()`\n * method, which will attach a child Subscription to the current Subscription.\n * When a Subscription is unsubscribed, all its children (and its grandchildren)\n * will be unsubscribed as well.\n *\n * @class Subscription\n */\nexport class Subscription implements SubscriptionLike {\n /** @nocollapse */\n public static EMPTY = (() => {\n const empty = new Subscription();\n empty.closed = true;\n return empty;\n })();\n\n /**\n * A flag to indicate whether this Subscription has already been unsubscribed.\n */\n public closed = false;\n\n private _parentage: Subscription[] | Subscription | null = null;\n\n /**\n * The list of registered finalizers to execute upon unsubscription. Adding and removing from this\n * list occurs in the {@link #add} and {@link #remove} methods.\n */\n private _finalizers: Exclude[] | null = null;\n\n /**\n * @param initialTeardown A function executed first as part of the finalization\n * process that is kicked off when {@link #unsubscribe} is called.\n */\n constructor(private initialTeardown?: () => void) {}\n\n /**\n * Disposes the resources held by the subscription. 
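A usage sketch for the `createErrorClass` helper defined above, mirroring how `UnsubscriptionError` is built (RxJS constructs its other error types the same way). `DemoError` is a hypothetical name for illustration.

```ts
// Assumes createErrorClass from above is in scope; it is an internal helper,
// not public RxJS API.
const DemoError = createErrorClass(
  (_super) =>
    function DemoErrorImpl(this: any) {
      _super(this);
      this.name = 'DemoError';
      this.message = 'demo failure';
    }
);

const err = new (DemoError as any)();
console.log(err instanceof Error, err.name); // true 'DemoError'
```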
May, for instance, cancel\n * an ongoing Observable execution or cancel any other type of work that\n * started when the Subscription was created.\n * @return {void}\n */\n unsubscribe(): void {\n let errors: any[] | undefined;\n\n if (!this.closed) {\n this.closed = true;\n\n // Remove this from it's parents.\n const { _parentage } = this;\n if (_parentage) {\n this._parentage = null;\n if (Array.isArray(_parentage)) {\n for (const parent of _parentage) {\n parent.remove(this);\n }\n } else {\n _parentage.remove(this);\n }\n }\n\n const { initialTeardown: initialFinalizer } = this;\n if (isFunction(initialFinalizer)) {\n try {\n initialFinalizer();\n } catch (e) {\n errors = e instanceof UnsubscriptionError ? e.errors : [e];\n }\n }\n\n const { _finalizers } = this;\n if (_finalizers) {\n this._finalizers = null;\n for (const finalizer of _finalizers) {\n try {\n execFinalizer(finalizer);\n } catch (err) {\n errors = errors ?? [];\n if (err instanceof UnsubscriptionError) {\n errors = [...errors, ...err.errors];\n } else {\n errors.push(err);\n }\n }\n }\n }\n\n if (errors) {\n throw new UnsubscriptionError(errors);\n }\n }\n }\n\n /**\n * Adds a finalizer to this subscription, so that finalization will be unsubscribed/called\n * when this subscription is unsubscribed. If this subscription is already {@link #closed},\n * because it has already been unsubscribed, then whatever finalizer is passed to it\n * will automatically be executed (unless the finalizer itself is also a closed subscription).\n *\n * Closed Subscriptions cannot be added as finalizers to any subscription. Adding a closed\n * subscription to a any subscription will result in no operation. (A noop).\n *\n * Adding a subscription to itself, or adding `null` or `undefined` will not perform any\n * operation at all. (A noop).\n *\n * `Subscription` instances that are added to this instance will automatically remove themselves\n * if they are unsubscribed. Functions and {@link Unsubscribable} objects that you wish to remove\n * will need to be removed manually with {@link #remove}\n *\n * @param teardown The finalization logic to add to this subscription.\n */\n add(teardown: TeardownLogic): void {\n // Only add the finalizer if it's not undefined\n // and don't add a subscription to itself.\n if (teardown && teardown !== this) {\n if (this.closed) {\n // If this subscription is already closed,\n // execute whatever finalizer is handed to it automatically.\n execFinalizer(teardown);\n } else {\n if (teardown instanceof Subscription) {\n // We don't add closed subscriptions, and we don't add the same subscription\n // twice. Subscription unsubscribe is idempotent.\n if (teardown.closed || teardown._hasParent(this)) {\n return;\n }\n teardown._addParent(this);\n }\n (this._finalizers = this._finalizers ?? 
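A small sketch (editor's addition) of the `Subscription` contract implemented above: on `unsubscribe()`, the initial teardown runs first, then each registered finalizer in insertion order, and adding a finalizer to an already-closed subscription executes it immediately.

```ts
import { Subscription } from 'rxjs';

const parent = new Subscription(() => console.log('initial teardown'));
parent.add(new Subscription(() => console.log('child teardown')));
parent.add(() => console.log('finalizer'));

parent.unsubscribe();
// Logs: 'initial teardown', 'child teardown', 'finalizer'

// parent is now closed, so this finalizer runs at once:
parent.add(() => console.log('runs immediately'));
```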
[]).push(teardown);\n }\n }\n }\n\n /**\n * Checks to see if a this subscription already has a particular parent.\n * This will signal that this subscription has already been added to the parent in question.\n * @param parent the parent to check for\n */\n private _hasParent(parent: Subscription) {\n const { _parentage } = this;\n return _parentage === parent || (Array.isArray(_parentage) && _parentage.includes(parent));\n }\n\n /**\n * Adds a parent to this subscription so it can be removed from the parent if it\n * unsubscribes on it's own.\n *\n * NOTE: THIS ASSUMES THAT {@link _hasParent} HAS ALREADY BEEN CHECKED.\n * @param parent The parent subscription to add\n */\n private _addParent(parent: Subscription) {\n const { _parentage } = this;\n this._parentage = Array.isArray(_parentage) ? (_parentage.push(parent), _parentage) : _parentage ? [_parentage, parent] : parent;\n }\n\n /**\n * Called on a child when it is removed via {@link #remove}.\n * @param parent The parent to remove\n */\n private _removeParent(parent: Subscription) {\n const { _parentage } = this;\n if (_parentage === parent) {\n this._parentage = null;\n } else if (Array.isArray(_parentage)) {\n arrRemove(_parentage, parent);\n }\n }\n\n /**\n * Removes a finalizer from this subscription that was previously added with the {@link #add} method.\n *\n * Note that `Subscription` instances, when unsubscribed, will automatically remove themselves\n * from every other `Subscription` they have been added to. This means that using the `remove` method\n * is not a common thing and should be used thoughtfully.\n *\n * If you add the same finalizer instance of a function or an unsubscribable object to a `Subscription` instance\n * more than once, you will need to call `remove` the same number of times to remove all instances.\n *\n * All finalizer instances are removed to free up memory upon unsubscription.\n *\n * @param teardown The finalizer to remove from this subscription\n */\n remove(teardown: Exclude): void {\n const { _finalizers } = this;\n _finalizers && arrRemove(_finalizers, teardown);\n\n if (teardown instanceof Subscription) {\n teardown._removeParent(this);\n }\n }\n}\n\nexport const EMPTY_SUBSCRIPTION = Subscription.EMPTY;\n\nexport function isSubscription(value: any): value is Subscription {\n return (\n value instanceof Subscription ||\n (value && 'closed' in value && isFunction(value.remove) && isFunction(value.add) && isFunction(value.unsubscribe))\n );\n}\n\nfunction execFinalizer(finalizer: Unsubscribable | (() => void)) {\n if (isFunction(finalizer)) {\n finalizer();\n } else {\n finalizer.unsubscribe();\n }\n}\n", "import { Subscriber } from './Subscriber';\nimport { ObservableNotification } from './types';\n\n/**\n * The {@link GlobalConfig} object for RxJS. It is used to configure things\n * like how to react on unhandled errors.\n */\nexport const config: GlobalConfig = {\n onUnhandledError: null,\n onStoppedNotification: null,\n Promise: undefined,\n useDeprecatedSynchronousErrorHandling: false,\n useDeprecatedNextContext: false,\n};\n\n/**\n * The global configuration object for RxJS, used to configure things\n * like how to react on unhandled errors. Accessible via {@link config}\n * object.\n */\nexport interface GlobalConfig {\n /**\n * A registration point for unhandled errors from RxJS. These are errors that\n * cannot were not handled by consuming code in the usual subscription path. 
For\n * example, if you have this configured, and you subscribe to an observable without\n * providing an error handler, errors from that subscription will end up here. This\n * will _always_ be called asynchronously on another job in the runtime. This is because\n * we do not want errors thrown in this user-configured handler to interfere with the\n * behavior of the library.\n */\n onUnhandledError: ((err: any) => void) | null;\n\n /**\n * A registration point for notifications that cannot be sent to subscribers because they\n * have completed, errored or have been explicitly unsubscribed. By default, next, complete\n * and error notifications sent to stopped subscribers are noops. However, sometimes callers\n * might want a different behavior. For example, with sources that attempt to report errors\n * to stopped subscribers, a caller can configure RxJS to throw an unhandled error instead.\n * This will _always_ be called asynchronously on another job in the runtime. This is because\n * we do not want errors thrown in this user-configured handler to interfere with the\n * behavior of the library.\n */\n onStoppedNotification: ((notification: ObservableNotification, subscriber: Subscriber) => void) | null;\n\n /**\n * The promise constructor used by default for {@link Observable#toPromise toPromise} and {@link Observable#forEach forEach}\n * methods.\n *\n * @deprecated As of version 8, RxJS will no longer support this sort of injection of a\n * Promise constructor. If you need a Promise implementation other than native promises,\n * please polyfill/patch Promise as you see appropriate. Will be removed in v8.\n */\n Promise?: PromiseConstructorLike;\n\n /**\n * If true, turns on synchronous error rethrowing, which is a deprecated behavior\n * in v6 and higher. This behavior enables bad patterns like wrapping a subscribe\n * call in a try/catch block. It also enables producer interference, a nasty bug\n * where a multicast can be broken for all observers by a downstream consumer with\n * an unhandled error. DO NOT USE THIS FLAG UNLESS IT'S NEEDED TO BUY TIME\n * FOR MIGRATION REASONS.\n *\n * @deprecated As of version 8, RxJS will no longer support synchronous throwing\n * of unhandled errors. All errors will be thrown on a separate call stack to prevent bad\n * behaviors described above. Will be removed in v8.\n */\n useDeprecatedSynchronousErrorHandling: boolean;\n\n /**\n * If true, enables an as-of-yet undocumented feature from v5: The ability to access\n * `unsubscribe()` via `this` context in `next` functions created in observers passed\n * to `subscribe`.\n *\n * This is being removed because the performance was severely problematic, and it could also cause\n * issues when types other than POJOs are passed to subscribe as subscribers, as they will likely have\n * their `this` context overwritten.\n *\n * @deprecated As of version 8, RxJS will no longer support altering the\n * context of next functions provided as part of an observer to Subscribe. Instead,\n * you will have access to a subscription or a signal or token that will allow you to do things like\n * unsubscribe and test closed status. 
Will be removed in v8.\n */\n useDeprecatedNextContext: boolean;\n}\n", "import type { TimerHandle } from './timerHandle';\ntype SetTimeoutFunction = (handler: () => void, timeout?: number, ...args: any[]) => TimerHandle;\ntype ClearTimeoutFunction = (handle: TimerHandle) => void;\n\ninterface TimeoutProvider {\n setTimeout: SetTimeoutFunction;\n clearTimeout: ClearTimeoutFunction;\n delegate:\n | {\n setTimeout: SetTimeoutFunction;\n clearTimeout: ClearTimeoutFunction;\n }\n | undefined;\n}\n\nexport const timeoutProvider: TimeoutProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n setTimeout(handler: () => void, timeout?: number, ...args) {\n const { delegate } = timeoutProvider;\n if (delegate?.setTimeout) {\n return delegate.setTimeout(handler, timeout, ...args);\n }\n return setTimeout(handler, timeout, ...args);\n },\n clearTimeout(handle) {\n const { delegate } = timeoutProvider;\n return (delegate?.clearTimeout || clearTimeout)(handle as any);\n },\n delegate: undefined,\n};\n", "import { config } from '../config';\nimport { timeoutProvider } from '../scheduler/timeoutProvider';\n\n/**\n * Handles an error on another job either with the user-configured {@link onUnhandledError},\n * or by throwing it on that new job so it can be picked up by `window.onerror`, `process.on('error')`, etc.\n *\n * This should be called whenever there is an error that is out-of-band with the subscription\n * or when an error hits a terminal boundary of the subscription and no error handler was provided.\n *\n * @param err the error to report\n */\nexport function reportUnhandledError(err: any) {\n timeoutProvider.setTimeout(() => {\n const { onUnhandledError } = config;\n if (onUnhandledError) {\n // Execute the user-configured error handler.\n onUnhandledError(err);\n } else {\n // Throw so it is picked up by the runtime's uncaught error mechanism.\n throw err;\n }\n });\n}\n", "/* tslint:disable:no-empty */\nexport function noop() { }\n", "import { CompleteNotification, NextNotification, ErrorNotification } from './types';\n\n/**\n * A completion object optimized for memory use and created to be the\n * same \"shape\" as other notifications in v8.\n * @internal\n */\nexport const COMPLETE_NOTIFICATION = (() => createNotification('C', undefined, undefined) as CompleteNotification)();\n\n/**\n * Internal use only. Creates an optimized error notification that is the same \"shape\"\n * as other notifications.\n * @internal\n */\nexport function errorNotification(error: any): ErrorNotification {\n return createNotification('E', undefined, error) as any;\n}\n\n/**\n * Internal use only. Creates an optimized next notification that is the same \"shape\"\n * as other notifications.\n * @internal\n */\nexport function nextNotification(value: T) {\n return createNotification('N', value, undefined) as NextNotification;\n}\n\n/**\n * Ensures that all notifications created internally have the same \"shape\" in v8.\n *\n * TODO: This is only exported to support a crazy legacy test in `groupBy`.\n * @internal\n */\nexport function createNotification(kind: 'N' | 'E' | 'C', value: any, error: any) {\n return {\n kind,\n value,\n error,\n };\n}\n", "import { config } from '../config';\n\nlet context: { errorThrown: boolean; error: any } | null = null;\n\n/**\n * Handles dealing with errors for super-gross mode. 
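A minimal sketch (editor's addition) of the `onUnhandledError` hook documented above: when a subscription supplies no error callback, the error is routed asynchronously to this handler instead of being thrown on a new call stack.

```ts
import { config, throwError } from 'rxjs';

config.onUnhandledError = (err) => console.warn('unhandled:', err);

// No error callback, so the error ends up in onUnhandledError on a
// separate job rather than crashing synchronously.
throwError(() => new Error('boom')).subscribe();
```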
Creates a context, in which\n * any synchronously thrown errors will be passed to {@link captureError}. Which\n * will record the error such that it will be rethrown after the call back is complete.\n * TODO: Remove in v8\n * @param cb An immediately executed function.\n */\nexport function errorContext(cb: () => void) {\n if (config.useDeprecatedSynchronousErrorHandling) {\n const isRoot = !context;\n if (isRoot) {\n context = { errorThrown: false, error: null };\n }\n cb();\n if (isRoot) {\n const { errorThrown, error } = context!;\n context = null;\n if (errorThrown) {\n throw error;\n }\n }\n } else {\n // This is the general non-deprecated path for everyone that\n // isn't crazy enough to use super-gross mode (useDeprecatedSynchronousErrorHandling)\n cb();\n }\n}\n\n/**\n * Captures errors only in super-gross mode.\n * @param err the error to capture\n */\nexport function captureError(err: any) {\n if (config.useDeprecatedSynchronousErrorHandling && context) {\n context.errorThrown = true;\n context.error = err;\n }\n}\n", "import { isFunction } from './util/isFunction';\nimport { Observer, ObservableNotification } from './types';\nimport { isSubscription, Subscription } from './Subscription';\nimport { config } from './config';\nimport { reportUnhandledError } from './util/reportUnhandledError';\nimport { noop } from './util/noop';\nimport { nextNotification, errorNotification, COMPLETE_NOTIFICATION } from './NotificationFactories';\nimport { timeoutProvider } from './scheduler/timeoutProvider';\nimport { captureError } from './util/errorContext';\n\n/**\n * Implements the {@link Observer} interface and extends the\n * {@link Subscription} class. While the {@link Observer} is the public API for\n * consuming the values of an {@link Observable}, all Observers get converted to\n * a Subscriber, in order to provide Subscription-like capabilities such as\n * `unsubscribe`. Subscriber is a common type in RxJS, and crucial for\n * implementing operators, but it is rarely used as a public API.\n *\n * @class Subscriber\n */\nexport class Subscriber extends Subscription implements Observer {\n /**\n * A static factory for a Subscriber, given a (potentially partial) definition\n * of an Observer.\n * @param next The `next` callback of an Observer.\n * @param error The `error` callback of an\n * Observer.\n * @param complete The `complete` callback of an\n * Observer.\n * @return A Subscriber wrapping the (partially defined)\n * Observer represented by the given arguments.\n * @nocollapse\n * @deprecated Do not use. Will be removed in v8. There is no replacement for this\n * method, and there is no reason to be creating instances of `Subscriber` directly.\n * If you have a specific use case, please file an issue.\n */\n static create(next?: (x?: T) => void, error?: (e?: any) => void, complete?: () => void): Subscriber {\n return new SafeSubscriber(next, error, complete);\n }\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n protected isStopped: boolean = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n protected destination: Subscriber | Observer; // this `any` is the escape hatch to erase extra type param (e.g. R)\n\n /**\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n * There is no reason to directly create an instance of Subscriber. 
This type is exported for typings reasons.\n */\n constructor(destination?: Subscriber | Observer) {\n super();\n if (destination) {\n this.destination = destination;\n // Automatically chain subscriptions together here.\n // if destination is a Subscription, then it is a Subscriber.\n if (isSubscription(destination)) {\n destination.add(this);\n }\n } else {\n this.destination = EMPTY_OBSERVER;\n }\n }\n\n /**\n * The {@link Observer} callback to receive notifications of type `next` from\n * the Observable, with a value. The Observable may call this method 0 or more\n * times.\n * @param {T} [value] The `next` value.\n * @return {void}\n */\n next(value?: T): void {\n if (this.isStopped) {\n handleStoppedNotification(nextNotification(value), this);\n } else {\n this._next(value!);\n }\n }\n\n /**\n * The {@link Observer} callback to receive notifications of type `error` from\n * the Observable, with an attached `Error`. Notifies the Observer that\n * the Observable has experienced an error condition.\n * @param {any} [err] The `error` exception.\n * @return {void}\n */\n error(err?: any): void {\n if (this.isStopped) {\n handleStoppedNotification(errorNotification(err), this);\n } else {\n this.isStopped = true;\n this._error(err);\n }\n }\n\n /**\n * The {@link Observer} callback to receive a valueless notification of type\n * `complete` from the Observable. Notifies the Observer that the Observable\n * has finished sending push-based notifications.\n * @return {void}\n */\n complete(): void {\n if (this.isStopped) {\n handleStoppedNotification(COMPLETE_NOTIFICATION, this);\n } else {\n this.isStopped = true;\n this._complete();\n }\n }\n\n unsubscribe(): void {\n if (!this.closed) {\n this.isStopped = true;\n super.unsubscribe();\n this.destination = null!;\n }\n }\n\n protected _next(value: T): void {\n this.destination.next(value);\n }\n\n protected _error(err: any): void {\n try {\n this.destination.error(err);\n } finally {\n this.unsubscribe();\n }\n }\n\n protected _complete(): void {\n try {\n this.destination.complete();\n } finally {\n this.unsubscribe();\n }\n }\n}\n\n/**\n * This bind is captured here because we want to be able to have\n * compatibility with monoid libraries that tend to use a method named\n * `bind`. 
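A sketch (editor's addition) of the "stopped subscriber" semantics above: once `complete()` or `error()` has been called, further notifications are dropped, or routed to `onStoppedNotification` when that hook is configured.

```ts
import { config, Observable } from 'rxjs';

config.onStoppedNotification = (n) => console.warn('late notification:', n.kind);

new Observable<number>((subscriber) => {
  subscriber.next(1);
  subscriber.complete();
  subscriber.next(2); // subscriber is stopped; reported asynchronously as kind 'N'
}).subscribe((v) => console.log('got', v));
// Logs: 'got 1', then 'late notification: N'
```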
In particular, a library called Monio requires this.\n */\nconst _bind = Function.prototype.bind;\n\nfunction bind any>(fn: Fn, thisArg: any): Fn {\n return _bind.call(fn, thisArg);\n}\n\n/**\n * Internal optimization only, DO NOT EXPOSE.\n * @internal\n */\nclass ConsumerObserver implements Observer {\n constructor(private partialObserver: Partial>) {}\n\n next(value: T): void {\n const { partialObserver } = this;\n if (partialObserver.next) {\n try {\n partialObserver.next(value);\n } catch (error) {\n handleUnhandledError(error);\n }\n }\n }\n\n error(err: any): void {\n const { partialObserver } = this;\n if (partialObserver.error) {\n try {\n partialObserver.error(err);\n } catch (error) {\n handleUnhandledError(error);\n }\n } else {\n handleUnhandledError(err);\n }\n }\n\n complete(): void {\n const { partialObserver } = this;\n if (partialObserver.complete) {\n try {\n partialObserver.complete();\n } catch (error) {\n handleUnhandledError(error);\n }\n }\n }\n}\n\nexport class SafeSubscriber extends Subscriber {\n constructor(\n observerOrNext?: Partial> | ((value: T) => void) | null,\n error?: ((e?: any) => void) | null,\n complete?: (() => void) | null\n ) {\n super();\n\n let partialObserver: Partial>;\n if (isFunction(observerOrNext) || !observerOrNext) {\n // The first argument is a function, not an observer. The next\n // two arguments *could* be observers, or they could be empty.\n partialObserver = {\n next: (observerOrNext ?? undefined) as (((value: T) => void) | undefined),\n error: error ?? undefined,\n complete: complete ?? undefined,\n };\n } else {\n // The first argument is a partial observer.\n let context: any;\n if (this && config.useDeprecatedNextContext) {\n // This is a deprecated path that made `this.unsubscribe()` available in\n // next handler functions passed to subscribe. This only exists behind a flag\n // now, as it is *very* slow.\n context = Object.create(observerOrNext);\n context.unsubscribe = () => this.unsubscribe();\n partialObserver = {\n next: observerOrNext.next && bind(observerOrNext.next, context),\n error: observerOrNext.error && bind(observerOrNext.error, context),\n complete: observerOrNext.complete && bind(observerOrNext.complete, context),\n };\n } else {\n // The \"normal\" path. 
Just use the partial observer directly.\n partialObserver = observerOrNext;\n }\n }\n\n // Wrap the partial observer to ensure it's a full observer, and\n // make sure proper error handling is accounted for.\n this.destination = new ConsumerObserver(partialObserver);\n }\n}\n\nfunction handleUnhandledError(error: any) {\n if (config.useDeprecatedSynchronousErrorHandling) {\n captureError(error);\n } else {\n // Ideal path, we report this as an unhandled error,\n // which is thrown on a new call stack.\n reportUnhandledError(error);\n }\n}\n\n/**\n * An error handler used when no error handler was supplied\n * to the SafeSubscriber -- meaning no error handler was supplied\n * do the `subscribe` call on our observable.\n * @param err The error to handle\n */\nfunction defaultErrorHandler(err: any) {\n throw err;\n}\n\n/**\n * A handler for notifications that cannot be sent to a stopped subscriber.\n * @param notification The notification being sent\n * @param subscriber The stopped subscriber\n */\nfunction handleStoppedNotification(notification: ObservableNotification, subscriber: Subscriber) {\n const { onStoppedNotification } = config;\n onStoppedNotification && timeoutProvider.setTimeout(() => onStoppedNotification(notification, subscriber));\n}\n\n/**\n * The observer used as a stub for subscriptions where the user did not\n * pass any arguments to `subscribe`. Comes with the default error handling\n * behavior.\n */\nexport const EMPTY_OBSERVER: Readonly> & { closed: true } = {\n closed: true,\n next: noop,\n error: defaultErrorHandler,\n complete: noop,\n};\n", "/**\n * Symbol.observable or a string \"@@observable\". Used for interop\n *\n * @deprecated We will no longer be exporting this symbol in upcoming versions of RxJS.\n * Instead polyfill and use Symbol.observable directly *or* use https://www.npmjs.com/package/symbol-observable\n */\nexport const observable: string | symbol = (() => (typeof Symbol === 'function' && Symbol.observable) || '@@observable')();\n", "/**\n * This function takes one parameter and just returns it. Simply put,\n * this is like `(x: T): T => x`.\n *\n * ## Examples\n *\n * This is useful in some cases when using things like `mergeMap`\n *\n * ```ts\n * import { interval, take, map, range, mergeMap, identity } from 'rxjs';\n *\n * const source$ = interval(1000).pipe(take(5));\n *\n * const result$ = source$.pipe(\n * map(i => range(i)),\n * mergeMap(identity) // same as mergeMap(x => x)\n * );\n *\n * result$.subscribe({\n * next: console.log\n * });\n * ```\n *\n * Or when you want to selectively apply an operator\n *\n * ```ts\n * import { interval, take, identity } from 'rxjs';\n *\n * const shouldLimit = () => Math.random() < 0.5;\n *\n * const source$ = interval(1000);\n *\n * const result$ = source$.pipe(shouldLimit() ? 
take(5) : identity);\n *\n * result$.subscribe({\n * next: console.log\n * });\n * ```\n *\n * @param x Any value that is returned by this function\n * @returns The value passed as the first parameter to this function\n */\nexport function identity(x: T): T {\n return x;\n}\n", "import { identity } from './identity';\nimport { UnaryFunction } from '../types';\n\nexport function pipe(): typeof identity;\nexport function pipe(fn1: UnaryFunction): UnaryFunction;\nexport function pipe(fn1: UnaryFunction, fn2: UnaryFunction): UnaryFunction;\nexport function pipe(fn1: UnaryFunction, fn2: UnaryFunction, fn3: UnaryFunction): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction,\n fn9: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction,\n fn9: UnaryFunction,\n ...fns: UnaryFunction[]\n): UnaryFunction;\n\n/**\n * pipe() can be called on one or more functions, each of which can take one argument (\"UnaryFunction\")\n * and uses it to return a value.\n * It returns a function that takes one argument, passes it to the first UnaryFunction, and then\n * passes the result to the next one, passes that result to the next one, and so on. \n */\nexport function pipe(...fns: Array>): UnaryFunction {\n return pipeFromArray(fns);\n}\n\n/** @internal */\nexport function pipeFromArray(fns: Array>): UnaryFunction {\n if (fns.length === 0) {\n return identity as UnaryFunction;\n }\n\n if (fns.length === 1) {\n return fns[0];\n }\n\n return function piped(input: T): R {\n return fns.reduce((prev: any, fn: UnaryFunction) => fn(prev), input as any);\n };\n}\n", "import { Operator } from './Operator';\nimport { SafeSubscriber, Subscriber } from './Subscriber';\nimport { isSubscription, Subscription } from './Subscription';\nimport { TeardownLogic, OperatorFunction, Subscribable, Observer } from './types';\nimport { observable as Symbol_observable } from './symbol/observable';\nimport { pipeFromArray } from './util/pipe';\nimport { config } from './config';\nimport { isFunction } from './util/isFunction';\nimport { errorContext } from './util/errorContext';\n\n/**\n * A representation of any set of values over any amount of time. This is the most basic building block\n * of RxJS.\n *\n * @class Observable\n */\nexport class Observable implements Subscribable {\n /**\n * @deprecated Internal implementation detail, do not use directly. 
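A short sketch (editor's addition): as described above, `pipe()` also composes plain unary functions outside any Observable context, feeding each function's result into the next.

```ts
import { pipe } from 'rxjs';

const toCelsius = (f: number) => ((f - 32) * 5) / 9;
const round = (x: number) => Math.round(x);
const label = (c: number) => `${c} °C`;

const convert = pipe(toCelsius, round, label);
console.log(convert(98.6)); // '37 °C'
```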
Will be made internal in v8.\n */\n source: Observable | undefined;\n\n /**\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n */\n operator: Operator | undefined;\n\n /**\n * @constructor\n * @param {Function} subscribe the function that is called when the Observable is\n * initially subscribed to. This function is given a Subscriber, to which new values\n * can be `next`ed, or an `error` method can be called to raise an error, or\n * `complete` can be called to notify of a successful completion.\n */\n constructor(subscribe?: (this: Observable, subscriber: Subscriber) => TeardownLogic) {\n if (subscribe) {\n this._subscribe = subscribe;\n }\n }\n\n // HACK: Since TypeScript inherits static properties too, we have to\n // fight against TypeScript here so Subject can have a different static create signature\n /**\n * Creates a new Observable by calling the Observable constructor\n * @owner Observable\n * @method create\n * @param {Function} subscribe? the subscriber function to be passed to the Observable constructor\n * @return {Observable} a new observable\n * @nocollapse\n * @deprecated Use `new Observable()` instead. Will be removed in v8.\n */\n static create: (...args: any[]) => any = (subscribe?: (subscriber: Subscriber) => TeardownLogic) => {\n return new Observable(subscribe);\n };\n\n /**\n * Creates a new Observable, with this Observable instance as the source, and the passed\n * operator defined as the new observable's operator.\n * @method lift\n * @param operator the operator defining the operation to take on the observable\n * @return a new observable with the Operator applied\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n * If you have implemented an operator using `lift`, it is recommended that you create an\n * operator by simply returning `new Observable()` directly. See \"Creating new operators from\n * scratch\" section here: https://rxjs.dev/guide/operators\n */\n lift(operator?: Operator): Observable {\n const observable = new Observable();\n observable.source = this;\n observable.operator = operator;\n return observable;\n }\n\n subscribe(observerOrNext?: Partial> | ((value: T) => void)): Subscription;\n /** @deprecated Instead of passing separate callback arguments, use an observer argument. Signatures taking separate callback arguments will be removed in v8. Details: https://rxjs.dev/deprecations/subscribe-arguments */\n subscribe(next?: ((value: T) => void) | null, error?: ((error: any) => void) | null, complete?: (() => void) | null): Subscription;\n /**\n * Invokes an execution of an Observable and registers Observer handlers for notifications it will emit.\n *\n * Use it when you have all these Observables, but still nothing is happening.\n *\n * `subscribe` is not a regular operator, but a method that calls Observable's internal `subscribe` function. It\n * might be for example a function that you passed to Observable's constructor, but most of the time it is\n * a library implementation, which defines what will be emitted by an Observable, and when it be will emitted. This means\n * that calling `subscribe` is actually the moment when Observable starts its work, not when it is created, as it is often\n * the thought.\n *\n * Apart from starting the execution of an Observable, this method allows you to listen for values\n * that an Observable emits, as well as for when it completes or errors. 
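The `lift()` deprecation note above recommends building operators by returning `new Observable(...)` directly; here is a minimal sketch of that approach (editor's addition; `double` is a made-up operator name).

```ts
import { Observable, of } from 'rxjs';

function double(source: Observable<number>): Observable<number> {
  return new Observable((subscriber) =>
    source.subscribe({
      next: (v) => subscriber.next(v * 2),
      error: (e) => subscriber.error(e),
      complete: () => subscriber.complete(),
    })
  );
}

double(of(1, 2)).subscribe((v) => console.log(v)); // 2, 4
```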
You can achieve this in two\n * of the following ways.\n *\n * The first way is creating an object that implements {@link Observer} interface. It should have methods\n * defined by that interface, but note that it should be just a regular JavaScript object, which you can create\n * yourself in any way you want (ES6 class, classic function constructor, object literal etc.). In particular, do\n * not attempt to use any RxJS implementation details to create Observers - you don't need them. Remember also\n * that your object does not have to implement all methods. If you find yourself creating a method that doesn't\n * do anything, you can simply omit it. Note however, if the `error` method is not provided and an error happens,\n * it will be thrown asynchronously. Errors thrown asynchronously cannot be caught using `try`/`catch`. Instead,\n * use the {@link onUnhandledError} configuration option or use a runtime handler (like `window.onerror` or\n * `process.on('error)`) to be notified of unhandled errors. Because of this, it's recommended that you provide\n * an `error` method to avoid missing thrown errors.\n *\n * The second way is to give up on Observer object altogether and simply provide callback functions in place of its methods.\n * This means you can provide three functions as arguments to `subscribe`, where the first function is equivalent\n * of a `next` method, the second of an `error` method and the third of a `complete` method. Just as in case of an Observer,\n * if you do not need to listen for something, you can omit a function by passing `undefined` or `null`,\n * since `subscribe` recognizes these functions by where they were placed in function call. When it comes\n * to the `error` function, as with an Observer, if not provided, errors emitted by an Observable will be thrown asynchronously.\n *\n * You can, however, subscribe with no parameters at all. This may be the case where you're not interested in terminal events\n * and you also handled emissions internally by using operators (e.g. using `tap`).\n *\n * Whichever style of calling `subscribe` you use, in both cases it returns a Subscription object.\n * This object allows you to call `unsubscribe` on it, which in turn will stop the work that an Observable does and will clean\n * up all resources that an Observable used. Note that cancelling a subscription will not call `complete` callback\n * provided to `subscribe` function, which is reserved for a regular completion signal that comes from an Observable.\n *\n * Remember that callbacks provided to `subscribe` are not guaranteed to be called asynchronously.\n * It is an Observable itself that decides when these functions will be called. For example {@link of}\n * by default emits all its values synchronously. 
Always check documentation for how given Observable\n * will behave when subscribed and if its default behavior can be modified with a `scheduler`.\n *\n * #### Examples\n *\n * Subscribe with an {@link guide/observer Observer}\n *\n * ```ts\n * import { of } from 'rxjs';\n *\n * const sumObserver = {\n * sum: 0,\n * next(value) {\n * console.log('Adding: ' + value);\n * this.sum = this.sum + value;\n * },\n * error() {\n * // We actually could just remove this method,\n * // since we do not really care about errors right now.\n * },\n * complete() {\n * console.log('Sum equals: ' + this.sum);\n * }\n * };\n *\n * of(1, 2, 3) // Synchronously emits 1, 2, 3 and then completes.\n * .subscribe(sumObserver);\n *\n * // Logs:\n * // 'Adding: 1'\n * // 'Adding: 2'\n * // 'Adding: 3'\n * // 'Sum equals: 6'\n * ```\n *\n * Subscribe with functions ({@link deprecations/subscribe-arguments deprecated})\n *\n * ```ts\n * import { of } from 'rxjs'\n *\n * let sum = 0;\n *\n * of(1, 2, 3).subscribe(\n * value => {\n * console.log('Adding: ' + value);\n * sum = sum + value;\n * },\n * undefined,\n * () => console.log('Sum equals: ' + sum)\n * );\n *\n * // Logs:\n * // 'Adding: 1'\n * // 'Adding: 2'\n * // 'Adding: 3'\n * // 'Sum equals: 6'\n * ```\n *\n * Cancel a subscription\n *\n * ```ts\n * import { interval } from 'rxjs';\n *\n * const subscription = interval(1000).subscribe({\n * next(num) {\n * console.log(num)\n * },\n * complete() {\n * // Will not be called, even when cancelling subscription.\n * console.log('completed!');\n * }\n * });\n *\n * setTimeout(() => {\n * subscription.unsubscribe();\n * console.log('unsubscribed!');\n * }, 2500);\n *\n * // Logs:\n * // 0 after 1s\n * // 1 after 2s\n * // 'unsubscribed!' after 2.5s\n * ```\n *\n * @param {Observer|Function} observerOrNext (optional) Either an observer with methods to be called,\n * or the first of three possible handlers, which is the handler for each value emitted from the subscribed\n * Observable.\n * @param {Function} error (optional) A handler for a terminal event resulting from an error. If no error handler is provided,\n * the error will be thrown asynchronously as unhandled.\n * @param {Function} complete (optional) A handler for a terminal event resulting from successful completion.\n * @return {Subscription} a subscription reference to the registered handlers\n * @method subscribe\n */\n subscribe(\n observerOrNext?: Partial> | ((value: T) => void) | null,\n error?: ((error: any) => void) | null,\n complete?: (() => void) | null\n ): Subscription {\n const subscriber = isSubscriber(observerOrNext) ? observerOrNext : new SafeSubscriber(observerOrNext, error, complete);\n\n errorContext(() => {\n const { operator, source } = this;\n subscriber.add(\n operator\n ? // We're dealing with a subscription in the\n // operator chain to one of our lifted operators.\n operator.call(subscriber, source)\n : source\n ? // If `source` has a value, but `operator` does not, something that\n // had intimate knowledge of our API, like our `Subject`, must have\n // set it. 
We're going to just call `_subscribe` directly.\n this._subscribe(subscriber)\n : // In all other cases, we're likely wrapping a user-provided initializer\n // function, so we need to catch errors and handle them appropriately.\n this._trySubscribe(subscriber)\n );\n });\n\n return subscriber;\n }\n\n /** @internal */\n protected _trySubscribe(sink: Subscriber): TeardownLogic {\n try {\n return this._subscribe(sink);\n } catch (err) {\n // We don't need to return anything in this case,\n // because it's just going to try to `add()` to a subscription\n // above.\n sink.error(err);\n }\n }\n\n /**\n * Used as a NON-CANCELLABLE means of subscribing to an observable, for use with\n * APIs that expect promises, like `async/await`. You cannot unsubscribe from this.\n *\n * **WARNING**: Only use this with observables you *know* will complete. If the source\n * observable does not complete, you will end up with a promise that is hung up, and\n * potentially all of the state of an async function hanging out in memory. To avoid\n * this situation, look into adding something like {@link timeout}, {@link take},\n * {@link takeWhile}, or {@link takeUntil} amongst others.\n *\n * #### Example\n *\n * ```ts\n * import { interval, take } from 'rxjs';\n *\n * const source$ = interval(1000).pipe(take(4));\n *\n * async function getTotal() {\n * let total = 0;\n *\n * await source$.forEach(value => {\n * total += value;\n * console.log('observable -> ' + value);\n * });\n *\n * return total;\n * }\n *\n * getTotal().then(\n * total => console.log('Total: ' + total)\n * );\n *\n * // Expected:\n * // 'observable -> 0'\n * // 'observable -> 1'\n * // 'observable -> 2'\n * // 'observable -> 3'\n * // 'Total: 6'\n * ```\n *\n * @param next a handler for each value emitted by the observable\n * @return a promise that either resolves on observable completion or\n * rejects with the handled error\n */\n forEach(next: (value: T) => void): Promise;\n\n /**\n * @param next a handler for each value emitted by the observable\n * @param promiseCtor a constructor function used to instantiate the Promise\n * @return a promise that either resolves on observable completion or\n * rejects with the handled error\n * @deprecated Passing a Promise constructor will no longer be available\n * in upcoming versions of RxJS. This is because it adds weight to the library, for very\n * little benefit. If you need this functionality, it is recommended that you either\n * polyfill Promise, or you create an adapter to convert the returned native promise\n * to whatever promise implementation you wanted. 
Will be removed in v8.\n */\n forEach(next: (value: T) => void, promiseCtor: PromiseConstructorLike): Promise;\n\n forEach(next: (value: T) => void, promiseCtor?: PromiseConstructorLike): Promise {\n promiseCtor = getPromiseCtor(promiseCtor);\n\n return new promiseCtor((resolve, reject) => {\n const subscriber = new SafeSubscriber({\n next: (value) => {\n try {\n next(value);\n } catch (err) {\n reject(err);\n subscriber.unsubscribe();\n }\n },\n error: reject,\n complete: resolve,\n });\n this.subscribe(subscriber);\n }) as Promise;\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): TeardownLogic {\n return this.source?.subscribe(subscriber);\n }\n\n /**\n * An interop point defined by the es7-observable spec https://github.com/zenparsing/es-observable\n * @method Symbol.observable\n * @return {Observable} this instance of the observable\n */\n [Symbol_observable]() {\n return this;\n }\n\n /* tslint:disable:max-line-length */\n pipe(): Observable;\n pipe(op1: OperatorFunction): Observable;\n pipe(op1: OperatorFunction, op2: OperatorFunction): Observable;\n pipe(op1: OperatorFunction, op2: OperatorFunction, op3: OperatorFunction): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction,\n op9: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction,\n op9: OperatorFunction,\n ...operations: OperatorFunction[]\n ): Observable;\n /* tslint:enable:max-line-length */\n\n /**\n * Used to stitch together functional operators into a chain.\n * @method pipe\n * @return {Observable} the Observable result of all of the operators having\n * been called in the order they were passed in.\n *\n * ## Example\n *\n * ```ts\n * import { interval, filter, map, scan } from 'rxjs';\n *\n * interval(1000)\n * .pipe(\n * filter(x => x % 2 === 0),\n * map(x => x + x),\n * scan((acc, x) => acc + x)\n * )\n * .subscribe(x => console.log(x));\n * ```\n */\n pipe(...operations: OperatorFunction[]): Observable {\n return pipeFromArray(operations)(this);\n }\n\n /* tslint:disable:max-line-length */\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(): Promise;\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. 
Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(PromiseCtor: typeof Promise): Promise;\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(PromiseCtor: PromiseConstructorLike): Promise;\n /* tslint:enable:max-line-length */\n\n /**\n * Subscribe to this Observable and get a Promise resolving on\n * `complete` with the last emission (if any).\n *\n * **WARNING**: Only use this with observables you *know* will complete. If the source\n * observable does not complete, you will end up with a promise that is hung up, and\n * potentially all of the state of an async function hanging out in memory. To avoid\n * this situation, look into adding something like {@link timeout}, {@link take},\n * {@link takeWhile}, or {@link takeUntil} amongst others.\n *\n * @method toPromise\n * @param [promiseCtor] a constructor function used to instantiate\n * the Promise\n * @return A Promise that resolves with the last value emit, or\n * rejects on an error. If there were no emissions, Promise\n * resolves with undefined.\n * @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise\n */\n toPromise(promiseCtor?: PromiseConstructorLike): Promise {\n promiseCtor = getPromiseCtor(promiseCtor);\n\n return new promiseCtor((resolve, reject) => {\n let value: T | undefined;\n this.subscribe(\n (x: T) => (value = x),\n (err: any) => reject(err),\n () => resolve(value)\n );\n }) as Promise;\n }\n}\n\n/**\n * Decides between a passed promise constructor from consuming code,\n * A default configured promise constructor, and the native promise\n * constructor and returns it. If nothing can be found, it will throw\n * an error.\n * @param promiseCtor The optional promise constructor to passed by consuming code\n */\nfunction getPromiseCtor(promiseCtor: PromiseConstructorLike | undefined) {\n return promiseCtor ?? config.Promise ?? Promise;\n}\n\nfunction isObserver(value: any): value is Observer {\n return value && isFunction(value.next) && isFunction(value.error) && isFunction(value.complete);\n}\n\nfunction isSubscriber(value: any): value is Subscriber {\n return (value && value instanceof Subscriber) || (isObserver(value) && isSubscription(value));\n}\n", "import { Observable } from '../Observable';\nimport { Subscriber } from '../Subscriber';\nimport { OperatorFunction } from '../types';\nimport { isFunction } from './isFunction';\n\n/**\n * Used to determine if an object is an Observable with a lift function.\n */\nexport function hasLift(source: any): source is { lift: InstanceType['lift'] } {\n return isFunction(source?.lift);\n}\n\n/**\n * Creates an `OperatorFunction`. 
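The deprecation notes above point to `firstValueFrom` and `lastValueFrom` as the replacements for `toPromise()`; a minimal sketch (editor's addition):

```ts
import { of, firstValueFrom, lastValueFrom } from 'rxjs';

async function demo() {
  console.log(await firstValueFrom(of(1, 2, 3))); // 1
  console.log(await lastValueFrom(of(1, 2, 3)));  // 3
}
demo();
```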
Used to define operators throughout the library in a concise way.\n * @param init The logic to connect the liftedSource to the subscriber at the moment of subscription.\n */\nexport function operate(\n init: (liftedSource: Observable, subscriber: Subscriber) => (() => void) | void\n): OperatorFunction {\n return (source: Observable) => {\n if (hasLift(source)) {\n return source.lift(function (this: Subscriber, liftedSource: Observable) {\n try {\n return init(liftedSource, this);\n } catch (err) {\n this.error(err);\n }\n });\n }\n throw new TypeError('Unable to lift unknown Observable type');\n };\n}\n", "import { Subscriber } from '../Subscriber';\n\n/**\n * Creates an instance of an `OperatorSubscriber`.\n * @param destination The downstream subscriber.\n * @param onNext Handles next values, only called if this subscriber is not stopped or closed. Any\n * error that occurs in this function is caught and sent to the `error` method of this subscriber.\n * @param onError Handles errors from the subscription, any errors that occur in this handler are caught\n * and send to the `destination` error handler.\n * @param onComplete Handles completion notification from the subscription. Any errors that occur in\n * this handler are sent to the `destination` error handler.\n * @param onFinalize Additional teardown logic here. This will only be called on teardown if the\n * subscriber itself is not already closed. This is called after all other teardown logic is executed.\n */\nexport function createOperatorSubscriber(\n destination: Subscriber,\n onNext?: (value: T) => void,\n onComplete?: () => void,\n onError?: (err: any) => void,\n onFinalize?: () => void\n): Subscriber {\n return new OperatorSubscriber(destination, onNext, onComplete, onError, onFinalize);\n}\n\n/**\n * A generic helper for allowing operators to be created with a Subscriber and\n * use closures to capture necessary state from the operator function itself.\n */\nexport class OperatorSubscriber extends Subscriber {\n /**\n * Creates an instance of an `OperatorSubscriber`.\n * @param destination The downstream subscriber.\n * @param onNext Handles next values, only called if this subscriber is not stopped or closed. Any\n * error that occurs in this function is caught and sent to the `error` method of this subscriber.\n * @param onError Handles errors from the subscription, any errors that occur in this handler are caught\n * and send to the `destination` error handler.\n * @param onComplete Handles completion notification from the subscription. Any errors that occur in\n * this handler are sent to the `destination` error handler.\n * @param onFinalize Additional finalization logic here. This will only be called on finalization if the\n * subscriber itself is not already closed. This is called after all other finalization logic is executed.\n * @param shouldUnsubscribe An optional check to see if an unsubscribe call should truly unsubscribe.\n * NOTE: This currently **ONLY** exists to support the strange behavior of {@link groupBy}, where unsubscription\n * to the resulting observable does not actually disconnect from the source if there are active subscriptions\n * to any grouped observable. 
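An editor's sketch of how library operators are written with the `operate()` and `createOperatorSubscriber()` helpers defined above; this mirrors the shape of the real `map` operator. `scale` is a made-up operator name, and these helpers are internal, not public RxJS API.

```ts
// Assumes operate and createOperatorSubscriber from above are in scope.
function scale(factor: number) {
  return operate((source, subscriber) => {
    source.subscribe(
      createOperatorSubscriber(subscriber, (value: number) =>
        subscriber.next(value * factor)
      )
    );
  });
}
// Usage: source$.pipe(scale(10)) would multiply every emission by 10.
```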
(DO NOT EXPOSE OR USE EXTERNALLY!!!)\n */\n constructor(\n destination: Subscriber,\n onNext?: (value: T) => void,\n onComplete?: () => void,\n onError?: (err: any) => void,\n private onFinalize?: () => void,\n private shouldUnsubscribe?: () => boolean\n ) {\n // It's important - for performance reasons - that all of this class's\n // members are initialized and that they are always initialized in the same\n // order. This will ensure that all OperatorSubscriber instances have the\n // same hidden class in V8. This, in turn, will help keep the number of\n // hidden classes involved in property accesses within the base class as\n // low as possible. If the number of hidden classes involved exceeds four,\n // the property accesses will become megamorphic and performance penalties\n // will be incurred - i.e. inline caches won't be used.\n //\n // The reasons for ensuring all instances have the same hidden class are\n // further discussed in this blog post from Benedikt Meurer:\n // https://benediktmeurer.de/2018/03/23/impact-of-polymorphism-on-component-based-frameworks-like-react/\n super(destination);\n this._next = onNext\n ? function (this: OperatorSubscriber, value: T) {\n try {\n onNext(value);\n } catch (err) {\n destination.error(err);\n }\n }\n : super._next;\n this._error = onError\n ? function (this: OperatorSubscriber, err: any) {\n try {\n onError(err);\n } catch (err) {\n // Send any errors that occur down stream.\n destination.error(err);\n } finally {\n // Ensure finalization.\n this.unsubscribe();\n }\n }\n : super._error;\n this._complete = onComplete\n ? function (this: OperatorSubscriber) {\n try {\n onComplete();\n } catch (err) {\n // Send any errors that occur down stream.\n destination.error(err);\n } finally {\n // Ensure finalization.\n this.unsubscribe();\n }\n }\n : super._complete;\n }\n\n unsubscribe() {\n if (!this.shouldUnsubscribe || this.shouldUnsubscribe()) {\n const { closed } = this;\n super.unsubscribe();\n // Execute additional teardown if we have any and we didn't already do so.\n !closed && this.onFinalize?.();\n }\n }\n}\n", "import { Subscription } from '../Subscription';\n\ninterface AnimationFrameProvider {\n schedule(callback: FrameRequestCallback): Subscription;\n requestAnimationFrame: typeof requestAnimationFrame;\n cancelAnimationFrame: typeof cancelAnimationFrame;\n delegate:\n | {\n requestAnimationFrame: typeof requestAnimationFrame;\n cancelAnimationFrame: typeof cancelAnimationFrame;\n }\n | undefined;\n}\n\nexport const animationFrameProvider: AnimationFrameProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n schedule(callback) {\n let request = requestAnimationFrame;\n let cancel: typeof cancelAnimationFrame | undefined = cancelAnimationFrame;\n const { delegate } = animationFrameProvider;\n if (delegate) {\n request = delegate.requestAnimationFrame;\n cancel = delegate.cancelAnimationFrame;\n }\n const handle = request((timestamp) => {\n // Clear the cancel function. 
The request has been fulfilled, so\n // attempting to cancel the request upon unsubscription would be\n // pointless.\n cancel = undefined;\n callback(timestamp);\n });\n return new Subscription(() => cancel?.(handle));\n },\n requestAnimationFrame(...args) {\n const { delegate } = animationFrameProvider;\n return (delegate?.requestAnimationFrame || requestAnimationFrame)(...args);\n },\n cancelAnimationFrame(...args) {\n const { delegate } = animationFrameProvider;\n return (delegate?.cancelAnimationFrame || cancelAnimationFrame)(...args);\n },\n delegate: undefined,\n};\n", "import { createErrorClass } from './createErrorClass';\n\nexport interface ObjectUnsubscribedError extends Error {}\n\nexport interface ObjectUnsubscribedErrorCtor {\n /**\n * @deprecated Internal implementation detail. Do not construct error instances.\n * Cannot be tagged as internal: https://github.com/ReactiveX/rxjs/issues/6269\n */\n new (): ObjectUnsubscribedError;\n}\n\n/**\n * An error thrown when an action is invalid because the object has been\n * unsubscribed.\n *\n * @see {@link Subject}\n * @see {@link BehaviorSubject}\n *\n * @class ObjectUnsubscribedError\n */\nexport const ObjectUnsubscribedError: ObjectUnsubscribedErrorCtor = createErrorClass(\n (_super) =>\n function ObjectUnsubscribedErrorImpl(this: any) {\n _super(this);\n this.name = 'ObjectUnsubscribedError';\n this.message = 'object unsubscribed';\n }\n);\n", "import { Operator } from './Operator';\nimport { Observable } from './Observable';\nimport { Subscriber } from './Subscriber';\nimport { Subscription, EMPTY_SUBSCRIPTION } from './Subscription';\nimport { Observer, SubscriptionLike, TeardownLogic } from './types';\nimport { ObjectUnsubscribedError } from './util/ObjectUnsubscribedError';\nimport { arrRemove } from './util/arrRemove';\nimport { errorContext } from './util/errorContext';\n\n/**\n * A Subject is a special type of Observable that allows values to be\n * multicasted to many Observers. Subjects are like EventEmitters.\n *\n * Every Subject is an Observable and an Observer. You can subscribe to a\n * Subject, and you can call next to feed values as well as error and complete.\n */\nexport class Subject extends Observable implements SubscriptionLike {\n closed = false;\n\n private currentObservers: Observer[] | null = null;\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n observers: Observer[] = [];\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n isStopped = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n hasError = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n thrownError: any = null;\n\n /**\n * Creates a \"subject\" by basically gluing an observer to an observable.\n *\n * @nocollapse\n * @deprecated Recommended you do not use. Will be removed at some point in the future. Plans for replacement still under discussion.\n */\n static create: (...args: any[]) => any = (destination: Observer, source: Observable): AnonymousSubject => {\n return new AnonymousSubject(destination, source);\n };\n\n constructor() {\n // NOTE: This must be here to obscure Observable's constructor.\n super();\n }\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. 
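A minimal sketch (editor's addition) of the multicasting described above: a single `next()` call on a `Subject` reaches every currently registered observer.

```ts
import { Subject } from 'rxjs';

const subject = new Subject<number>();
subject.subscribe((v) => console.log('A', v));
subject.subscribe((v) => console.log('B', v));
subject.next(42); // Logs 'A 42' then 'B 42'
```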
*/\n lift(operator: Operator): Observable {\n const subject = new AnonymousSubject(this, this);\n subject.operator = operator as any;\n return subject as any;\n }\n\n /** @internal */\n protected _throwIfClosed() {\n if (this.closed) {\n throw new ObjectUnsubscribedError();\n }\n }\n\n next(value: T) {\n errorContext(() => {\n this._throwIfClosed();\n if (!this.isStopped) {\n if (!this.currentObservers) {\n this.currentObservers = Array.from(this.observers);\n }\n for (const observer of this.currentObservers) {\n observer.next(value);\n }\n }\n });\n }\n\n error(err: any) {\n errorContext(() => {\n this._throwIfClosed();\n if (!this.isStopped) {\n this.hasError = this.isStopped = true;\n this.thrownError = err;\n const { observers } = this;\n while (observers.length) {\n observers.shift()!.error(err);\n }\n }\n });\n }\n\n complete() {\n errorContext(() => {\n this._throwIfClosed();\n if (!this.isStopped) {\n this.isStopped = true;\n const { observers } = this;\n while (observers.length) {\n observers.shift()!.complete();\n }\n }\n });\n }\n\n unsubscribe() {\n this.isStopped = this.closed = true;\n this.observers = this.currentObservers = null!;\n }\n\n get observed() {\n return this.observers?.length > 0;\n }\n\n /** @internal */\n protected _trySubscribe(subscriber: Subscriber): TeardownLogic {\n this._throwIfClosed();\n return super._trySubscribe(subscriber);\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n this._throwIfClosed();\n this._checkFinalizedStatuses(subscriber);\n return this._innerSubscribe(subscriber);\n }\n\n /** @internal */\n protected _innerSubscribe(subscriber: Subscriber) {\n const { hasError, isStopped, observers } = this;\n if (hasError || isStopped) {\n return EMPTY_SUBSCRIPTION;\n }\n this.currentObservers = null;\n observers.push(subscriber);\n return new Subscription(() => {\n this.currentObservers = null;\n arrRemove(observers, subscriber);\n });\n }\n\n /** @internal */\n protected _checkFinalizedStatuses(subscriber: Subscriber) {\n const { hasError, thrownError, isStopped } = this;\n if (hasError) {\n subscriber.error(thrownError);\n } else if (isStopped) {\n subscriber.complete();\n }\n }\n\n /**\n * Creates a new Observable with this Subject as the source. You can do this\n * to create custom Observer-side logic of the Subject and conceal it from\n * code that uses the Observable.\n * @return {Observable} Observable that the Subject casts to\n */\n asObservable(): Observable {\n const observable: any = new Observable();\n observable.source = this;\n return observable;\n }\n}\n\n/**\n * @class AnonymousSubject\n */\nexport class AnonymousSubject extends Subject {\n constructor(\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n public destination?: Observer,\n source?: Observable\n ) {\n super();\n this.source = source;\n }\n\n next(value: T) {\n this.destination?.next?.(value);\n }\n\n error(err: any) {\n this.destination?.error?.(err);\n }\n\n complete() {\n this.destination?.complete?.();\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n return this.source?.subscribe(subscriber) ?? 
EMPTY_SUBSCRIPTION;\n }\n}\n", 
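"// Editorial sketch (not part of the RxJS sources above): minimal usage of\n// the Subject just defined, assuming only the public 'rxjs' entry point.\n// Values pushed through next() are multicast to every currently subscribed\n// observer.\nimport { Subject } from 'rxjs';\n\nconst subject = new Subject<number>();\n\nsubject.subscribe((value) => console.log('observer A got', value));\nsubject.subscribe((value) => console.log('observer B got', value));\n\nsubject.next(1); // logged by both observers\nsubject.next(2); // logged by both observers\nsubject.complete();\n", "import { Subject } from './Subject';\nimport { Subscriber } from './Subscriber';\nimport { Subscription } from './Subscription';\n\n/**\n * A variant of Subject that requires an initial value and emits its current\n * value whenever it is subscribed to.\n *\n * @class BehaviorSubject\n */\nexport class BehaviorSubject<T> extends Subject<T> {\n constructor(private _value: T) {\n super();\n }\n\n get value(): T {\n return this.getValue();\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber<T>): Subscription {\n const subscription = super._subscribe(subscriber);\n !subscription.closed && subscriber.next(this._value);\n return subscription;\n }\n\n getValue(): T {\n const { hasError, thrownError, _value } = this;\n if (hasError) {\n throw thrownError;\n }\n this._throwIfClosed();\n return _value;\n }\n\n next(value: T): void {\n super.next((this._value = value));\n }\n}\n", "import { TimestampProvider } from '../types';\n\ninterface DateTimestampProvider extends TimestampProvider {\n delegate: TimestampProvider | undefined;\n}\n\nexport const dateTimestampProvider: DateTimestampProvider = {\n now() {\n // Use the variable rather than `this` so that the function can be called\n // without being bound to the provider.\n return (dateTimestampProvider.delegate || Date).now();\n },\n delegate: undefined,\n};\n", "import { Subject } from './Subject';\nimport { TimestampProvider } from './types';\nimport { Subscriber } from './Subscriber';\nimport { Subscription } from './Subscription';\nimport { dateTimestampProvider } from './scheduler/dateTimestampProvider';\n\n/**\n * A variant of {@link Subject} that \"replays\" old values to new subscribers by emitting them when they first subscribe.\n *\n * `ReplaySubject` has an internal buffer that will store a specified number of values that it has observed. Like `Subject`,\n * `ReplaySubject` \"observes\" values by having them passed to its `next` method. When it observes a value, it will store that\n * value for a time determined by the configuration of the `ReplaySubject`, as passed to its constructor.\n *\n * When a new subscriber subscribes to the `ReplaySubject` instance, it will synchronously emit all values in its buffer in\n * a First-In-First-Out (FIFO) manner. The `ReplaySubject` will also complete, if it has observed completion; and it will\n * error if it has observed an error.\n *\n * There are two main configuration items to be concerned with:\n *\n * 1. `bufferSize` - This will determine how many items are stored in the buffer, defaults to infinite.\n * 2. `windowTime` - The amount of time to hold a value in the buffer before removing it from the buffer.\n *\n * Both configurations may exist simultaneously. So if you would like to buffer a maximum of 3 values, as long as the values\n * are less than 2 seconds old, you could do so with a `new ReplaySubject(3, 2000)`.\n *\n * ### Differences with BehaviorSubject\n *\n * `BehaviorSubject` is similar to `new ReplaySubject(1)`, with a couple of exceptions:\n *\n * 1. `BehaviorSubject` comes \"primed\" with a single value upon construction.\n * 2. 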
`ReplaySubject` will replay values, even after observing an error, where `BehaviorSubject` will not.\n *\n * @see {@link Subject}\n * @see {@link BehaviorSubject}\n * @see {@link shareReplay}\n */\nexport class ReplaySubject<T> extends Subject<T> {\n private _buffer: (T | number)[] = [];\n private _infiniteTimeWindow = true;\n\n /**\n * @param bufferSize The size of the buffer to replay on subscription\n * @param windowTime The amount of time the buffered items will stay buffered\n * @param timestampProvider An object with a `now()` method that provides the current timestamp. This is used to\n * calculate the amount of time something has been buffered.\n */\n constructor(\n private _bufferSize = Infinity,\n private _windowTime = Infinity,\n private _timestampProvider: TimestampProvider = dateTimestampProvider\n ) {\n super();\n this._infiniteTimeWindow = _windowTime === Infinity;\n this._bufferSize = Math.max(1, _bufferSize);\n this._windowTime = Math.max(1, _windowTime);\n }\n\n next(value: T): void {\n const { isStopped, _buffer, _infiniteTimeWindow, _timestampProvider, _windowTime } = this;\n if (!isStopped) {\n _buffer.push(value);\n !_infiniteTimeWindow && _buffer.push(_timestampProvider.now() + _windowTime);\n }\n this._trimBuffer();\n super.next(value);\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber<T>): Subscription {\n this._throwIfClosed();\n this._trimBuffer();\n\n const subscription = this._innerSubscribe(subscriber);\n\n const { _infiniteTimeWindow, _buffer } = this;\n // We use a copy here, so reentrant code does not mutate our array while we're\n // emitting it to a new subscriber.\n const copy = _buffer.slice();\n for (let i = 0; i < copy.length && !subscriber.closed; i += _infiniteTimeWindow ? 1 : 2) {\n subscriber.next(copy[i] as T);\n }\n\n this._checkFinalizedStatuses(subscriber);\n\n return subscription;\n }\n\n private _trimBuffer() {\n const { _bufferSize, _timestampProvider, _buffer, _infiniteTimeWindow } = this;\n // If we don't have an infinite buffer size, and we're over the length,\n // use splice to truncate the old buffer values off. Note that we have to\n // double the size for instances where we're not using an infinite time window\n // because we're storing the values and the timestamps in the same array.\n const adjustedBufferSize = (_infiniteTimeWindow ? 1 : 2) * _bufferSize;\n _bufferSize < Infinity && adjustedBufferSize < _buffer.length && _buffer.splice(0, _buffer.length - adjustedBufferSize);\n\n // Now, if we're not in an infinite time window, remove all values where the time is\n // older than what is allowed.\n if (!_infiniteTimeWindow) {\n const now = _timestampProvider.now();\n let last = 0;\n // Search the array for the first timestamp that isn't expired and\n // truncate the buffer up to that point.\n for (let i = 1; i < _buffer.length && (_buffer[i] as number) <= now; i += 2) {\n last = i;\n }\n last && _buffer.splice(0, last + 1);\n }\n }\n}\n", 
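"// Editorial sketch (not part of the RxJS sources above): the buffering\n// behavior just described, assuming only the public 'rxjs' entry point.\n// Up to 3 values are kept, each for at most 2 seconds, and they are\n// replayed synchronously to any late subscriber.\nimport { ReplaySubject } from 'rxjs';\n\nconst replay = new ReplaySubject<number>(3, 2000);\nreplay.next(1);\nreplay.next(2);\nreplay.next(3);\nreplay.next(4); // 1 falls out of the size-3 buffer\n\nreplay.subscribe((value) => console.log(value)); // logs 2, 3, 4 immediately\n", "import { Scheduler } from '../Scheduler';\nimport { Subscription } from '../Subscription';\nimport { SchedulerAction } from '../types';\n\n/**\n * A unit of work to be executed in a `scheduler`. 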
An action is typically\n * created from within a {@link SchedulerLike} and an RxJS user does not need to concern\n * themselves about creating and manipulating an Action.\n *\n * ```ts\n * class Action extends Subscription {\n * new (scheduler: Scheduler, work: (state?: T) => void);\n * schedule(state?: T, delay: number = 0): Subscription;\n * }\n * ```\n *\n * @class Action\n */\nexport class Action extends Subscription {\n constructor(scheduler: Scheduler, work: (this: SchedulerAction, state?: T) => void) {\n super();\n }\n /**\n * Schedules this action on its parent {@link SchedulerLike} for execution. May be passed\n * some context object, `state`. May happen at some point in the future,\n * according to the `delay` parameter, if specified.\n * @param {T} [state] Some contextual data that the `work` function uses when\n * called by the Scheduler.\n * @param {number} [delay] Time to wait before executing the work, where the\n * time unit is implicit and defined by the Scheduler.\n * @return {void}\n */\n public schedule(state?: T, delay: number = 0): Subscription {\n return this;\n }\n}\n", "import type { TimerHandle } from './timerHandle';\ntype SetIntervalFunction = (handler: () => void, timeout?: number, ...args: any[]) => TimerHandle;\ntype ClearIntervalFunction = (handle: TimerHandle) => void;\n\ninterface IntervalProvider {\n setInterval: SetIntervalFunction;\n clearInterval: ClearIntervalFunction;\n delegate:\n | {\n setInterval: SetIntervalFunction;\n clearInterval: ClearIntervalFunction;\n }\n | undefined;\n}\n\nexport const intervalProvider: IntervalProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n setInterval(handler: () => void, timeout?: number, ...args) {\n const { delegate } = intervalProvider;\n if (delegate?.setInterval) {\n return delegate.setInterval(handler, timeout, ...args);\n }\n return setInterval(handler, timeout, ...args);\n },\n clearInterval(handle) {\n const { delegate } = intervalProvider;\n return (delegate?.clearInterval || clearInterval)(handle as any);\n },\n delegate: undefined,\n};\n", "import { Action } from './Action';\nimport { SchedulerAction } from '../types';\nimport { Subscription } from '../Subscription';\nimport { AsyncScheduler } from './AsyncScheduler';\nimport { intervalProvider } from './intervalProvider';\nimport { arrRemove } from '../util/arrRemove';\nimport { TimerHandle } from './timerHandle';\n\nexport class AsyncAction extends Action {\n public id: TimerHandle | undefined;\n public state?: T;\n // @ts-ignore: Property has no initializer and is not definitely assigned\n public delay: number;\n protected pending: boolean = false;\n\n constructor(protected scheduler: AsyncScheduler, protected work: (this: SchedulerAction, state?: T) => void) {\n super(scheduler, work);\n }\n\n public schedule(state?: T, delay: number = 0): Subscription {\n if (this.closed) {\n return this;\n }\n\n // Always replace the current state with the new state.\n this.state = state;\n\n const id = this.id;\n const scheduler = this.scheduler;\n\n //\n // Important implementation note:\n //\n // Actions only execute once by default, unless rescheduled from within the\n // scheduled callback. 
This allows us to implement single and repeat\n // actions via the same code path, without adding API surface area, as well\n // as mimic traditional recursion but across asynchronous boundaries.\n //\n // However, JS runtimes and timers distinguish between intervals achieved by\n // serial `setTimeout` calls vs. a single `setInterval` call. An interval of\n // serial `setTimeout` calls can be individually delayed, which delays\n // scheduling the next `setTimeout`, and so on. `setInterval` attempts to\n // guarantee the interval callback will be invoked more precisely to the\n // interval period, regardless of load.\n //\n // Therefore, we use `setInterval` to schedule single and repeat actions.\n // If the action reschedules itself with the same delay, the interval is not\n // canceled. If the action doesn't reschedule, or reschedules with a\n // different delay, the interval will be canceled after scheduled callback\n // execution.\n //\n if (id != null) {\n this.id = this.recycleAsyncId(scheduler, id, delay);\n }\n\n // Set the pending flag indicating that this action has been scheduled, or\n // has recursively rescheduled itself.\n this.pending = true;\n\n this.delay = delay;\n // If this action has already an async Id, don't request a new one.\n this.id = this.id ?? this.requestAsyncId(scheduler, this.id, delay);\n\n return this;\n }\n\n protected requestAsyncId(scheduler: AsyncScheduler, _id?: TimerHandle, delay: number = 0): TimerHandle {\n return intervalProvider.setInterval(scheduler.flush.bind(scheduler, this), delay);\n }\n\n protected recycleAsyncId(_scheduler: AsyncScheduler, id?: TimerHandle, delay: number | null = 0): TimerHandle | undefined {\n // If this action is rescheduled with the same delay time, don't clear the interval id.\n if (delay != null && this.delay === delay && this.pending === false) {\n return id;\n }\n // Otherwise, if the action's delay time is different from the current delay,\n // or the action has been rescheduled before it's executed, clear the interval id\n if (id != null) {\n intervalProvider.clearInterval(id);\n }\n\n return undefined;\n }\n\n /**\n * Immediately executes this action and the `work` it contains.\n * @return {any}\n */\n public execute(state: T, delay: number): any {\n if (this.closed) {\n return new Error('executing a cancelled action');\n }\n\n this.pending = false;\n const error = this._execute(state, delay);\n if (error) {\n return error;\n } else if (this.pending === false && this.id != null) {\n // Dequeue if the action didn't reschedule itself. Don't call\n // unsubscribe(), because the action could reschedule later.\n // For example:\n // ```\n // scheduler.schedule(function doWork(counter) {\n // /* ... I'm a busy worker bee ... */\n // var originalAction = this;\n // /* wait 100ms before rescheduling the action */\n // setTimeout(function () {\n // originalAction.schedule(counter + 1);\n // }, 100);\n // }, 1000);\n // ```\n this.id = this.recycleAsyncId(this.scheduler, this.id, null);\n }\n }\n\n protected _execute(state: T, _delay: number): any {\n let errored: boolean = false;\n let errorValue: any;\n try {\n this.work(state);\n } catch (e) {\n errored = true;\n // HACK: Since code elsewhere is relying on the \"truthiness\" of the\n // return here, we can't have it return \"\" or 0 or false.\n // TODO: Clean this up when we refactor schedulers mid-version-8 or so.\n errorValue = e ? 
e : new Error('Scheduled action threw falsy error');\n }\n if (errored) {\n this.unsubscribe();\n return errorValue;\n }\n }\n\n unsubscribe() {\n if (!this.closed) {\n const { id, scheduler } = this;\n const { actions } = scheduler;\n\n this.work = this.state = this.scheduler = null!;\n this.pending = false;\n\n arrRemove(actions, this);\n if (id != null) {\n this.id = this.recycleAsyncId(scheduler, id, null);\n }\n\n this.delay = null!;\n super.unsubscribe();\n }\n }\n}\n", "import { Action } from './scheduler/Action';\nimport { Subscription } from './Subscription';\nimport { SchedulerLike, SchedulerAction } from './types';\nimport { dateTimestampProvider } from './scheduler/dateTimestampProvider';\n\n/**\n * An execution context and a data structure to order tasks and schedule their\n * execution. Provides a notion of (potentially virtual) time, through the\n * `now()` getter method.\n *\n * Each unit of work in a Scheduler is called an `Action`.\n *\n * ```ts\n * class Scheduler {\n * now(): number;\n * schedule(work, delay?, state?): Subscription;\n * }\n * ```\n *\n * @class Scheduler\n * @deprecated Scheduler is an internal implementation detail of RxJS, and\n * should not be used directly. Rather, create your own class and implement\n * {@link SchedulerLike}. Will be made internal in v8.\n */\nexport class Scheduler implements SchedulerLike {\n public static now: () => number = dateTimestampProvider.now;\n\n constructor(private schedulerActionCtor: typeof Action, now: () => number = Scheduler.now) {\n this.now = now;\n }\n\n /**\n * A getter method that returns a number representing the current time\n * (at the time this function was called) according to the scheduler's own\n * internal clock.\n * @return {number} A number that represents the current time. May or may not\n * have a relation to wall-clock time. May or may not refer to a time unit\n * (e.g. milliseconds).\n */\n public now: () => number;\n\n /**\n * Schedules a function, `work`, for execution. May happen at some point in\n * the future, according to the `delay` parameter, if specified. 
May be passed\n * some context object, `state`, which will be passed to the `work` function.\n *\n * The given arguments will be processed and stored as an Action object in a\n * queue of actions.\n *\n * @param {function(state: ?T): ?Subscription} work A function representing a\n * task, or some unit of work to be executed by the Scheduler.\n * @param {number} [delay] Time to wait before executing the work, where the\n * time unit is implicit and defined by the Scheduler itself.\n * @param {T} [state] Some contextual data that the `work` function uses when\n * called by the Scheduler.\n * @return {Subscription} A subscription that can be used to unsubscribe\n * the scheduled work.\n */\n public schedule<T>(work: (this: SchedulerAction<T>, state?: T) => void, delay: number = 0, state?: T): Subscription {\n return new this.schedulerActionCtor(this, work).schedule(state, delay);\n }\n}\n", 
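"// Editorial sketch (not part of the RxJS sources above): per the deprecation\n// note on Scheduler, user code is expected to implement SchedulerLike rather\n// than subclass Scheduler. This hypothetical wrapper simply delegates to the\n// library's asyncScheduler and logs every schedule() call (assumes RxJS ~7).\nimport { asyncScheduler, SchedulerLike } from 'rxjs';\n\nconst loggingScheduler: SchedulerLike = {\n now: () => asyncScheduler.now(),\n schedule(work, delay = 0, state?) {\n console.log('scheduling work, delay =', delay);\n // Delegate the actual queueing and execution to asyncScheduler.\n return asyncScheduler.schedule(work, delay, state);\n },\n};\n\n// Anything that accepts a SchedulerLike, e.g. timer(0, 100, loggingScheduler),\n// can now be observed scheduling its work.\n", "import { Scheduler } from '../Scheduler';\nimport { Action } from './Action';\nimport { AsyncAction } from './AsyncAction';\nimport { TimerHandle } from './timerHandle';\n\nexport class AsyncScheduler extends Scheduler {\n public actions: Array<AsyncAction<any>> = [];\n /**\n * A flag to indicate whether the Scheduler is currently executing a batch of\n * queued actions.\n * @type {boolean}\n * @internal\n */\n public _active: boolean = false;\n /**\n * An internal ID used to track the latest asynchronous task such as those\n * coming from `setTimeout`, `setInterval`, `requestAnimationFrame`, and\n * others.\n * @type {any}\n * @internal\n */\n public _scheduled: TimerHandle | undefined;\n\n constructor(SchedulerAction: typeof Action, now: () => number = Scheduler.now) {\n super(SchedulerAction, now);\n }\n\n public flush(action: AsyncAction<any>): void {\n const { actions } = this;\n\n if (this._active) {\n actions.push(action);\n return;\n }\n\n let error: any;\n this._active = true;\n\n do {\n if ((error = action.execute(action.state, action.delay))) {\n break;\n }\n } while ((action = actions.shift()!)); // exhaust the scheduler queue\n\n this._active = false;\n\n if (error) {\n while ((action = actions.shift()!)) {\n action.unsubscribe();\n }\n throw error;\n }\n }\n}\n", "import { AsyncAction } from './AsyncAction';\nimport { AsyncScheduler } from './AsyncScheduler';\n\n/**\n *\n * Async Scheduler\n *\n * Schedule a task as if you used setTimeout(task, duration)\n *\n * `async` scheduler schedules tasks asynchronously, by putting them on the JavaScript\n * event loop queue. 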
It is best used to delay tasks in time or to schedule tasks repeating\n * at intervals.\n *\n * If you just want to \"defer\" a task, that is, to perform it right after the currently\n * executing synchronous code ends (commonly achieved by `setTimeout(deferredTask, 0)`),\n * the {@link asapScheduler} scheduler will be a better choice.\n *\n * ## Examples\n * Use the async scheduler to delay a task\n * ```ts\n * import { asyncScheduler } from 'rxjs';\n *\n * const task = () => console.log('it works!');\n *\n * asyncScheduler.schedule(task, 2000);\n *\n * // After 2 seconds logs:\n * // \"it works!\"\n * ```\n *\n * Use the async scheduler to repeat a task at intervals\n * ```ts\n * import { asyncScheduler } from 'rxjs';\n *\n * function task(state) {\n * console.log(state);\n * this.schedule(state + 1, 1000); // `this` references currently executing Action,\n * // which we reschedule with new state and delay\n * }\n *\n * asyncScheduler.schedule(task, 3000, 0);\n *\n * // Logs:\n * // 0 after 3s\n * // 1 after 4s\n * // 2 after 5s\n * // 3 after 6s\n * ```\n */\n\nexport const asyncScheduler = new AsyncScheduler(AsyncAction);\n\n/**\n * @deprecated Renamed to {@link asyncScheduler}. Will be removed in v8.\n */\nexport const async = asyncScheduler;\n", "import { AsyncAction } from './AsyncAction';\nimport { Subscription } from '../Subscription';\nimport { QueueScheduler } from './QueueScheduler';\nimport { SchedulerAction } from '../types';\nimport { TimerHandle } from './timerHandle';\n\nexport class QueueAction<T> extends AsyncAction<T> {\n constructor(protected scheduler: QueueScheduler, protected work: (this: SchedulerAction<T>, state?: T) => void) {\n super(scheduler, work);\n }\n\n public schedule(state?: T, delay: number = 0): Subscription {\n if (delay > 0) {\n return super.schedule(state, delay);\n }\n this.delay = delay;\n this.state = state;\n this.scheduler.flush(this);\n return this;\n }\n\n public execute(state: T, delay: number): any {\n return delay > 0 || this.closed ? super.execute(state, delay) : this._execute(state, delay);\n }\n\n protected requestAsyncId(scheduler: QueueScheduler, id?: TimerHandle, delay: number = 0): TimerHandle {\n // If delay exists and is greater than 0, or if the delay is null (the\n // action wasn't rescheduled) but was originally scheduled as an async\n // action, then recycle as an async action.\n\n if ((delay != null && delay > 0) || (delay == null && this.delay > 0)) {\n return super.requestAsyncId(scheduler, id, delay);\n }\n\n // Otherwise flush the scheduler starting with this action.\n scheduler.flush(this);\n\n // HACK: In the past, this was returning `void`. However, `void` isn't a valid\n // `TimerHandle`, and generally the return value here isn't really used. So the\n // compromise is to return `0` which is both \"falsy\" and a valid `TimerHandle`,\n // as opposed to refactoring every other instance of `requestAsyncId`.\n return 0;\n }\n}\n", "import { AsyncScheduler } from './AsyncScheduler';\n\nexport class QueueScheduler extends AsyncScheduler {\n}\n", "import { QueueAction } from './QueueAction';\nimport { QueueScheduler } from './QueueScheduler';\n\n/**\n *\n * Queue Scheduler\n *\n * Put every next task on a queue, instead of executing it immediately\n *\n * `queue` scheduler, when used with delay, behaves the same as the {@link asyncScheduler} scheduler.\n *\n * When used without delay, it schedules the given task synchronously, executing it right when\n * it is scheduled. 
However, when called recursively, that is, when a task is scheduled with the queue scheduler\n * from inside an already scheduled task, that task will not execute immediately either; it will\n * be put on the queue and wait for the current one to finish.\n *\n * This means that when you execute a task with the `queue` scheduler, you can be sure it will end\n * before any other task scheduled with that scheduler starts.\n *\n * ## Examples\n * Schedule recursively first, then do something\n * ```ts\n * import { queueScheduler } from 'rxjs';\n *\n * queueScheduler.schedule(() => {\n * queueScheduler.schedule(() => console.log('second')); // will not happen now, but will be put on a queue\n *\n * console.log('first');\n * });\n *\n * // Logs:\n * // \"first\"\n * // \"second\"\n * ```\n *\n * Reschedule itself recursively\n * ```ts\n * import { queueScheduler } from 'rxjs';\n *\n * queueScheduler.schedule(function(state) {\n * if (state !== 0) {\n * console.log('before', state);\n * this.schedule(state - 1); // `this` references currently executing Action,\n * // which we reschedule with new state\n * console.log('after', state);\n * }\n * }, 0, 3);\n *\n * // In a scheduler that runs recursively, you would expect:\n * // \"before\", 3\n * // \"before\", 2\n * // \"before\", 1\n * // \"after\", 1\n * // \"after\", 2\n * // \"after\", 3\n *\n * // But with queue it logs:\n * // \"before\", 3\n * // \"after\", 3\n * // \"before\", 2\n * // \"after\", 2\n * // \"before\", 1\n * // \"after\", 1\n * ```\n */\n\nexport const queueScheduler = new QueueScheduler(QueueAction);\n\n/**\n * @deprecated Renamed to {@link queueScheduler}. Will be removed in v8.\n */\nexport const queue = queueScheduler;\n", "import { AsyncAction } from './AsyncAction';\nimport { AnimationFrameScheduler } from './AnimationFrameScheduler';\nimport { SchedulerAction } from '../types';\nimport { animationFrameProvider } from './animationFrameProvider';\nimport { TimerHandle } from './timerHandle';\n\nexport class AnimationFrameAction<T> extends AsyncAction<T> {\n constructor(protected scheduler: AnimationFrameScheduler, protected work: (this: SchedulerAction<T>, state?: T) => void) {\n super(scheduler, work);\n }\n\n protected requestAsyncId(scheduler: AnimationFrameScheduler, id?: TimerHandle, delay: number = 0): TimerHandle {\n // If delay is greater than 0, request as an async action.\n if (delay !== null && delay > 0) {\n return super.requestAsyncId(scheduler, id, delay);\n }\n // Push the action to the end of the scheduler queue.\n scheduler.actions.push(this);\n // If an animation frame has already been requested, don't request another\n // one. If an animation frame hasn't been requested yet, request one. Return\n // the current animation frame request id.\n return scheduler._scheduled || (scheduler._scheduled = animationFrameProvider.requestAnimationFrame(() => scheduler.flush(undefined)));\n }\n\n protected recycleAsyncId(scheduler: AnimationFrameScheduler, id?: TimerHandle, delay: number = 0): TimerHandle | undefined {\n // If delay exists and is greater than 0, or if the delay is null (the\n // action wasn't rescheduled) but was originally scheduled as an async\n // action, then recycle as an async action.\n if (delay != null ? 
delay > 0 : this.delay > 0) {\n return super.recycleAsyncId(scheduler, id, delay);\n }\n // If the scheduler queue has no remaining actions with the same async id,\n // cancel the requested animation frame and set the scheduled flag to\n // undefined so the next AnimationFrameAction will request its own.\n const { actions } = scheduler;\n if (id != null && actions[actions.length - 1]?.id !== id) {\n animationFrameProvider.cancelAnimationFrame(id as number);\n scheduler._scheduled = undefined;\n }\n // Return undefined so the action knows to request a new async id if it's rescheduled.\n return undefined;\n }\n}\n", "import { AsyncAction } from './AsyncAction';\nimport { AsyncScheduler } from './AsyncScheduler';\n\nexport class AnimationFrameScheduler extends AsyncScheduler {\n public flush(action?: AsyncAction): void {\n this._active = true;\n // The async id that effects a call to flush is stored in _scheduled.\n // Before executing an action, it's necessary to check the action's async\n // id to determine whether it's supposed to be executed in the current\n // flush.\n // Previous implementations of this method used a count to determine this,\n // but that was unsound, as actions that are unsubscribed - i.e. cancelled -\n // are removed from the actions array and that can shift actions that are\n // scheduled to be executed in a subsequent flush into positions at which\n // they are executed within the current flush.\n const flushId = this._scheduled;\n this._scheduled = undefined;\n\n const { actions } = this;\n let error: any;\n action = action || actions.shift()!;\n\n do {\n if ((error = action.execute(action.state, action.delay))) {\n break;\n }\n } while ((action = actions[0]) && action.id === flushId && actions.shift());\n\n this._active = false;\n\n if (error) {\n while ((action = actions[0]) && action.id === flushId && actions.shift()) {\n action.unsubscribe();\n }\n throw error;\n }\n }\n}\n", "import { AnimationFrameAction } from './AnimationFrameAction';\nimport { AnimationFrameScheduler } from './AnimationFrameScheduler';\n\n/**\n *\n * Animation Frame Scheduler\n *\n * Perform task when `window.requestAnimationFrame` would fire\n *\n * When `animationFrame` scheduler is used with delay, it will fall back to {@link asyncScheduler} scheduler\n * behaviour.\n *\n * Without delay, `animationFrame` scheduler can be used to create smooth browser animations.\n * It makes sure scheduled task will happen just before next browser content repaint,\n * thus performing animations as efficiently as possible.\n *\n * ## Example\n * Schedule div height animation\n * ```ts\n * // html:
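<div style=\"background: #0ff;\"></div>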
\n * import { animationFrameScheduler } from 'rxjs';\n *\n * const div = document.querySelector('div');\n *\n * animationFrameScheduler.schedule(function(height) {\n * div.style.height = height + \"px\";\n *\n * this.schedule(height + 1); // `this` references currently executing Action,\n * // which we reschedule with new state\n * }, 0, 0);\n *\n * // You will see a div element growing in height\n * ```\n */\n\nexport const animationFrameScheduler = new AnimationFrameScheduler(AnimationFrameAction);\n\n/**\n * @deprecated Renamed to {@link animationFrameScheduler}. Will be removed in v8.\n */\nexport const animationFrame = animationFrameScheduler;\n", "import { Observable } from '../Observable';\nimport { SchedulerLike } from '../types';\n\n/**\n * A simple Observable that emits no items to the Observer and immediately\n * emits a complete notification.\n *\n * Just emits 'complete', and nothing else.\n *\n * ![](empty.png)\n *\n * A simple Observable that only emits the complete notification. It can be used\n * for composing with other Observables, such as in a {@link mergeMap}.\n *\n * ## Examples\n *\n * Log complete notification\n *\n * ```ts\n * import { EMPTY } from 'rxjs';\n *\n * EMPTY.subscribe({\n * next: () => console.log('Next'),\n * complete: () => console.log('Complete!')\n * });\n *\n * // Outputs\n * // Complete!\n * ```\n *\n * Emit the number 7, then complete\n *\n * ```ts\n * import { EMPTY, startWith } from 'rxjs';\n *\n * const result = EMPTY.pipe(startWith(7));\n * result.subscribe(x => console.log(x));\n *\n * // Outputs\n * // 7\n * ```\n *\n * Map and flatten only odd numbers to the sequence `'a'`, `'b'`, `'c'`\n *\n * ```ts\n * import { interval, mergeMap, of, EMPTY } from 'rxjs';\n *\n * const interval$ = interval(1000);\n * const result = interval$.pipe(\n * mergeMap(x => x % 2 === 1 ? of('a', 'b', 'c') : EMPTY),\n * );\n * result.subscribe(x => console.log(x));\n *\n * // Results in the following to the console:\n * // x is equal to the count on the interval, e.g. (0, 1, 2, 3, ...)\n * // x will occur every 1000ms\n * // if x % 2 is equal to 1, print a, b, c (each on its own)\n * // if x % 2 is not equal to 1, nothing will be output\n * ```\n *\n * @see {@link Observable}\n * @see {@link NEVER}\n * @see {@link of}\n * @see {@link throwError}\n */\nexport const EMPTY = new Observable((subscriber) => subscriber.complete());\n\n/**\n * @param scheduler A {@link SchedulerLike} to use for scheduling\n * the emission of the complete notification.\n * @deprecated Replaced with the {@link EMPTY} constant or {@link scheduled} (e.g. `scheduled([], scheduler)`). Will be removed in v8.\n */\nexport function empty(scheduler?: SchedulerLike) {\n return scheduler ? emptyScheduled(scheduler) : EMPTY;\n}\n\nfunction emptyScheduled(scheduler: SchedulerLike) {\n return new Observable((subscriber) => scheduler.schedule(() => subscriber.complete()));\n}\n", "import { SchedulerLike } from '../types';\nimport { isFunction } from './isFunction';\n\nexport function isScheduler(value: any): value is SchedulerLike {\n return value && isFunction(value.schedule);\n}\n", "import { SchedulerLike } from '../types';\nimport { isFunction } from './isFunction';\nimport { isScheduler } from './isScheduler';\n\nfunction last(arr: T[]): T | undefined {\n return arr[arr.length - 1];\n}\n\nexport function popResultSelector(args: any[]): ((...args: unknown[]) => unknown) | undefined {\n return isFunction(last(args)) ? 
args.pop() : undefined;\n}\n\nexport function popScheduler(args: any[]): SchedulerLike | undefined {\n return isScheduler(last(args)) ? args.pop() : undefined;\n}\n\nexport function popNumber(args: any[], defaultValue: number): number {\n return typeof last(args) === 'number' ? args.pop()! : defaultValue;\n}\n", 
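"// Editorial sketch (not part of the RxJS sources above): how the pop*\n// helpers are used by variadic factories. myOf is a hypothetical of()-like\n// function; it assumes the popScheduler defined above is in scope.\nfunction myOf(...args: any[]) {\n const scheduler = popScheduler(args); // pops the trailing scheduler, if any\n // args now holds only the values to emit.\n return { values: args, scheduler };\n}\n\n// myOf(1, 2, 3) -> { values: [1, 2, 3], scheduler: undefined }\n// myOf(1, 2, asyncScheduler) -> { values: [1, 2], scheduler: asyncScheduler }\n", "export const isArrayLike = (<T>(x: any): x is ArrayLike<T> => x && typeof x.length === 'number' && typeof x !== 'function');", "import { isFunction } from \"./isFunction\";\n\n/**\n * Tests to see if the object is \"thennable\".\n * @param value the object to test\n */\nexport function isPromise(value: any): value is PromiseLike<any> {\n return isFunction(value?.then);\n}\n", "import { InteropObservable } from '../types';\nimport { observable as Symbol_observable } from '../symbol/observable';\nimport { isFunction } from './isFunction';\n\n/** Identifies an input as being Observable (but not necessarily an Rx Observable) */\nexport function isInteropObservable(input: any): input is InteropObservable<any> {\n return isFunction(input[Symbol_observable]);\n}\n", "import { isFunction } from './isFunction';\n\nexport function isAsyncIterable<T>(obj: any): obj is AsyncIterable<T> {\n return Symbol.asyncIterator && isFunction(obj?.[Symbol.asyncIterator]);\n}\n", "/**\n * Creates the TypeError to throw if an invalid object is passed to `from` or `scheduled`.\n * @param input The object that was passed.\n */\nexport function createInvalidObservableTypeError(input: any) {\n // TODO: We should create error codes that can be looked up, so this can be less verbose.\n return new TypeError(\n `You provided ${\n input !== null && typeof input === 'object' ? 'an invalid object' : `'${input}'`\n } where a stream was expected. You can provide an Observable, Promise, ReadableStream, Array, AsyncIterable, or Iterable.`\n );\n}\n", "export function getSymbolIterator(): symbol {\n if (typeof Symbol !== 'function' || !Symbol.iterator) {\n return '@@iterator' as any;\n }\n\n return Symbol.iterator;\n}\n\nexport const iterator = getSymbolIterator();\n", "import { iterator as Symbol_iterator } from '../symbol/iterator';\nimport { isFunction } from './isFunction';\n\n/** Identifies an input as being an Iterable */\nexport function isIterable(input: any): input is Iterable<any> {\n return isFunction(input?.[Symbol_iterator]);\n}\n", "import { ReadableStreamLike } from '../types';\nimport { isFunction } from './isFunction';\n\nexport async function* readableStreamLikeToAsyncGenerator<T>(readableStream: ReadableStreamLike<T>): AsyncGenerator<T> {\n const reader = readableStream.getReader();\n try {\n while (true) {\n const { value, done } = await reader.read();\n if (done) {\n return;\n }\n yield value!;\n }\n } finally {\n reader.releaseLock();\n }\n}\n\nexport function isReadableStreamLike<T>(obj: any): obj is ReadableStreamLike<T> {\n // We don't want to use instanceof checks because they would return\n // false for instances from another Realm, like an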