diff --git a/.github/workflows/build-tests.yml b/.github/workflows/build-tests.yml index b88d719..a5583e1 100644 --- a/.github/workflows/build-tests.yml +++ b/.github/workflows/build-tests.yml @@ -1,7 +1,7 @@ # This workflow will install Python dependencies, run tests and lint with a variety of Python versions # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions -name: Python package +name: Build Status on: push: diff --git a/BayesABTest/ab_test_dist_explorer.py b/BayesABTest/ab_test_dist_explorer.py index 584e918..a4dae72 100644 --- a/BayesABTest/ab_test_dist_explorer.py +++ b/BayesABTest/ab_test_dist_explorer.py @@ -35,7 +35,6 @@ def beta_dist(alpha, beta): for i in range(len(locs)): labels.append('{:.0%}'.format(locs[i])) plt.xticks(locs, labels=labels) - plt.show() def gamma_dist(mean=None, var=None, alpha=None, beta=None): @@ -81,7 +80,6 @@ def gamma_dist(mean=None, var=None, alpha=None, beta=None): plt.title('Gamma({0}, {1}) Distribution PDF (mean and variance)' .format(mean, var), fontweight='bold', fontsize=14) - plt.show() def lognormal_dist(mean, var): @@ -109,7 +107,6 @@ def lognormal_dist(mean, var): plt.title('Log-Normal({0:.2}, {1:.2}) Distribution PDF (mean and variance)' .format(mean, var), fontweight='bold', fontsize=14) - plt.show() def poisson_dist(lam): @@ -131,4 +128,3 @@ def poisson_dist(lam): plt.yticks([], []) plt.title('Poisson({}) Distribution PMF'.format(lam), fontweight='bold', fontsize=14) - plt.show() diff --git a/README.md b/README.md index 5b9cbaf..4d9a65c 100644 --- a/README.md +++ b/README.md @@ -1,133 +1,39 @@ -![Build Status](https://github.com/bakermoran/BayesABTest/workflows/Python%20package/badge.svg?branch=master) - # Bayesian AB Test Report Builder -> **Baker Moran** - -This is a package to allow AB testers to create a standard report for test results. Can handle conversion and continuous data. Can handle multiple variants. 
- -Conversion data uses the beta distribution as the conjugate prior, and continuous data uses the log-normal. - -## Instantiation variables - -* `raw_data` - a pandas dataframe with (at least) two columns, one for the bucket and one for the response variable -* `metric` - column name in `raw_data` for the response variable -* `bucket_col_name` - column name in `raw_data` for the bucket (defaults to `bucket`) -* `control_bucket_name` - value in `bucket_col_name` for the control bucket (defaults to `off`) -* `samples` - number of samples to run the monte carlo simulation, must be 1,000 and 50,000 (defaults to 10,000) -* `prior_func` - the type of distribution to use for the prior. options include: - * `beta` - use for conversion rates. bounded on the interval [0,1] - * `log-normal` - use for continuous, greater than zero response variables (0, +inf) (ex: premium, minutes spent on scene, etc.) - * `normal` - use for continuous response variables that on the interval (-inf, +inf) - * `poisson` - use for discrete, greater than zero response variables (ex: arrivals per day, events per account, etc.) -* `prior_info` - the prior belief to about the response varable. options include: - * `uninformed` - no prior belief about the response, all outcomes are equaly likely. No input required to use - * `informed` - uses and empirically informed prior (informed by the control data), and weakens this prior by `prior_scale_factor` - * `specified` - allows for a user to input prior parameters -* `prior_parameters` - `prior_info` must be set to `specified`. 
This must be a dictionary with the following key value pairs: - * `prior_func` = `beta` - keys are `alpha` and `beta` - * `prior_func` = `log-normal` - keys are `mean` and `var` - * `prior_func` = `normal` - keys are `mean` and `var` - * `prior_func` = `poisson` - keys are `alpha` and `beta` OR `mean` and `var` -* `confidence_level` - value for the confidence interval on the CDF chart (defaults to `0.05`) -* `compare_variants` - boolean for comparing the variants to each other. Control to each variant is always done (unless there are too many variants to plot). If there are few enough variants, the comparisons for variants will be plotted. (defaults to `False`) -* `debug` - boolean to print out extra output for debugging purposes (defaults to `False`) -* `prior_scale_factor` - factor to scale an empirically informed prior by (defaults to `4`) - -## Public class methods and variables - -### Methods - -* `fit()` - * *requires* - class was instantiated with valid inputs - * *modifies* - `ecdf`, `control_sample`, `variant_sample`, `lift` - * *effects* - creates and runs the monte carlo simulation, sets member variables to reflect model outputs -* `plot(lift_plot_flag=True)` - * `lift_plot_flag` - boolean for plotting lift PDF and CDF (defaults to `True`) - * *requires* - `fit()` has been run - * *modifies* - none - * *effects* - creates a 3 chart report of the AB test. (must show with `matplotlib.pylot.show()`) -* `plot_posteriors(variants=[]):` - * *requires* - variants is a list of variant names in `bucket_col_name`. One variant may not be repeated multiple times. - * *modifies* - none - * *effects* - creates a posterior plot of these variants (must show with `matplotlib.pylot.show()`) -* `plot_positive_lift(variant_one, variant_two)` - * *requires* - variant_one and variant_two are variant names in `bucket_col_name`. One variant may not be repeated multiple times. 
- * *modifies* - none - * *effects* - creates a positive lift plot between these variants (must show with `matplotlib.pylot.show()`) -* `plot_ecdf(variant_one, variant_two)` - * *requires* - variant_one and variant_two are variant names in `bucket_col_name`. One variant may not be repeated multiple times. - * *modifies* - none - * *effects* - creates an empirical cumulative distribution plot of these variants lift (must show with `matplotlib.pylot.show()`) - -### Variables - -* all variables in `Instantiation variables` -* `ecdf` - a 3 level dicitonary, where key1 is a the variant name of the numerator in the lift calculation, and key2 is the variant name of the denominator in the lift calculation. The third keys are `x` and `y`, and the value is a list containing the x and y coordinates for the empirical CDF (only meaningful after `fit()` is run) -* `lift` - a 2 level dicitonary, where key1 is a the variant name of the numerator in the lift calculation, and key2 is the variant name of the denominator in the lift calculation. The value is a list containing the lift of the variant over the control for each sampled value (only meaningful after `fit()` is run) -* `posteriors` is a dictionary, where the key is a variant name, and the value is a `posterior_distribution` object. - -## Usage guide - -First install package. Navigate to the directory in terminal and pip install it. - -```bash -cd BayesABTest -pip3 install -e . -``` +![Build Status](https://github.com/bakermoran/BayesABTest/workflows/Build%20Status/badge.svg?branch=master) +[![PyPI version](https://badge.fury.io/py/BayesABTest.svg)](https://badge.fury.io/py/BayesABTest) -Second, import in your analysis file. 
- -```python -import pandas as pd -from BayesABTest import ab_test_model - -# read in data -df = pd.read_csv('some_file.csv') - -# initialize class with the data and some optional variables -first_test = ab_test_model(df, metric='bind', prior_info='informed', prior_func='beta', debug=True) - -# run public methods -first_test.fit() -first_test.plot() -``` - -## Example output +> **Baker Moran** -### Chart Type Descriptions (Top Left -> Clockwise) +This allows AB testers to create a standard report for test results with python. Handles multiple variant tests, with a variety of prior function types. This is meant to be an abstraction from the nitty gritty of the details of Bayesian statistics. That said, some knowledge about which prior function to use etc. will be helpful for the user. -* **Posterior Distributions** - Shows both buckets posterior distributions, an estimate of the true conversion rate or continuous value, based on the prior and the observed data (using kernel density estimation plots) -* **Lift PDF** - Shows the probability density distribution of lift for the variant over the control, an estimate of the chance of improvement of the variant over the control (using kernel density estimation plot). Calculated by: the results from the sampled values for the variant percent change from the control. The proportion greater than 0 is highlighted. -* **Empirical CDF of Lift**å - Uses the same lift vector, but is the empirical cumulative distribution function. Median lift is highlighted and labeled, as well as the chosen confidence interval. 
+## Installation -### Conversion Rate Example (Beta Distribution) +* Install via pip (or specify pip3) -![alt text](img/one_var_conversion.png "Conversion Rate Example") + ```bash + pip install BayesABTest + ``` -### Continuous (Positive) Variable Example (Log-Normal Distribution) + OR -![alt text](img/one_var_continuous.png "Premium Example") +* Download files from [PyPi](https://pypi.org/project/BayesABTest/#files) and install -### Two Variant Conversion with Compare Variants +## Package Functions and Classes -![alt text](img/two_var_conversion_compare.png "Two Variant Example") +### [ab_test_model](docs/package_documentation/ab_test_model.md) -### Three Variant Continuous without Comparing Variants +Class implementing out of the box AB testing functionality. Simple, easy to use AB testing with many different prior function types, all in one clean interface. -![alt text](img/three_var_continuous_not_compare.png "Three Variant Example") +### [ab_test_dist_explorer](docs/package_documentation/ab_test_dist_explorer.md) -### Two Variant Poisson with Comparing Variants +Functions allowing a user to explore different distributions with a simple to use interface. Allows a user to visually learn about bayesian statistics, and inform proper prior function choice when doing Bayesian AB testing. 
-![alt text](img/two_var_poisson_compare.png "Three Variant Example") +## Appendix -### Version History +### Learning -* `0.1.0` - *12/02/2019* -* `1.0.0` - *12/27/2019* -* `1.0.1` - *01/02/2020* -* `1.0.2` - *02/11/2020* -* `1.0.3` - *02/21/2020* +For documentation explaining and motivating the use of Bayesian statistics to evaluate A/B tests, see [documentation](docs/besyian_ab_testing/Bayesian_AB_Testing_explainer.md) ### Acknowledgements @@ -138,3 +44,12 @@ Articles Reference: * * * + +### Version History + +* `0.1.0-prealpha` - *12/02/2019* +* `1.0.0-alpha` - *12/27/2019* +* `1.0.1-alpha` - *01/02/2020* +* `1.0.2-alpha` - *02/11/2020* +* `1.0.3-alpha` - *02/21/2020* +* `1.0.4-alpha` - *06/22/2020* diff --git a/docs/besyian_ab_testing/Bayesian_AB_Testing_explainer.md b/docs/besyian_ab_testing/Bayesian_AB_Testing_explainer.md new file mode 100644 index 0000000..dcab03a --- /dev/null +++ b/docs/besyian_ab_testing/Bayesian_AB_Testing_explainer.md @@ -0,0 +1,45 @@ +# Bayesian Statistics + +There are plenty of articles out there motivating why we should use Bayesian statistics in product analytics. I'm not going to try to reinvent the wheel here, so I will provide only a brief explanation and link to some articles that I found helpful in my learning. I will also explain why I created this package. + +## Motivation + +### Why Bayesian Statistics + +Bayesian statistics is a completely different way to think about statistics from what you likely learned in high school and college (typically referred to as frequentist statistics). Bayesian statistics allows us to much more easily answer a wider variety of questions that are more relevant for the business world. Although most people are _used_ to hearing a p-value for a statistical test, many don't actually know what it means or how to interpret it beyond the "statistical significance is when p <= 0.05" we were programmed to regurgitate. 
Bayesian statistics allows a much more intuitive interpretation of the results of a test. Examples of questions Bayesian statistics is purpose built to answer: + +* What is the probability that the Variant we are testing is better than the Control? +* How much better is the Variant than the Control? +* If we make the wrong choice, how much worse could it actually be? + +### The Issue with Traditional AB Testing + +The goal of using frequentist statistics is to minimize the probability of being wrong when we pick the variant over the control. P-values are designed to be biased towards the control. In business we often run an experiment because _we believe we are making an improvement to the product_. There clearly needs to be statistical rigor, but a question I often get when the variant is _slightly better_ than the control, is "why can't we just pick the variant?". Bayesian statistics allows for statistical support, even when picking a variant that is only slightly better. + +Frequentist statistics protects us against choosing something new that isn't actually better. This is important in things like medicine; it's not that important in the business world. In business, we want to run lots of tests as quickly as we can, in order to make the best decisions we can about the business. Changing the color of a button on the website likely will not result in lives lost, while a new medication could. Bayesian statistics allows us to control the risk we are taking on every decision we make; we can choose to make a decision with less data than we would need with Frequentist methods to approve a new medication. + +### The Power of Bayes Theorem + +Bayesian statistics is designed to use our belief about the world in order to help us make a decision. To me at first, this was confusing because it sounded like an arbitrary choice. 
It is; but the key to understanding why Bayesian statistics is so powerful is that we are making even stronger assumptions when using frequentist statistics. In business and tech, we often have access to a lot of data and have a pretty good idea about conversion rates. I would argue that _not_ using any of that information is a more egregious mistake than using the wrong prior with Bayesian statistics. + +## Goal of this package + +I created this package originally out of necessity and a desire to learn. + +* I previously had little exposure to Bayesian statistics, and I found myself wanting to learn about it. The best way for me to learn things is to teach other people and build something. +* The code we used at Root for testing only included functionality for one variant conversion data AB tests. I often needed the ability to use different priors for continuous data, as well as additional variants. +* Additionally, I would much rather use Python than R. Initially, I built this to only add that functionality to my daily work. Then I realized, there are several very robust and really good R packages for Bayesian AB testing (I'll link a few). But I couldn't find anything that fit my needs in Python. For this reason, I decided to expand and productionalize this package into something that analysts at other data driven and testing oriented companies can use. +* This package is meant to be an easy to use method for analysts to report AB test results in a Bayesian framework using Python. + +### Further Reading + +* (this entire blog is great for the budding Bayesian statistician) +* (how bayesian statistics allows us to innovate quickly) +* (a technical paper on defining risk with bayesian statistics) +* (how to use bayesian statistics correctly when AB testing) +* (AB testing at Airbnb) + +### R packages for Bayesian AB testing + +* - bayesAB by Frank Portman +* - abayes by Convoy Inc. 
diff --git a/docs/package_documentation/ab_test_dist_explorer.md b/docs/package_documentation/ab_test_dist_explorer.md new file mode 100644 index 0000000..8df2e37 --- /dev/null +++ b/docs/package_documentation/ab_test_dist_explorer.md @@ -0,0 +1,48 @@ +# Class ab_test_dist_explorer + +This file contains a set of functions to help a user visualize each distribution. + +## Methods + +* `beta_dist(alpha, beta)` + * *requires* - alpha and beta are non-negative integers. alpha signifies the successes, and beta signifies the failures. ex: alpha = 10, beta = 10. Success rate = 50% out of a population of 20. + * *modifies* - returns a matplot lib object (show the plot with `matplotlib.pyplot.show()`) + * *effects* - none +* `gamma_dist(mean=None, var=None, alpha=None, beta=None)` + * *requires* - Inputs are in pairs, either [mean, variance] OR [alpha, beta]. Mean is on the interval (-inf, inf), variance is greater than 0. Alpha and beta are both positive numbers. + * *modifies* - returns a matplot lib object (show the plot with `matplotlib.pyplot.show()`) + * *effects* - none +* `lognormal_dist(mean, var)` + * *requires* - mean and variance of the log-normal distribution. Mean is on the interval (-inf, inf), variance is greater than 0. + * *modifies* - returns a matplot lib object (show the plot with `matplotlib.pyplot.show()`) + * *effects* - none +* `poisson_dist(lam)` + * *requires* - lambda is the average occurrences per unit time in a poisson distribution. Lambda is non-negative. 
+ * *modifies* - returns a matplot lib object (show the plot with `matplotlib.pyplot.show()`) + * *effects* - none + +## Usage Guide + +```python +# import packages +from BayesABTest import ab_test_dist_explorer as pe +from matplotlib import pyplot as plt + +# use the functions to plot a distribution +pe.beta_dist(20, 80) +plt.show() +``` + +## Examples + +### Beta(20, 80) Distribution + +![alt text](img/beta_20_80.png "Beta(20, 80) Distribution") + +### Gamma(4, 2) Distribution + +![alt text](img/gamma_4_2.png "Gamma(4, 2) Distribution") + +### Poisson(15) Distribution + +![alt text](img/poisson_15.png "Poisson(15) Distribution") diff --git a/docs/package_documentation/ab_test_model.md b/docs/package_documentation/ab_test_model.md new file mode 100644 index 0000000..9685182 --- /dev/null +++ b/docs/package_documentation/ab_test_model.md @@ -0,0 +1,124 @@ +# Class ab_test_model + +## Instantiation variables + +* `raw_data` - a pandas dataframe with (at least) two columns, one for the bucket and one for the response variable +* `metric` - column name in `raw_data` for the response variable +* `bucket_col_name` - column name in `raw_data` for the bucket (defaults to `bucket`) +* `control_bucket_name` - value in `bucket_col_name` for the control bucket (defaults to `off`) +* `samples` - number of samples to run the monte carlo simulation, must be 1,000 and 50,000 (defaults to 10,000) +* `prior_func` - the type of distribution to use for the prior. options include: + * `beta` - use for conversion rates. bounded on the interval [0,1] + * `log-normal` - use for continuous, greater than zero response variables (0, +inf) (ex: premium, minutes spent on scene, etc.) + * `normal` - use for continuous response variables that on the interval (-inf, +inf) + * `poisson` - use for discrete, greater than zero response variables (ex: arrivals per day, events per account, etc.) +* `prior_info` - the prior belief to about the response varable. 
options include: + * `uninformed` - no prior belief about the response, all outcomes are equally likely. No input required to use + * `informed` - uses an empirically informed prior (informed by the control data), and weakens this prior by `prior_scale_factor` + * `specified` - allows for a user to input prior parameters +* `prior_parameters` - `prior_info` must be set to `specified`. This must be a dictionary with the following key value pairs: + * `prior_func` = `beta` + * keys are `alpha` and `beta` + * `prior_func` = `log-normal` + * keys are `mean` and `var` + * `prior_func` = `normal` + * keys are `mean` and `var` + * `prior_func` = `poisson` + * keys are `alpha` and `beta` + * OR `mean` and `var` +* `confidence_level` - value for the confidence interval on the CDF chart (defaults to `0.05`) +* `compare_variants` - boolean for comparing the variants to each other. Control to each variant is always done (unless there are too many variants to plot). If there are few enough variants, the comparisons for variants will be plotted. (defaults to `False`) +* `debug` - boolean to print out extra output for debugging purposes (defaults to `False`) +* `prior_scale_factor` - factor to scale an empirically informed prior by (defaults to `4`) + +## Public class methods and variables + +### Methods + +* `fit()` + * *requires* - class was instantiated with valid inputs + * *modifies* - `ecdf`, `control_sample`, `variant_sample`, `lift` + * *effects* - creates and runs the monte carlo simulation, sets member variables to reflect model outputs +* `plot(lift_plot_flag=True)` + * `lift_plot_flag` - boolean for plotting lift PDF and CDF (defaults to `True`) + * *requires* - `fit()` has been run + * *modifies* - none + * *effects* - creates a 3 chart report of the AB test. (must show with `matplotlib.pyplot.show()`) +* `plot_posteriors(variants=[]):` + * *requires* - `fit()` has been run. variants is a list of variant names in `bucket_col_name`. 
One variant may not be repeated multiple times. + * *modifies* - none + * *effects* - creates a posterior plot of these variants (must show with `matplotlib.pyplot.show()`) +* `plot_positive_lift(variant_one, variant_two)` + * *requires* - `fit()` has been run. variant_one and variant_two are variant names in `bucket_col_name`. One variant may not be repeated multiple times. + * *modifies* - none + * *effects* - creates a positive lift plot between these variants (must show with `matplotlib.pyplot.show()`) +* `plot_ecdf(variant_one, variant_two)` + * *requires* - `fit()` has been run. variant_one and variant_two are variant names in `bucket_col_name`. One variant may not be repeated multiple times. + * *modifies* - none + * *effects* - creates an empirical cumulative distribution plot of these variants lift (must show with `matplotlib.pyplot.show()`) + +### Run-Time Variables + +* `ecdf` - a 3 level dictionary, where key1 is the variant name of the numerator in the lift calculation, and key2 is the variant name of the denominator in the lift calculation. The third keys are `x` and `y`, and the value is a list containing the x and y coordinates for the empirical CDF (only meaningful after `fit()` is run) +* `lift` - a 2 level dictionary, where key1 is the variant name of the numerator in the lift calculation, and key2 is the variant name of the denominator in the lift calculation. The value is a list containing the lift of the variant over the control for each sampled value (only meaningful after `fit()` is run) +* `posteriors` is a dictionary, where the key is a variant name, and the value is a `posterior_distribution` object. + +## Usage Guide + +Import the ab_test_model class. Input to the class should be a pandas dataframe. 
+ +```python +import pandas as pd +from BayesABTest import ab_test_model +from matplotlib import pyplot as plt + +# read in data +df = pd.read_csv('some_file.csv') + +# initialize class with the data and some optional variables +first_test = ab_test_model(df, metric='conversion_rate', prior_info='informed', prior_func='beta', debug=True) + +# run public methods +first_test.fit() +first_test.plot() +plt.show() +``` + +## Example output + +The output of the `plot()` method is a uniform reporting style. + +For a single variant AB test: starting in the top left and going clockwise: + +* **Posterior Distributions** - Shows both variants posterior distributions, an estimate of the true conversion rate or continuous value, based on the prior and the observed data (using kernel density estimation plots) +* **Lift PDF** - Shows the probability density distribution of lift for the variant over the control, an estimate of the chance of improvement of the variant over the control (using kernel density estimation plot). Calculated by: the results from the sampled values for the variant percent change from the control. The proportion greater than 0 is highlighted. +* **Empirical CDF of Lift** - Uses the same lift vector, but is the empirical cumulative distribution function. Median lift is highlighted and labeled, as well as the chosen confidence interval. 
+ +### Conversion Rate Example (Beta Distribution) + +![alt text](img/one_var_conversion.png "Conversion Rate Example") + +### Continuous (Positive) Variable Example (Log-Normal Distribution) + +![alt text](img/one_var_continuous.png "Premium Example") + +For a multiple variant AB test: + +* At the top: **Posterior Distributions** - Shows all variants posterior distributions, an estimate of the true conversion rate or continuous value, based on the prior and the observed data (using kernel density estimation plots) + +Each row will then follow: + +* Left: **Lift PDF** - Shows the probability density distribution of lift for a variant over the control, an estimate of the chance of improvement of the variant over the control (using kernel density estimation plot). Calculated by: the results from the sampled values for the variant percent change from the control. The proportion greater than 0 is highlighted. +* Right: **Empirical CDF of Lift** - Uses the same lift vector, but is the empirical cumulative distribution function. Median lift is highlighted and labeled, as well as the chosen confidence interval. 
+ +### Two Variant Conversion with Compare Variants + +![alt text](img/two_var_conversion_compare.png "Two Variant Example") + +### Three Variant Continuous without Comparing Variants + +![alt text](img/three_var_continuous_not_compare.png "Three Variant Example") + +### Two Variant Poisson with Comparing Variants + +![alt text](img/two_var_poisson_compare.png "Three Variant Example") diff --git a/docs/package_documentation/img/beta_20_80.png b/docs/package_documentation/img/beta_20_80.png new file mode 100644 index 0000000..48368d1 Binary files /dev/null and b/docs/package_documentation/img/beta_20_80.png differ diff --git a/docs/package_documentation/img/gamma_4_2.png b/docs/package_documentation/img/gamma_4_2.png new file mode 100644 index 0000000..7c391db Binary files /dev/null and b/docs/package_documentation/img/gamma_4_2.png differ diff --git a/img/one_var_continuous.png b/docs/package_documentation/img/one_var_continuous.png similarity index 100% rename from img/one_var_continuous.png rename to docs/package_documentation/img/one_var_continuous.png diff --git a/img/one_var_conversion.png b/docs/package_documentation/img/one_var_conversion.png similarity index 100% rename from img/one_var_conversion.png rename to docs/package_documentation/img/one_var_conversion.png diff --git a/docs/package_documentation/img/poisson_15.png b/docs/package_documentation/img/poisson_15.png new file mode 100644 index 0000000..16ab5d2 Binary files /dev/null and b/docs/package_documentation/img/poisson_15.png differ diff --git a/img/three_var_continuous_not_compare.png b/docs/package_documentation/img/three_var_continuous_not_compare.png similarity index 100% rename from img/three_var_continuous_not_compare.png rename to docs/package_documentation/img/three_var_continuous_not_compare.png diff --git a/img/two_var_conversion_compare.png b/docs/package_documentation/img/two_var_conversion_compare.png similarity index 100% rename from img/two_var_conversion_compare.png rename to 
docs/package_documentation/img/two_var_conversion_compare.png diff --git a/img/two_var_poisson_compare.png b/docs/package_documentation/img/two_var_poisson_compare.png similarity index 100% rename from img/two_var_poisson_compare.png rename to docs/package_documentation/img/two_var_poisson_compare.png diff --git a/legacy_tsts/legacy_model_tsts.py b/legacy_tsts/legacy_model_tsts.py index 46a15b9..ea08baf 100644 --- a/legacy_tsts/legacy_model_tsts.py +++ b/legacy_tsts/legacy_model_tsts.py @@ -7,13 +7,13 @@ def one_variant_conversion(): """Create data and run a one variant report for a conversion metric.""" - raw_data_auto_bind = dh.create_conversion_data([.27, .3], ['off', 'on'], - metric_name='bind') - auto_bind = ab(raw_data_auto_bind, metric='bind', - prior_info='uninformed', prior_func='beta', debug=True, - samples=1000) - auto_bind.fit() - auto_bind.plot() + raw_data_conversion = dh.create_conversion_data([.27, .3], ['off', 'on'], + metric_name='conversion') + site_conversion = ab(raw_data_conversion, metric='conversion', + prior_info='uninformed', prior_func='beta', + debug=True, samples=1000) + site_conversion.fit() + site_conversion.plot() plt.show() @@ -64,13 +64,13 @@ def two_variants_conversion(): raw_data_2vars = dh.create_conversion_data([.2, .3, .4], ['control', 'rebrand', 'oldbrand'], - metric_name='bind') - auto_bind = ab(raw_data_2vars, metric='bind', - prior_info='uninformed', prior_func='beta', - debug=True, control_bucket_name='control', - compare_variants=True, samples=1000) - auto_bind.fit() - auto_bind.plot(lift_plot_flag=True) + metric_name='conversion') + site_conversion = ab(raw_data_2vars, metric='conversion', + prior_info='uninformed', prior_func='beta', + debug=True, control_bucket_name='control', + compare_variants=True, samples=1000) + site_conversion.fit() + site_conversion.plot(lift_plot_flag=True) plt.show() @@ -83,13 +83,13 @@ def conversion_negative_variants(): ['control', 'variant_1', 'variant_2'], - metric_name='bind') - 
auto_bind = ab(raw_data_2vars, metric='bind', - prior_info='uninformed', prior_func='beta', - debug=True, control_bucket_name='control', - compare_variants=True, samples=1000) - auto_bind.fit() - auto_bind.plot(lift_plot_flag=True) + metric_name='conversion') + site_conversion = ab(raw_data_2vars, metric='conversion', + prior_info='uninformed', prior_func='beta', + debug=True, control_bucket_name='control', + compare_variants=True, samples=1000) + site_conversion.fit() + site_conversion.plot(lift_plot_flag=True) plt.show() @@ -304,15 +304,15 @@ def tst_specified_prior(): ['control', 'variant_1', 'variant_2'], - metric_name='bind') + metric_name='conversion') prior = {'alpha': 22, 'beta': 100-22} - auto_bind = ab(raw_data_2vars, metric='bind', - prior_info='specified', prior_func='beta', - debug=True, control_bucket_name='control', - compare_variants=True, prior_parameters=prior, - samples=1000) - auto_bind.fit() - auto_bind.plot(lift_plot_flag=True) + site_conversion = ab(raw_data_2vars, metric='conversion', + prior_info='specified', prior_func='beta', + debug=True, control_bucket_name='control', + compare_variants=True, prior_parameters=prior, + samples=1000) + site_conversion.fit() + site_conversion.plot(lift_plot_flag=True) plt.show() prior = {'mean': 650, 'var': 1.5}