├── leab
│   ├── __init__.py
│   ├── before
│   │   ├── __init__.py
│   │   ├── leTemplate.html
│   │   └── leSample.py
│   ├── leDataset
│   │   ├── data
│   │   │   ├── evan_miller_ttest_default_1.csv
│   │   │   ├── evan_miller_ttest_default_2.csv
│   │   │   ├── evan_miller_chi2_default_1.csv
│   │   │   ├── evan_miller_chi2_default_2.csv
│   │   │   ├── evan_miller_chi2_default_3.csv
│   │   │   ├── evan_miller_chi2_default_4.csv
│   │   │   ├── evan_miller_chi2_default_5.csv
│   │   │   └── data_chi2.csv
│   │   ├── __init__.py
│   │   ├── sample_le_average.py
│   │   └── sample_le_success.py
│   └── after
│       ├── __init__.py
│       ├── leSuccess.py
│       └── leAverage.py
├── docs
│   ├── reference
│   │   ├── leAverage.md
│   │   ├── leSample.md
│   │   └── leSuccess.md
│   ├── _static
│   │   ├── logo.py
│   │   └── logo.svg
│   ├── index.md
│   └── notebooks
│       ├── sample_size.ipynb
│       ├── success_comparaison.ipynb
│       └── average_comparaison.ipynb
├── .github
│   ├── PULL_REQUEST_TEMPLATE.md
│   ├── workflows
│   │   ├── deploy-docs.yml
│   │   ├── python-tests.yml
│   │   └── release.yml
│   └── ISSUE_TEMPLATE
│       ├── feature_request.md
│       └── bug_report.md
├── mkdocs.yml
├── tests
│   ├── test_leAverage.py
│   ├── test_leSuccess.py
│   └── test_leSample.py
├── pyproject.toml
├── LICENSE
├── SECURITY.md
├── .gitignore
├── CODE_OF_CONDUCT.md
├── README.md
└── CONTRIBUTING.md
/leab/__init__.py:
--------------------------------------------------------------------------------
1 | __all__ = ["leDataset", "before", "after"]
2 |
--------------------------------------------------------------------------------
/docs/reference/leAverage.md:
--------------------------------------------------------------------------------
1 | ## leAverage.py
2 |
3 | ::: leab.after.leAverage
--------------------------------------------------------------------------------
/docs/reference/leSample.md:
--------------------------------------------------------------------------------
1 | ## leSample.py
2 |
3 | ::: leab.before.leSample
--------------------------------------------------------------------------------
/docs/reference/leSuccess.md:
--------------------------------------------------------------------------------
1 | ## leSuccess.py
2 |
3 | ::: leab.after.leSuccess
--------------------------------------------------------------------------------
/leab/before/__init__.py:
--------------------------------------------------------------------------------
1 | from .leSample import leSample
2 |
3 | __all__ = [
4 | 'leSample'
5 | ]
6 |
7 |
--------------------------------------------------------------------------------
/leab/leDataset/data/evan_miller_ttest_default_1.csv:
--------------------------------------------------------------------------------
1 | 64.2
2 | 28.4
3 | 85.3
4 | 83.1
5 | 13.4
6 | 56.8
7 | 44.2
8 | 90.0
--------------------------------------------------------------------------------
/leab/leDataset/data/evan_miller_ttest_default_2.csv:
--------------------------------------------------------------------------------
1 | 45.0
2 | 29.5
3 | 32.3
4 | 49.3
5 | 18.3
6 | 34.2
7 | 43.9
8 | 13.8
9 | 27.4
10 | 43.4
--------------------------------------------------------------------------------
/leab/after/__init__.py:
--------------------------------------------------------------------------------
1 | from .leSuccess import leSuccess
2 | from .leAverage import leAverage
3 |
4 | __all__ = [
5 | 'leSuccess',
6 | 'leAverage'
7 | ]
8 |
9 |
--------------------------------------------------------------------------------
/leab/leDataset/__init__.py:
--------------------------------------------------------------------------------
1 | from .sample_le_success import SampleLeSuccess
2 | from .sample_le_average import SampleLeAverage
3 |
4 | __all__ = [
5 | 'SampleLeSuccess',
6 | 'SampleLeAverage'
7 | ]
8 |
9 |
--------------------------------------------------------------------------------
/.github/PULL_REQUEST_TEMPLATE.md:
--------------------------------------------------------------------------------
1 | ## Describe your changes
2 |
3 | ## Issue ticket number and link
4 |
5 | ## Checklist before requesting a review
6 |
7 | - [ ] I have performed a self-review of my code
8 | - [ ] If it is a core feature, I have added thorough tests.
9 | - [ ] Do we need to implement analytics?
10 | - [ ] Will this be part of a product update? If yes, please write one phrase about this update.
--------------------------------------------------------------------------------
/docs/_static/logo.py:
--------------------------------------------------------------------------------
1 | from PIL import Image, ImageDraw, ImageFont
2 |
3 | img = Image.new('RGBA',
4 | (840, 450),
5 | color = (255, 255, 255,0))
6 |
7 | fnt = ImageFont.truetype('Le Havre Light.ttf',
8 | 500)
9 | d = ImageDraw.Draw(img)
10 | d.text((0,0),
11 | "le AB",
12 | font=fnt,
13 | fill=(0, 0, 0))
14 |
15 | img.save('logo_le_ab.png')
--------------------------------------------------------------------------------
/mkdocs.yml:
--------------------------------------------------------------------------------
1 | site_name: LeAB Documentation
2 | use_directory_urls: false
3 | site_url: https://tlentali.github.io/leab/
4 | theme:
5 | name: material
6 |
7 | plugins:
8 | - search
9 | - mkdocstrings:
10 | default_handler: python
11 |
12 | nav:
13 | - Home: index.md
14 | - Reference:
15 | - leAverage: reference/leAverage.md
16 | - leSuccess: reference/leSuccess.md
17 | - leSample: reference/leSample.md
--------------------------------------------------------------------------------
/tests/test_leAverage.py:
--------------------------------------------------------------------------------
1 | from leab import after
2 | from leab import leDataset
3 |
4 |
5 | def test_get_size_absolute():
6 | data = leDataset.SampleLeAverage()
7 | ab_test = after.leAverage(data.A, data.B)
8 | assert ab_test.sample_A.confidence_interval == [
9 | 34.75214007684581,
10 | 81.59785992315418,
11 | ]
12 |
13 |
14 | def test_get_size_absolute():
15 | data = leDataset.SampleLeAverage()
16 | ab_test = after.leAverage(data.A, data.B)
17 | assert ab_test.get_verdict() == "Sample A mean is greater"
18 |
--------------------------------------------------------------------------------
/leab/leDataset/sample_le_average.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 | from pathlib import Path
3 |
4 |
5 | class SampleLeAverage:
6 | """
7 | Sample data to run with leAverage, from E. Miller default example in app.
8 | """
9 | def __init__(self):
10 |
11 | root = Path(__file__).parent
12 | file_1 = root / 'data' / 'evan_miller_ttest_default_1.csv'
13 | file_2 = root / 'data' / 'evan_miller_ttest_default_2.csv'
14 |
15 | self.A = pd.read_csv(
16 | file_1, names=["values"]
17 | )
18 | self.B = pd.read_csv(
19 | file_2, names=["values"]
20 | )
21 |
--------------------------------------------------------------------------------
/.github/workflows/deploy-docs.yml:
--------------------------------------------------------------------------------
1 | name: Deploy MkDocs to GitHub Pages
2 |
3 | on:
4 | push:
5 | branches:
6 | - master
7 |
8 | permissions:
9 | contents: write
10 |
11 | jobs:
12 | deploy:
13 | runs-on: ubuntu-latest
14 |
15 | steps:
16 | - name: Checkout code
17 | uses: actions/checkout@v4
18 |
19 | - name: Set up Python
20 | uses: actions/setup-python@v5
21 | with:
22 | python-version: '3.11'
23 |
24 | - name: Install dependencies
25 | run: |
26 | pip install mkdocs mkdocs-material mkdocstrings[python]
27 |
28 | - name: Deploy to GitHub Pages
29 | run: |
30 | mkdocs gh-deploy --force
31 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Feature request
3 | about: Suggest an idea for this project
4 | title: ''
5 | labels: ''
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Is your feature request related to a problem? Please describe.**
11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
12 |
13 | **Describe the solution you'd like**
14 | A clear and concise description of what you want to happen.
15 |
16 | **Describe alternatives you've considered**
17 | A clear and concise description of any alternative solutions or features you've considered.
18 |
19 | **Additional context**
20 | Add any other context or screenshots about the feature request here.
21 |
--------------------------------------------------------------------------------
/.github/workflows/python-tests.yml:
--------------------------------------------------------------------------------
1 | name: Python Tests
2 |
3 | on:
4 | push:
5 | branches: [ master ]
6 | pull_request:
7 | branches: [ master ]
8 |
9 | jobs:
10 | test:
11 | runs-on: ubuntu-latest
12 |
13 | steps:
14 | - name: Checkout code
15 | uses: actions/checkout@v3
16 |
17 | - name: Install uv
18 | run: |
19 | curl -Ls https://astral.sh/uv/install.sh | bash
20 | echo "$HOME/.cargo/bin" >> $GITHUB_PATH
21 |
22 | - name: Create the virtual environnement using uv
23 | run: uv venv
24 |
25 | - name: Activate the venv and install dépendancies
26 | run: |
27 | source .venv/bin/activate
28 | uv sync
29 |
30 | - name: Launch tests
31 | run: |
32 | source .venv/bin/activate
33 | pytest
--------------------------------------------------------------------------------
/tests/test_leSuccess.py:
--------------------------------------------------------------------------------
1 | from leab import after
2 | from leab import leDataset
3 |
4 |
5 | def test_get_confidence_interval():
6 | data = leDataset.SampleLeSuccess()
7 | ab_test = after.leSuccess(data.A, data.B, confidence_level=0.95)
8 | assert ab_test.sample_A.confidence_interval == [
9 | 8.526343659939133,
10 | 22.13718821096384,
11 | ]
12 |
13 |
14 | def test_get_p_value():
15 | data = leDataset.SampleLeSuccess()
16 | ab_test = after.leSuccess(data.A, data.B, confidence_level=0.95)
17 | assert ab_test.p_value == 0.25870176105718934
18 |
19 |
20 | def test_get_verdict():
21 | data = leDataset.SampleLeSuccess()
22 | ab_test = after.leSuccess(data.A, data.B, confidence_level=0.95)
23 | assert ab_test.get_verdict() == "No significant difference"
24 |
--------------------------------------------------------------------------------
/leab/leDataset/data/evan_miller_chi2_default_1.csv:
--------------------------------------------------------------------------------
1 | 1
2 | 0
3 | 1
4 | 1
5 | 0
6 | 0
7 | 1
8 | 1
9 | 0
10 | 0
11 | 0
12 | 1
13 | 1
14 | 1
15 | 0
16 | 0
17 | 1
18 | 0
19 | 0
20 | 0
21 | 0
22 | 0
23 | 0
24 | 1
25 | 1
26 | 1
27 | 1
28 | 0
29 | 0
30 | 0
31 | 0
32 | 0
33 | 0
34 | 0
35 | 0
36 | 0
37 | 0
38 | 0
39 | 0
40 | 0
41 | 0
42 | 0
43 | 0
44 | 0
45 | 0
46 | 0
47 | 0
48 | 0
49 | 0
50 | 0
51 | 0
52 | 0
53 | 0
54 | 0
55 | 0
56 | 0
57 | 0
58 | 0
59 | 0
60 | 0
61 | 0
62 | 0
63 | 0
64 | 0
65 | 0
66 | 0
67 | 0
68 | 0
69 | 0
70 | 1
71 | 0
72 | 0
73 | 0
74 | 0
75 | 0
76 | 0
77 | 0
78 | 0
79 | 0
80 | 0
81 | 0
82 | 0
83 | 0
84 | 0
85 | 0
86 | 0
87 | 0
88 | 0
89 | 0
90 | 0
91 | 0
92 | 0
93 | 0
94 | 0
95 | 0
96 | 0
97 | 0
98 | 0
99 | 0
100 | 0
--------------------------------------------------------------------------------
/leab/leDataset/data/evan_miller_chi2_default_2.csv:
--------------------------------------------------------------------------------
1 | 1
2 | 0
3 | 0
4 | 1
5 | 0
6 | 0
7 | 0
8 | 1
9 | 0
10 | 0
11 | 0
12 | 0
13 | 1
14 | 0
15 | 0
16 | 0
17 | 0
18 | 0
19 | 0
20 | 1
21 | 0
22 | 0
23 | 0
24 | 0
25 | 0
26 | 0
27 | 0
28 | 0
29 | 0
30 | 0
31 | 1
32 | 0
33 | 0
34 | 1
35 | 0
36 | 1
37 | 0
38 | 0
39 | 0
40 | 1
41 | 0
42 | 0
43 | 1
44 | 1
45 | 0
46 | 1
47 | 0
48 | 0
49 | 1
50 | 1
51 | 1
52 | 0
53 | 0
54 | 1
55 | 0
56 | 0
57 | 0
58 | 0
59 | 1
60 | 0
61 | 0
62 | 1
63 | 0
64 | 0
65 | 0
66 | 1
67 | 1
68 | 0
69 | 0
70 | 0
71 | 0
72 | 0
73 | 0
74 | 0
75 | 0
76 | 0
77 | 0
78 | 0
79 | 0
80 | 0
81 | 0
82 | 0
83 | 0
84 | 0
85 | 0
86 | 0
87 | 0
88 | 0
89 | 0
90 | 0
91 | 0
92 | 0
93 | 0
94 | 0
95 | 0
96 | 0
97 | 0
98 | 0
99 | 0
100 | 0
--------------------------------------------------------------------------------
/leab/leDataset/data/evan_miller_chi2_default_3.csv:
--------------------------------------------------------------------------------
1 | 0
2 | 0
3 | 0
4 | 0
5 | 0
6 | 0
7 | 0
8 | 0
9 | 0
10 | 0
11 | 0
12 | 1
13 | 0
14 | 0
15 | 0
16 | 0
17 | 1
18 | 0
19 | 0
20 | 0
21 | 0
22 | 0
23 | 0
24 | 0
25 | 0
26 | 0
27 | 0
28 | 0
29 | 0
30 | 0
31 | 0
32 | 0
33 | 0
34 | 0
35 | 0
36 | 0
37 | 0
38 | 0
39 | 0
40 | 0
41 | 0
42 | 0
43 | 0
44 | 0
45 | 0
46 | 0
47 | 0
48 | 0
49 | 0
50 | 0
51 | 0
52 | 0
53 | 0
54 | 0
55 | 0
56 | 0
57 | 0
58 | 0
59 | 1
60 | 0
61 | 0
62 | 1
63 | 0
64 | 0
65 | 1
66 | 0
67 | 0
68 | 0
69 | 0
70 | 0
71 | 0
72 | 0
73 | 1
74 | 0
75 | 0
76 | 0
77 | 0
78 | 0
79 | 1
80 | 0
81 | 0
82 | 0
83 | 0
84 | 1
85 | 0
86 | 0
87 | 0
88 | 1
89 | 0
90 | 0
91 | 0
92 | 0
93 | 1
94 | 0
95 | 0
96 | 0
97 | 0
98 | 0
99 | 0
100 | 0
--------------------------------------------------------------------------------
/leab/leDataset/data/evan_miller_chi2_default_4.csv:
--------------------------------------------------------------------------------
1 | 0
2 | 0
3 | 0
4 | 0
5 | 0
6 | 0
7 | 0
8 | 0
9 | 0
10 | 0
11 | 0
12 | 0
13 | 0
14 | 0
15 | 0
16 | 0
17 | 0
18 | 0
19 | 0
20 | 0
21 | 0
22 | 0
23 | 0
24 | 0
25 | 0
26 | 0
27 | 0
28 | 0
29 | 0
30 | 0
31 | 0
32 | 0
33 | 0
34 | 0
35 | 0
36 | 0
37 | 0
38 | 0
39 | 0
40 | 0
41 | 0
42 | 0
43 | 0
44 | 0
45 | 0
46 | 0
47 | 0
48 | 0
49 | 0
50 | 0
51 | 0
52 | 0
53 | 0
54 | 0
55 | 0
56 | 0
57 | 0
58 | 0
59 | 0
60 | 0
61 | 0
62 | 0
63 | 0
64 | 0
65 | 0
66 | 0
67 | 0
68 | 0
69 | 0
70 | 0
71 | 0
72 | 0
73 | 0
74 | 0
75 | 0
76 | 0
77 | 0
78 | 0
79 | 0
80 | 0
81 | 0
82 | 0
83 | 1
84 | 0
85 | 0
86 | 0
87 | 1
88 | 0
89 | 0
90 | 0
91 | 0
92 | 0
93 | 0
94 | 0
95 | 0
96 | 0
97 | 0
98 | 0
99 | 0
100 | 0
--------------------------------------------------------------------------------
/leab/leDataset/data/evan_miller_chi2_default_5.csv:
--------------------------------------------------------------------------------
1 | 1
2 | 1
3 | 1
4 | 1
5 | 1
6 | 1
7 | 1
8 | 1
9 | 1
10 | 1
11 | 1
12 | 1
13 | 1
14 | 1
15 | 1
16 | 1
17 | 1
18 | 1
19 | 1
20 | 1
21 | 1
22 | 1
23 | 1
24 | 1
25 | 1
26 | 1
27 | 1
28 | 1
29 | 1
30 | 1
31 | 1
32 | 1
33 | 1
34 | 1
35 | 1
36 | 1
37 | 1
38 | 1
39 | 1
40 | 1
41 | 1
42 | 1
43 | 1
44 | 1
45 | 1
46 | 1
47 | 1
48 | 1
49 | 1
50 | 1
51 | 1
52 | 1
53 | 1
54 | 1
55 | 1
56 | 1
57 | 1
58 | 1
59 | 1
60 | 1
61 | 1
62 | 1
63 | 1
64 | 1
65 | 1
66 | 1
67 | 1
68 | 1
69 | 1
70 | 1
71 | 1
72 | 1
73 | 1
74 | 1
75 | 1
76 | 1
77 | 1
78 | 1
79 | 1
80 | 1
81 | 1
82 | 1
83 | 1
84 | 1
85 | 1
86 | 1
87 | 1
88 | 1
89 | 1
90 | 1
91 | 0
92 | 0
93 | 0
94 | 0
95 | 0
96 | 0
97 | 0
98 | 0
99 | 0
100 | 0
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug report
3 | about: Create a report to help us improve
4 | title: ''
5 | labels: ''
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Describe the bug**
11 | A clear and concise description of what the bug is.
12 |
13 | **To Reproduce**
14 | Steps to reproduce the behavior:
15 | 1. Go to '...'
16 | 2. Click on '....'
17 | 3. Scroll down to '....'
18 | 4. See error
19 |
20 | **Expected behavior**
21 | A clear and concise description of what you expected to happen.
22 |
23 | **Screenshots**
24 | If applicable, add screenshots to help explain your problem.
25 |
26 | **Desktop (please complete the following information):**
27 | - OS: [e.g. iOS]
28 | - Browser [e.g. chrome, safari]
29 | - Version [e.g. 22]
30 |
31 | **Smartphone (please complete the following information):**
32 | - Device: [e.g. iPhone6]
33 | - OS: [e.g. iOS8.1]
34 | - Browser [e.g. stock browser, safari]
35 | - Version [e.g. 22]
36 |
37 | **Additional context**
38 | Add any other context about the problem here.
39 |
--------------------------------------------------------------------------------
/leab/leDataset/sample_le_success.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 | from pathlib import Path
3 |
4 |
5 | class SampleLeSuccess:
6 | """
7 | Sample data to run with leSuccess, from E. Miller default example in app.
8 | """
9 | def __init__(self):
10 |
11 | root = Path(__file__).parent
12 | file_A = root / 'data' / 'evan_miller_chi2_default_1.csv'
13 | file_B = root / 'data' / 'evan_miller_chi2_default_2.csv'
14 | file_C = root / 'data' / 'evan_miller_chi2_default_3.csv'
15 | file_D = root / 'data' / 'evan_miller_chi2_default_4.csv'
16 | file_E = root / 'data' / 'evan_miller_chi2_default_5.csv'
17 |
18 | self.A = pd.read_csv(
19 | file_A, names=["success"]
20 | )
21 | self.B = pd.read_csv(
22 | file_B, names=["success"]
23 | )
24 | self.C = pd.read_csv(
25 | file_C, names=["success"]
26 | )
27 | self.D = pd.read_csv(
28 | file_D, names=["success"]
29 | )
30 | self.E = pd.read_csv(
31 | file_E, names=["success"]
32 | )
--------------------------------------------------------------------------------
/tests/test_leSample.py:
--------------------------------------------------------------------------------
1 | from leab import before
2 |
3 |
4 | def test_get_size_per_variation_absolute():
5 | ab_test = before.leSample(conversion_rate=20, min_detectable_effect=2)
6 | assert ab_test.get_size_per_variation() == 6347
7 |
8 |
9 | def test_get_size_per_variation_relative():
10 | ab_test = before.leSample(
11 | conversion_rate=20, min_detectable_effect=2, absolute=False
12 | )
13 | assert ab_test.get_size_per_variation() == 157328
14 |
15 |
16 | def test_get_total_size_absolute():
17 | ab_test = before.leSample(conversion_rate=20, min_detectable_effect=2)
18 | assert ab_test.get_total_size() == 12694
19 |
20 |
21 | def test_get_total_size_relative():
22 | ab_test = before.leSample(
23 | conversion_rate=20, min_detectable_effect=2, absolute=False
24 | )
25 | assert ab_test.get_total_size() == 314656
26 |
27 |
28 | def test_get_duration_absolute():
29 | ab_test = before.leSample(conversion_rate=20, min_detectable_effect=2)
30 | assert ab_test.get_duration(avg_daily_total_visitor=1000) == 13
31 |
32 |
33 | def test_get_duration_relative():
34 | ab_test = before.leSample(
35 | conversion_rate=20, min_detectable_effect=2, absolute=False
36 | )
37 | assert ab_test.get_duration(avg_daily_total_visitor=1000) == 315
38 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "leab"
3 | version = "0.1.11"
4 | description = "Add your description here"
5 | readme = "README.md"
6 | requires-python = ">=3.12"
7 | dependencies = [
8 | "numpy>=2.2.4",
9 | "pandas>=2.2.3",
10 | "scipy>=1.15.2",
11 | "statsmodels>=0.14.4",
12 | ]
13 |
14 | [project.urls]
15 | Repository = "https://github.com/tlentali/leab"
16 | Documentation = "https://tlentali.github.io/leab/"
17 |
18 | [dependency-groups]
19 | dev = [
20 | "black>=25.1.0",
21 | "jupyter>=1.1.1",
22 | "mkdocs>=1.6.1",
23 | "mkdocs-material>=9.6.9",
24 | "mkdocstrings[python]>=0.29.0",
25 | "pylint>=3.3.6",
26 | "pytest>=8.3.5",
27 | ]
28 |
29 | [tool.pytest.ini_options]
30 | pythonpath = [
31 | "."
32 | ]
33 |
34 | [build-system]
35 | requires = ["setuptools>=61.0"]
36 | build-backend = "setuptools.build_meta"
37 |
38 | [tool.setuptools.packages.find]
39 | where = ["."]
40 | include = ["leab"]
41 | exclude = ["tests*", "notebook*", "misc*"]
42 |
43 | [tool.bumpver]
44 | current_version = "0.1.11"
45 | version_pattern = "MAJOR.MINOR.PATCH"
46 | commit_message = "bump: {old_version} → {new_version}"
47 | tag_message = "v{new_version}"
48 |
49 | [tool.bumpver.file_patterns]
50 | "pyproject.toml" = [
51 | 'current_version = "{version}"',
52 | 'version = "{version}"'
53 | ]
54 |
55 | [tool.uv]
56 | required-version = ">=0.6.11,<0.7"
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | BSD 3-Clause License
2 |
3 | Copyright (c) 2025, tlentali
4 | All rights reserved.
5 |
6 | Redistribution and use in source and binary forms, with or without
7 | modification, are permitted provided that the following conditions are met:
8 |
9 | 1. Redistributions of source code must retain the above copyright notice, this
10 | list of conditions and the following disclaimer.
11 |
12 | 2. Redistributions in binary form must reproduce the above copyright notice,
13 | this list of conditions and the following disclaimer in the documentation
14 | and/or other materials provided with the distribution.
15 |
16 | 3. Neither the name of the copyright holder nor the names of its
17 | contributors may be used to endorse or promote products derived from
18 | this software without specific prior written permission.
19 |
20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
--------------------------------------------------------------------------------
/docs/index.md:
--------------------------------------------------------------------------------
1 | 
2 |
3 | ---
4 |
5 | `LeAB` is a Python library for AB testing analysis.
6 |
7 | Get ready to make a decision!
8 |
9 | `LeAB` is a Python package designed to streamline A/B test analysis, from raw data to insightful results. It provides a clean API to define experiments, compute metrics, and interpret statistical outcomes with confidence.
10 | AB testing has never been more popular, especially at Internet-based companies.
11 | Even if each test is unique, some questions seem to be asked again and again:
12 |
13 | - when is my test going to be statistically significant?
14 | - is A more successful than B?
15 | - does A generate more than B?
16 |
17 | Strong statistical knowledge is required to handle an AB test correctly from start to end.
18 | To answer those questions in a simple and robust way, we built **le AB**.
19 | Let Python do the AB testing analysis!
20 |
21 | ## 🛠 Installation
22 |
23 | `LeAB` is intended to work with Python 3.12 or above.
24 |
25 | Installation can be done using `pip`:
26 |
27 | ```bash
28 | pip install leab
29 | ```
30 |
31 | There are [wheels](https://pypi.org/project/leab/#files) available for Linux, MacOS, and Windows.
32 |
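## ⚡️ Quick start

Before launching an AB test, `leSample` tells you how many visits each variation needs. A minimal sketch, mirroring the README quick start and the values asserted in the test suite:

```python
from leab import before

# Baseline conversion rate of 20% and a minimum detectable effect of 2%
ab_test = before.leSample(conversion_rate=20,
                          min_detectable_effect=2)

ab_test.get_size_per_variation()                    # 6347 visits per variation
ab_test.get_duration(avg_daily_total_visitor=1000)  # about 13 days at 1000 daily visitors
```

The `sample_size.ipynb` notebook walks through the same computation step by step.
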
33 | ## ✨ Why LEAB?
34 |
35 | A/B testing is powerful, but it often requires stitching together multiple tools and manual steps. LEAB aims to:
36 |
37 | - 📊 Simplify and standardize A/B test workflows
38 | - 🔍 Ensure statistical rigor with built-in checks
39 | - 🧪 Support simulations and power analysis
40 |
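Once the test has run, the `after` tools deliver the verdict. A minimal sketch using the bundled sample data (the same values asserted in `tests/test_leSuccess.py`):

```python
from leab import after
from leab import leDataset

# Bundled Evan Miller sample data: 1 means success, 0 means fail
data = leDataset.SampleLeSuccess()

ab_test = after.leSuccess(data.A, data.B, confidence_level=0.95)
ab_test.p_value        # 0.2587...
ab_test.get_verdict()  # 'No significant difference'
```

The `success_comparaison.ipynb` and `average_comparaison.ipynb` notebooks cover both comparison workflows in detail.
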
41 | Let the data speak—with confidence.
42 |
43 | Made with 💙 from Bordeaux 🍷 and Montréal 🍁.
44 |
--------------------------------------------------------------------------------
/.github/workflows/release.yml:
--------------------------------------------------------------------------------
1 | name: Publish to PyPI
2 |
3 | on:
4 | push:
5 | branches:
6 | - master
7 |
8 | jobs:
9 | release:
10 | runs-on: ubuntu-latest
11 | steps:
12 | - name: Checkout code
13 | uses: actions/checkout@v4
14 |
15 | - name: Set up Python
16 | uses: actions/setup-python@v5
17 | with:
18 | python-version: "3.12"
19 |
20 | - name: Install tools
21 | run: |
22 | python -m pip install --upgrade pip
23 | pip install bumpver build twine
24 |
25 | - name: Determine version bump type
26 | id: version
27 | run: |
28 | git fetch --tags
29 | LAST_COMMIT_MESSAGE=$(git log -1 --pretty=%B)
30 | echo "Last commit: $LAST_COMMIT_MESSAGE"
31 |
32 | if [[ "$LAST_COMMIT_MESSAGE" == *#major* ]]; then
33 | echo "bump=major" >> $GITHUB_OUTPUT
34 | elif [[ "$LAST_COMMIT_MESSAGE" == *#minor* ]]; then
35 | echo "bump=minor" >> $GITHUB_OUTPUT
36 | else
37 | echo "bump=patch" >> $GITHUB_OUTPUT
38 | fi
39 |
40 | - name: Bump version
41 | run: |
42 | bumpver update --${{ steps.version.outputs.bump }}
43 |
44 | - name: Commit bumped version
45 | run: |
46 | git config user.name "GitHub Actions"
47 | git config user.email "actions@github.com"
48 | git add .
49 | git commit -m "chore: bump version [skip ci]"
50 | git push
51 |
52 | - name: Build and publish
53 | env:
54 | TWINE_USERNAME: __token__
55 | TWINE_PASSWORD: ${{ secrets.PYPI_API_TOKEN }}
56 | run: |
57 | python -m build
58 | twine upload dist/*
59 |
--------------------------------------------------------------------------------
/SECURITY.md:
--------------------------------------------------------------------------------
1 | # `leAB` Open Source Security Policies and Procedures
2 |
3 | This document outlines security procedures and general policies for the `leAB` Open Source projects as found on [https://github.com/tlentali/leab](https://github.com/tlentali/leab).
4 |
5 | * [Reporting a Vulnerability](#reporting-a-vulnerability)
6 | * [Disclosure Policy](#disclosure-policy)
7 |
8 | ## Reporting a Vulnerability
9 |
10 | The `leAB` team and community take all security vulnerabilities
11 | seriously. Thank you for improving the security of our open source
12 | software. We appreciate your efforts and responsible disclosure and will
13 | make every effort to acknowledge your contributions.
14 |
15 | Report security vulnerabilities by emailing the `leAB` security team at thomas.lentali@gmail.com
16 |
17 | The lead maintainer will acknowledge your email within 24 hours, and will send a more detailed response within 48 hours indicating the next steps in handling your report.
18 | After the initial reply to your report, the security team will endeavor to keep you informed of the progress towards a fix and full announcement, and may ask for additional information or guidance.
19 |
20 | Report security vulnerabilities in third-party modules to the person or team maintaining the module.
21 |
22 | ## Disclosure Policy
23 |
24 | When the security team receives a security bug report, they will assign it to a primary handler.
25 | This person will coordinate the fix and release process, involving the following steps:
26 |
27 | - Confirm the problem and determine the affected versions.
28 | - Audit code to find any potential similar problems.
29 | - Prepare fixes for all releases still under maintenance.
30 |
31 | These fixes will be released as fast as possible to `PyPI`.
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | pip-wheel-metadata/
24 | share/python-wheels/
25 | *.egg-info/
26 | .installed.cfg
27 | *.egg
28 | MANIFEST
29 |
30 | # PyInstaller
31 | # Usually these files are written by a python script from a template
32 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
33 | *.manifest
34 | *.spec
35 |
36 | # Installer logs
37 | pip-log.txt
38 | pip-delete-this-directory.txt
39 |
40 | # Unit test / coverage reports
41 | htmlcov/
42 | .tox/
43 | .nox/
44 | .coverage
45 | .coverage.*
46 | .cache
47 | nosetests.xml
48 | coverage.xml
49 | *.cover
50 | *.py,cover
51 | .hypothesis/
52 | .pytest_cache/
53 | cover/
54 |
55 | # Translations
56 | *.mo
57 | *.pot
58 |
59 | # Django stuff:
60 | *.log
61 | local_settings.py
62 | db.sqlite3
63 | db.sqlite3-journal
64 |
65 | # Flask stuff:
66 | instance/
67 | .webassets-cache
68 |
69 | # Scrapy stuff:
70 | .scrapy
71 |
72 | # Sphinx documentation
73 | docs/_build/
74 |
75 | # PyBuilder
76 | .pybuilder/
77 | target/
78 |
79 | # Jupyter Notebook
80 | .ipynb_checkpoints
81 |
82 | # IPython
83 | profile_default/
84 | ipython_config.py
85 |
86 | # pyenv
87 | # For a library or package, you might want to ignore these files since the code is
88 | # intended to run in multiple environments; otherwise, check them in:
89 | # .python-version
90 |
91 | # pipenv
92 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
93 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
94 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
95 | # install all needed dependencies.
96 | #Pipfile.lock
97 |
98 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
99 | __pypackages__/
100 |
101 | # Celery stuff
102 | celerybeat-schedule
103 | celerybeat.pid
104 |
105 | # SageMath parsed files
106 | *.sage.py
107 |
108 | # Environments
109 | .env
110 | .venv
111 | env/
112 | venv/
113 | ENV/
114 | env.bak/
115 | venv.bak/
116 |
117 | # Spyder project settings
118 | .spyderproject
119 | .spyproject
120 |
121 | # Rope project settings
122 | .ropeproject
123 |
124 | # mkdocs documentation
125 | /site
126 |
127 | # mypy
128 | .mypy_cache/
129 | .dmypy.json
130 | dmypy.json
131 |
132 | # Pyre type checker
133 | .pyre/
134 |
135 | # pytype static type analyzer
136 | .pytype/
137 |
138 | # Cython debug symbols
139 | cython_debug/
140 |
141 | # static files generated from Django application using `collectstatic`
142 | media
143 | static
144 |
145 | # docs
146 | =0.5.0
147 | =1.8.3
148 | make.bat
149 |
150 | # vscode
151 | .vscode
152 |
153 | # uv
154 | .python-version
155 | *.lock
156 |
--------------------------------------------------------------------------------
/leab/before/leTemplate.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | {{ title }}
6 |
7 |
85 |
86 |
87 |
88 |
89 |
92 |
93 |
94 |
95 | Sample size per variation :
96 |
97 | {{ html_plot_sample_size }}
98 |
99 |
100 |
101 |
102 | Baseline conversion rate and Minimum Detectable Effect :
103 |
104 | {{ html_plot_baseline_conversion_rate }}
105 |
106 |
107 |
108 |
109 |
112 |
113 |
114 |
115 |
116 |
--------------------------------------------------------------------------------
/docs/notebooks/sample_size.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Le Sample\n",
8 | "\n",
9 | "This tuto shows you how to get the number of sample needed per variation to reach a statistical significant result. \n",
10 | "First, let's import the right tools :"
11 | ]
12 | },
13 | {
14 | "cell_type": "code",
15 | "execution_count": null,
16 | "metadata": {},
17 | "outputs": [],
18 | "source": [
19 | "from leab import before"
20 | ]
21 | },
22 | {
23 | "cell_type": "markdown",
24 | "metadata": {},
25 | "source": [
26 | "As we want to know the sample needed, we stand **before** launching the AB test. \n",
27 | "Then, from the `before` import, we create a `leSample` instance that we call `ab_test`. \n",
28 | "In this example we want to get a volume of sample per variation needed if we expect a minimum detectable effect of 2% on a baseline conversion rate of 20% :"
29 | ]
30 | },
31 | {
32 | "cell_type": "code",
33 | "execution_count": null,
34 | "metadata": {},
35 | "outputs": [
36 | {
37 | "data": {
38 | "text/plain": [
39 | "6347"
40 | ]
41 | },
42 | "execution_count": 2,
43 | "metadata": {},
44 | "output_type": "execute_result"
45 | }
46 | ],
47 | "source": [
48 | "ab_test = before.leSample(conversion_rate=20,\n",
49 | " min_detectable_effect=2)\n",
50 | "ab_test.get_size_per_variation()"
51 | ]
52 | },
53 | {
54 | "cell_type": "markdown",
55 | "metadata": {},
56 | "source": [
57 | "So the result is `6347` visits per variation (`6347` for the A variation and `6347` more for the B variation). \n",
58 | "If we know the trafic expected on this test, we can ends up with a approximate number of days needed to run the test :"
59 | ]
60 | },
61 | {
62 | "cell_type": "code",
63 | "execution_count": 3,
64 | "metadata": {},
65 | "outputs": [
66 | {
67 | "data": {
68 | "text/plain": [
69 | "13"
70 | ]
71 | },
72 | "execution_count": 3,
73 | "metadata": {},
74 | "output_type": "execute_result"
75 | }
76 | ],
77 | "source": [
78 | "ab_test.get_duration(avg_daily_total_visitor=1000)"
79 | ]
80 | },
81 | {
82 | "cell_type": "markdown",
83 | "metadata": {},
84 | "source": [
85 | "You will need `13` days to run the test. "
86 | ]
87 | }
88 | ],
89 | "metadata": {
90 | "file_extension": ".py",
91 | "kernelspec": {
92 | "display_name": "Python 3",
93 | "language": "python",
94 | "name": "python3"
95 | },
96 | "language_info": {
97 | "codemirror_mode": {
98 | "name": "ipython",
99 | "version": 3
100 | },
101 | "file_extension": ".py",
102 | "mimetype": "text/x-python",
103 | "name": "python",
104 | "nbconvert_exporter": "python",
105 | "pygments_lexer": "ipython3",
106 | "version": "3.6.9"
107 | },
108 | "mimetype": "text/x-python",
109 | "name": "python",
110 | "npconvert_exporter": "python",
111 | "pygments_lexer": "ipython3",
112 | "version": 3
113 | },
114 | "nbformat": 4,
115 | "nbformat_minor": 4
116 | }
117 |
--------------------------------------------------------------------------------
/docs/notebooks/success_comparaison.ipynb:
--------------------------------------------------------------------------------
1 | {"cells":[{"cell_type":"markdown","metadata":{},"source":["# Le Success\n","\n","Here we will compare success reached in A versus B. \n","First, lets import some data from `leDataset` :"]},{"cell_type":"code","execution_count":2,"metadata":{},"outputs":[],"source":"from leab import leDataset\n\ndata = leDataset.SampleLeSuccess()"},{"cell_type":"markdown","metadata":{},"source":["Then we get `data.A` and `data.B`, where **1** means *success* and **0** means *fail* :"]},{"cell_type":"code","execution_count":3,"metadata":{},"outputs":[{"data":{"text/html":["\n","\n","
\n"," \n"," \n"," \n"," success \n"," \n"," \n"," \n"," \n"," 0 \n"," 1 \n"," \n"," \n"," 1 \n"," 0 \n"," \n"," \n"," 2 \n"," 1 \n"," \n"," \n"," 3 \n"," 1 \n"," \n"," \n"," 4 \n"," 0 \n"," \n"," \n","
\n","
"],"text/plain":[" success\n","0 1\n","1 0\n","2 1\n","3 1\n","4 0"]},"execution_count":3,"metadata":{},"output_type":"execute_result"}],"source":"data.A.head()"},{"cell_type":"markdown","metadata":{},"source":["Now, we call the `after` tool from `leab`, because, as we are comparing results, we assume that it appends after launching the AB test."]},{"cell_type":"code","execution_count":4,"metadata":{},"outputs":[],"source":"from leab import after"},{"cell_type":"markdown","metadata":{},"source":["We build an instance `ab_test` including our data for a confidence level fixed at 95% :"]},{"cell_type":"code","execution_count":5,"metadata":{},"outputs":[],"source":"ab_test = after.leSuccess(data.A, data.B, confidence_level=0.95)"},{"cell_type":"markdown","metadata":{},"source":["We have acces at some complementary infos like the *confidence interval* of a sample or the *p value* obtained :"]},{"cell_type":"code","execution_count":6,"metadata":{},"outputs":[{"data":{"text/plain":["[8.526343659939133, 22.13718821096384]"]},"execution_count":6,"metadata":{},"output_type":"execute_result"}],"source":"ab_test.sample_A.confidence_interval"},{"cell_type":"code","execution_count":7,"metadata":{},"outputs":[{"data":{"text/plain":["0.25870176105718934"]},"execution_count":7,"metadata":{},"output_type":"execute_result"}],"source":"ab_test.p_value"},{"cell_type":"markdown","metadata":{},"source":["To conclude on the test, we call the `get_verdict()` methode :"]},{"cell_type":"code","execution_count":8,"metadata":{},"outputs":[{"name":"stdout","output_type":"stream","text":["No significant difference\n"]}],"source":"ab_test.get_verdict()"},{"cell_type":"markdown","metadata":{},"source":["So, at 95% of confidence level, A and B variations are quite as succeful."]}],"nbformat":4,"nbformat_minor":2,"metadata":{"language_info":{"name":"python","codemirror_mode":{"name":"ipython","version":3}},"orig_nbformat":2,"file_extension":".py","mimetype":"text/x-python","name":"python","npconvert_exporter":"python","pygments_lexer":"ipython3","version":3}}
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | # Code of Conduct
2 |
3 | ## Our Pledge
4 |
5 | In the interest of fostering an open and welcoming environment, we as
6 | contributors and maintainers pledge to making participation in our project and
7 | our community a harassment-free experience for everyone, regardless of age, body
8 | size, disability, ethnicity, gender identity and expression, level of experience,
9 | nationality, personal appearance, race, religion, or sexual identity and
10 | orientation.
11 |
12 | ## Our Standards
13 |
14 | Examples of behavior that contributes to creating a positive environment
15 | include:
16 |
17 | * Using welcoming and inclusive language
18 | * Being respectful of differing viewpoints and experiences
19 | * Gracefully accepting constructive criticism
20 | * Focusing on what is best for the community
21 | * Showing empathy towards other community members
22 |
23 | Examples of unacceptable behavior by participants include:
24 |
25 | * The use of sexualized language or imagery and unwelcome sexual attention or
26 | advances
27 | * Trolling, insulting/derogatory comments, and personal or political attacks
28 | * Public or private harassment
29 | * Publishing others' private information, such as a physical or electronic
30 | address, without explicit permission
31 | * Other conduct which could reasonably be considered inappropriate in a
32 | professional setting
33 |
34 | ## Our Responsibilities
35 |
36 | Project maintainers are responsible for clarifying the standards of acceptable
37 | behavior and are expected to take appropriate and fair corrective action in
38 | response to any instances of unacceptable behavior.
39 |
40 | Project maintainers have the right and responsibility to remove, edit, or
41 | reject comments, commits, code, wiki edits, issues, and other contributions
42 | that are not aligned to this Code of Conduct, or to ban temporarily or
43 | permanently any contributor for other behaviors that they deem inappropriate,
44 | threatening, offensive, or harmful.
45 |
46 | ## Scope
47 |
48 | This Code of Conduct applies both within project spaces and in public spaces
49 | when an individual is representing the project or its community. Examples of
50 | representing a project or community include using an official project e-mail
51 | address, posting via an official social media account, or acting as an appointed
52 | representative at an online or offline event. Representation of a project may be
53 | further defined and clarified by project maintainers.
54 |
55 | ## Enforcement
56 |
57 | Instances of abusive, harassing, or otherwise unacceptable behavior may be
58 | reported by contacting the project team at **thomas.lentali@gmail.com**. All
59 | complaints will be reviewed and investigated and will result in a response that
60 | is deemed necessary and appropriate to the circumstances. The project team is
61 | obligated to maintain confidentiality with regard to the reporter of an incident.
62 | Further details of specific enforcement policies may be posted separately.
63 |
64 | Project maintainers who do not follow or enforce the Code of Conduct in good
65 | faith may face temporary or permanent repercussions as determined by other
66 | members of the project's leadership.
67 |
68 | ## Attribution
69 |
70 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
71 | available at [http://contributor-covenant.org/version/1/4][version]
72 |
73 | [homepage]: http://contributor-covenant.org
74 | [version]: http://contributor-covenant.org/version/1/4/
--------------------------------------------------------------------------------
/docs/_static/logo.svg:
--------------------------------------------------------------------------------
1 |
2 |
13 |
15 |
17 |
18 |
20 | image/svg+xml
21 |
23 |
24 |
25 |
26 |
27 |
30 |
35 |
39 |
43 |
47 |
51 |
52 |
53 |
54 |
--------------------------------------------------------------------------------
/leab/after/leSuccess.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 | import statsmodels.stats.proportion as smp
3 | from scipy.stats import chi2
4 | import scipy.stats
5 |
6 |
7 | class Chi2Sample:
8 | def __init__(self, sample: pd.DataFrame, confidence_level: float = 0.95):
9 | self.sample = sample
10 | self.confidence_level = confidence_level
11 | self.compute()
12 |
13 | def compute(self) -> None:
14 | self.get_success()
15 | self.get_trial()
16 | self.get_confidence_interval()
17 |
18 | def get_success(self) -> None:
19 | self.success = self.sample.sum().iloc[0]
20 |
21 | def get_trial(self) -> None:
22 | self.trial = len(self.sample)
23 |
24 | def get_confidence_interval(self) -> None:
25 | self.conf_int = smp.proportion_confint(
26 | self.success, self.trial, alpha=1 - self.confidence_level, method="wilson"
27 | )
28 | self.conf_int_inf = self.conf_int[0] * 100
29 | self.conf_int_sup = self.conf_int[1] * 100
30 | self.confidence_interval = [self.conf_int_inf, self.conf_int_sup]
31 |
32 |
33 | class leSuccess(Chi2Sample):
34 | """
35 | Build leSuccess object.
36 |
37 | Parameters:
38 |
39 | sample_A (pd.DataFrame): A sample data.
40 | sample_B (pd.DataFrame): B sample data.
41 | confidence_level (float): desired confidence level, default : 95%.
42 |
43 | Example:
44 |
45 | ::
46 |
47 | >>> from leab import leDataset
48 | >>> from leab import after
49 |
50 | >>> data = leDataset.SampleLeSuccess()
51 | >>> ab_test = after.leSuccess(data.A,
52 | ... data.B,
53 | ... confidence_level=0.95)
54 | >>> ab_test.sample_A.confidence_interval
55 |
56 | [8.526343659939133, 22.13718821096384]
57 |
58 | >>> ab_test.p_value
59 |
60 | 0.25870176105718934
61 |
62 | >>> ab_test.get_verdict()
63 |
64 | 'No significant difference'
65 | """
66 | def __init__(
67 | self,
68 | sample_A: pd.DataFrame,
69 | sample_B: pd.DataFrame,
70 | confidence_level: float = 0.95,
71 | ):
72 | self.confidence_level = confidence_level
73 | self.sample_A = Chi2Sample(sample_A, self.confidence_level)
74 | self.sample_B = Chi2Sample(sample_B, self.confidence_level)
75 | self.compute()
76 |
77 | def compute(self) -> None:
78 | self._get_contingency_table()
79 | self._get_observed_values()
80 | self._get_expected_values()
81 | self._get_chi_square_statistic()
82 | self._get_degree_of_freedom()
83 | self._get_p_value()
84 |
85 | def _get_contingency_table(self) -> None:
86 | sample_A_value_counts = self.sample_A.sample["success"].value_counts()
87 | sample_B_value_counts = self.sample_B.sample["success"].value_counts()
88 | self.contingency_table = pd.DataFrame(
89 | [sample_A_value_counts, sample_B_value_counts]
90 | )
91 |
92 | self.contingency_table.index = ["sample_A", "sample_B"]
93 | self.contingency_table.columns = ["fail", "success"]
94 |
95 | def _get_observed_values(self) -> None:
96 | self.observed_values = self.contingency_table.values
97 |
98 | def _get_expected_values(self) -> None:
99 | b = scipy.stats.chi2_contingency(self.contingency_table)
100 | self.expected_values = b[3]
101 |
102 | def _get_chi_square_statistic(self) -> None:
103 | chi_square = sum(
104 | [
105 | (o - e) ** 2.0 / e
106 | for o, e in zip(self.observed_values, self.expected_values)
107 | ]
108 | )
109 | self.chi_square_statistic = chi_square[0] + chi_square[1]
110 |
111 | def _get_degree_of_freedom(self) -> None:
112 | no_of_rows = len(self.contingency_table.iloc[0:2, 0])
113 | no_of_columns = len(self.contingency_table.iloc[0, 0:2])
114 | self.degree_of_freedom = (no_of_rows - 1) * (no_of_columns - 1)
115 |
116 | def _get_p_value(self) -> None:
117 | self.p_value = 1 - chi2.cdf(
118 | x=self.chi_square_statistic, df=self.degree_of_freedom
119 | )
120 |
121 | def get_verdict(self) -> str:
122 | if self.p_value < 1.0 - self.confidence_level:
123 | if (
124 | self.sample_A.success / self.sample_A.trial
125 | > self.sample_B.success / self.sample_B.trial
126 | ):
127 | return "Sample A is more successful"
128 | else:
129 | return "Sample B is more successful"
130 | else:
131 | return "No significant difference"
132 |
--------------------------------------------------------------------------------
/docs/notebooks/average_comparaison.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Le Average\n",
8 | "\n",
9 | "Here we will compare means of value obtained from A versus B. \n",
10 | "First, lets import some data from `leDataset` :"
11 | ]
12 | },
13 | {
14 | "cell_type": "code",
15 | "execution_count": 2,
16 | "metadata": {},
17 | "outputs": [],
18 | "source": [
19 | "from leab import after\n",
20 | "from leab import leDataset\n",
21 | "\n",
22 | "data = leDataset.SampleLeAverage()"
23 | ]
24 | },
25 | {
26 | "cell_type": "markdown",
27 | "metadata": {},
28 | "source": [
29 | "`data` is composed by `data.A` and `data.B`"
30 | ]
31 | },
32 | {
33 | "cell_type": "code",
34 | "execution_count": 3,
35 | "metadata": {},
36 | "outputs": [
37 | {
38 | "data": {
39 | "text/html": [
40 | "\n",
41 | "\n",
54 | "
\n",
55 | " \n",
56 | " \n",
57 | " \n",
58 | " values \n",
59 | " \n",
60 | " \n",
61 | " \n",
62 | " \n",
63 | " 0 \n",
64 | " 64.2 \n",
65 | " \n",
66 | " \n",
67 | " 1 \n",
68 | " 28.4 \n",
69 | " \n",
70 | " \n",
71 | " 2 \n",
72 | " 85.3 \n",
73 | " \n",
74 | " \n",
75 | " 3 \n",
76 | " 83.1 \n",
77 | " \n",
78 | " \n",
79 | " 4 \n",
80 | " 13.4 \n",
81 | " \n",
82 | " \n",
83 | "
\n",
84 | "
"
85 | ],
86 | "text/plain": [
87 | " values\n",
88 | "0 64.2\n",
89 | "1 28.4\n",
90 | "2 85.3\n",
91 | "3 83.1\n",
92 | "4 13.4"
93 | ]
94 | },
95 | "execution_count": 3,
96 | "metadata": {},
97 | "output_type": "execute_result"
98 | }
99 | ],
100 | "source": [
101 | "data.A.head()"
102 | ]
103 | },
104 | {
105 | "cell_type": "markdown",
106 | "metadata": {},
107 | "source": [
108 | "We build a `leAverage` instance from `after` tools that we call `ab_test`. \n",
109 | "`leAverage` is in `after` tools as we compute it after getting results so, after the AB test ran."
110 | ]
111 | },
112 | {
113 | "cell_type": "code",
114 | "execution_count": 4,
115 | "metadata": {},
116 | "outputs": [],
117 | "source": [
118 | "ab_test = after.leAverage(data.A, data.B)"
119 | ]
120 | },
121 | {
122 | "cell_type": "code",
123 | "execution_count": 5,
124 | "metadata": {},
125 | "outputs": [
126 | {
127 | "data": {
128 | "text/plain": [
129 | "[34.75214007684581, 81.59785992315418]"
130 | ]
131 | },
132 | "execution_count": 5,
133 | "metadata": {},
134 | "output_type": "execute_result"
135 | }
136 | ],
137 | "source": [
138 | "ab_test.sample_A.confidence_interval"
139 | ]
140 | },
141 | {
142 | "cell_type": "markdown",
143 | "metadata": {},
144 | "source": [
145 | "We have acces to interstings info such as the wanted sample *confidence interval* or the *p value* for example etc"
146 | ]
147 | },
148 | {
149 | "cell_type": "code",
150 | "execution_count": 6,
151 | "metadata": {},
152 | "outputs": [
153 | {
154 | "name": "stdout",
155 | "output_type": "stream",
156 | "text": [
157 | "Sample A mean is greater\n"
158 | ]
159 | }
160 | ],
161 | "source": [
162 | "ab_test.get_verdict()"
163 | ]
164 | },
165 | {
166 | "cell_type": "markdown",
167 | "metadata": {},
168 | "source": [
169 | "At this default value of confidence level (95% by default), we can conclude that A mean is greater than B mean. \n",
170 | "`A` variation wins !"
171 | ]
172 | }
173 | ],
174 | "metadata": {
175 | "file_extension": ".py",
176 | "kernelspec": {
177 | "display_name": "Python 3",
178 | "language": "python",
179 | "name": "python3"
180 | },
181 | "language_info": {
182 | "codemirror_mode": {
183 | "name": "ipython",
184 | "version": 3
185 | },
186 | "file_extension": ".py",
187 | "mimetype": "text/x-python",
188 | "name": "python",
189 | "nbconvert_exporter": "python",
190 | "pygments_lexer": "ipython3",
191 | "version": "3.6.9"
192 | },
193 | "mimetype": "text/x-python",
194 | "name": "python",
195 | "npconvert_exporter": "python",
196 | "pygments_lexer": "ipython3",
197 | "version": 3
198 | },
199 | "nbformat": 4,
200 | "nbformat_minor": 4
201 | }
202 |
--------------------------------------------------------------------------------
/leab/after/leAverage.py:
--------------------------------------------------------------------------------
1 | import pandas as pd
2 | import numpy as np
3 | import scipy.special as sc
4 | import statistics
5 |
6 |
7 | class TTestSample:
8 | def __init__(self, sample: pd.DataFrame, confidence_level: float):
9 | self.sample = sample
10 | self.confidence_level = confidence_level
11 | self.compute()
12 |
13 | def compute(self) -> None:
14 | self.get_mean()
15 | self.get_std()
16 | self.get_count()
17 | self.get_variance()
18 | self.get_mean_stddev()
19 | self.get_nu()
20 | self.get_confidence_interval()
21 | self.get_mean_difference_with_confidence_interval()
22 |
23 | def get_mean(self) -> None:
24 | self.mean = np.mean(self.sample)
25 |
26 | def get_std(self) -> None:
27 | # using the std from Pandas doesn't match the expected result
28 | # we use stdev from statistics instead
29 | # (https://stackoverflow.com/questions/24984178/different-std-in-pandas-vs-numpy)
30 | self.std = statistics.stdev(self.sample.iloc[:, 0].tolist())
31 |
32 | def get_count(self) -> None:
33 | self.count_elt = len(self.sample)
34 |
35 | def get_variance(self) -> None:
36 | self.variance = self.std * self.std
37 |
38 | def get_mean_stddev(self) -> None:
39 | self.mean_stddev = np.sqrt(self.variance / self.count_elt)
40 |
41 | def get_nu(self) -> None:
42 | self.nu = self.count_elt - 1
43 |
44 | def get_invBeta(self) -> None:
45 | self.invBeta = sc.betaincinv(0.5 * self.nu, 0.5, 1 - self.confidence_level)
46 |
47 | def get_t(self) -> None:
48 | self.t = np.sqrt(self.nu / self.invBeta - self.nu)
49 |
50 | def get_confidence_interval(self) -> None:
51 | self.get_invBeta()
52 | self.get_t()
53 | self.conf_inf = self.mean - (self.t * self.mean_stddev)
54 | self.conf_sup = self.mean + (self.t * self.mean_stddev)
55 | self.confidence_interval = [self.conf_inf, self.conf_sup]
56 |
57 | def get_mean_difference_with_confidence_interval(self) -> None:
58 | self.mean_difference_with_confidence_interval = (
59 | self.mean - self.confidence_interval[0]
60 | )
61 |
62 | def plot_distribution(self):
63 | pass
64 |
65 |
66 | class leAverage(TTestSample):
67 | """
68 | Build leAverage object.
69 |
70 | Parameters:
71 |
72 | sample_A (pd.DataFrame): A sample data.
73 | sample_B (pd.DataFrame): B sample data.
74 | confidence_level (float): desired confidence level, default : 95%.
75 |
76 | Example:
77 |
78 | ::
79 |
80 | >>> from leab import after
81 | >>> from leab import leDataset
82 |
83 | >>> data = leDataset.SampleLeAverage()
84 | >>> ab_test = after.leAverage(data.A, data.B)
85 |
86 | >>> ab_test.sample_A.confidence_interval
87 |
88 | [34.75214007684581, 81.59785992315418]
89 |
90 | >>> ab_test.get_verdict()
91 |
92 | 'Sample A mean is greater'
93 | """
94 | def __init__(
95 | self,
96 | sample_A: pd.DataFrame,
97 | sample_B: pd.DataFrame,
98 | confidence_level: float = 0.95,
99 | ):
100 | self.confidence_level = confidence_level
101 | self.sample_A = TTestSample(sample_A, confidence_level)
102 | self.sample_B = TTestSample(sample_B, confidence_level)
103 | self.compute()
104 |
105 | def compute(self) -> None:
106 | self._get_diff_mean()
107 | self._get_diff_variance()
108 | self._get_diff_df()
109 | self._get_diff_mean_stddev()
110 | self._get_t()
111 | self._get_x()
112 | self._get_p_value()
113 | self._get_d()
114 | self._get_SE()
115 |
116 | def _get_diff_mean(self) -> None:
117 | self.diff_mean = self.sample_A.mean - self.sample_B.mean
118 |
119 | def _get_diff_variance(self) -> None:
120 | self.diff_variance = (
121 | self.sample_A.variance / self.sample_A.count_elt
122 | + self.sample_B.variance / self.sample_B.count_elt
123 | )
124 |
125 | def _get_diff_df(self) -> None:
126 | self.diff_df = (
127 | self.diff_variance
128 | * self.diff_variance
129 | / (
130 | (self.sample_A.variance / self.sample_A.count_elt)
131 | * (self.sample_A.variance / self.sample_A.count_elt)
132 | / (self.sample_A.count_elt - 1)
133 | + (self.sample_B.variance / self.sample_B.count_elt)
134 | * (self.sample_B.variance / self.sample_B.count_elt)
135 | / (self.sample_B.count_elt - 1)
136 | )
137 | )
138 |
139 | def _get_diff_mean_stddev(self) -> None:
140 | self.diff_mean_stddev = np.sqrt(self.diff_variance)
141 |
142 | def _get_t(self) -> None:
143 | self.t = self.diff_mean / self.diff_mean_stddev
144 |
145 | def _get_x(self) -> None:
146 | self.x = self.diff_df / (self.diff_df + self.t * self.t)
147 |
148 | def _get_p_value(self) -> None:
149 | self.p_value = sc.betainc(self.diff_df / 2, 0.5, self.x)
150 |
151 | def _get_d(self) -> None:
152 | self.d = self.diff_mean
153 |
154 | def _get_SE(self) -> None:
155 | self.SE = self.diff_mean_stddev
156 |
157 | def plot_difference_of_means(self):
158 | pass
159 |
160 | def get_verdict(self) -> str:
161 | if self.p_value < 1 - self.confidence_level:
162 | if self.sample_A.mean > self.sample_B.mean:
163 | return "Sample A mean is greater"
164 | else:
165 | return "Sample B mean is greater"
166 | else:
167 | return "No significant difference"
168 |
--------------------------------------------------------------------------------
/leab/before/leSample.py:
--------------------------------------------------------------------------------
1 |
2 | import numpy as np
3 | from scipy.stats import norm
4 |
5 |
6 | class leSample:
7 | """
8 | Build leSample object.
9 |
10 | Parameters:
11 |
12 | conversion_rate (float): baseline conversion rate.
13 | min_detectable_effect (float): minimum detectable effect.
14 | significance_level (float): alpha, percent of the time a difference will be detected, assuming one does NOT exist.
15 | statistical_power (float): 1-beta, percent of the time the minimum effect size will be detected, assuming it exists.
16 |
17 | Example:
18 |
19 | ::
20 |
21 | >>> from leab import before
22 |
23 | >>> ab_test = before.leSample(conversion_rate=20,
24 | ... min_detectable_effect=2)
25 | >>> ab_test.get_size_per_variation()
26 | 6347
27 |
28 | >>> ab_test.get_duration(avg_daily_total_visitor=1000)
29 | 13
30 | """
31 | def __init__(
32 | self,
33 | conversion_rate: float,
34 | min_detectable_effect: float,
35 | significance_level: float = 0.05,
36 | statistical_power: float = 0.8,
37 | absolute: bool = True,
38 | ):
39 | self.conversion_rate = conversion_rate / 100
40 | self.absolute = absolute
41 | self.min_detectable_effect = min_detectable_effect / 100
42 | self.absolute_or_relative()
43 | self.significance_level = significance_level
44 | self.statistical_power = statistical_power
45 | self.alpha = significance_level
46 | self.beta = 1 - statistical_power
47 | self.n = None
48 | self.size = self.get_size_per_variation()
49 |
50 | def absolute_or_relative(self) -> None:
51 | """
52 | Set up the min_detectable_effect absolute value or relative to conversion_rate.
53 | """
54 | if self.absolute:
55 | self.min_detectable_effect = self.min_detectable_effect
56 | else:
57 | self.min_detectable_effect = (
58 | self.conversion_rate * self.min_detectable_effect
59 | )
60 |
61 | @staticmethod
62 | def compute_z_score(alpha: float) -> float:
63 | """
64 | Compute z score from alpha value.
65 |
66 | Parameters:
67 |
68 | alpha (float): required alpha value (alpha should already fit the required test).
69 |
70 | Returns:
71 |
72 | Z-score.
73 | """
74 | return norm.ppf(alpha)
75 |
76 | def _get_z_1(self) -> None:
77 | self.significance = 1 - (self.alpha / 2)
78 | self.z_1 = self.compute_z_score(self.significance)
79 |
80 | def _get_z_2(self) -> None:
81 | self.power = 1 - self.beta
82 | self.z_2 = self.compute_z_score(self.power)
83 |
84 | def _get_zs(self) -> None:
85 | self._get_z_1()
86 | self._get_z_2()
87 |
88 | def _get_sd1(self) -> None:
89 | """
90 | Compute standard deviation v1.
91 |         Uses p, the baseline conversion rate (our estimate), and d, the minimum detectable change.
92 | """
93 | self.sd1 = np.sqrt(2 * self.conversion_rate * (1 - self.conversion_rate))
94 |
95 | def _get_sd2(self) -> None:
96 | """
97 |         Compute standard deviation v2.
98 |         Uses p, the baseline conversion rate (our estimate), and d, the minimum detectable change.
99 | """
100 | self.sd2 = np.sqrt(
101 | self.conversion_rate * (1 - self.conversion_rate)
102 | + (self.conversion_rate + self.min_detectable_effect)
103 | * (1 - (self.conversion_rate + self.min_detectable_effect))
104 | )
105 |
106 | def _get_sds(self) -> None:
107 | self._get_sd1()
108 | self._get_sd2()
109 |
110 | def _compute_n(self) -> None:
111 | self.n = int(
112 | np.round(
113 | ((self.z_1 * self.sd1 + self.z_2 * self.sd2) ** 2)
114 | / (self.min_detectable_effect ** 2)
115 | )
116 | )
117 |
118 | def get_size_per_variation(self) -> int:
119 | """
120 |         Calls all methods needed to compute the sample size required per group to reach significance on the test.
121 |
122 | Returns:
123 |
124 | Minimum sample size required per group according to metric denominator.
125 | """
126 | self._get_zs()
127 | self._get_sds()
128 | self._compute_n()
129 | return self.n
130 |
131 | def get_total_size(self) -> int:
132 | """
133 |         Calls all methods needed to compute the total sample size required to reach significance on the test.
134 |
135 | Returns:
136 |
137 | Minimum total sample size required according to metric denominator.
138 | """
139 | self.total_sample_size = self.n * 2
140 | return self.total_sample_size
141 |
142 | def get_duration(self, avg_daily_total_visitor: int, nb_split: int = 2) -> int:
143 | """
144 |         Compute the estimated duration in days needed to reach significance on the test.
145 |
146 | Parameters:
147 |
148 |         avg_daily_total_visitor (int): average number of daily visitors, across all variations.
149 |         nb_split (int): number of variations the traffic is split into (defaults to 2).
150 |
151 | Returns:
152 |
153 |         Estimated duration in days needed to reach significance on the test.
154 | """
155 | self.avg_daily_total_visitor = avg_daily_total_visitor
156 | self.nb_split = nb_split
157 | if self.n:
158 | self.duration = int(
159 | np.round(self.n / (self.avg_daily_total_visitor / self.nb_split))
160 | )
161 | else:
162 | self.get_size_per_variation()
163 | self.duration = int(
164 | np.round(self.n / (self.avg_daily_total_visitor / self.nb_split))
165 | )
166 | return self.duration
167 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
8 |
9 |
10 |
11 |
12 |
13 | le AB is a Python library for AB testing analysis.
14 |
15 |
16 | ## ⚡️ Quick start
17 |
18 | [](https://colab.research.google.com/drive/1R3z8uoCvpqhVfH0X6eU_-TaRRPbKUqbM?usp=sharing)
19 |
20 | Before launching your AB test, you can compute the needed **sample size** per variation:
21 |
22 | ```python
23 | >>> from leab import before
24 |
25 | >>> ab_test = before.leSample(conversion_rate=20,
26 | ... min_detectable_effect=2)
27 | >>> ab_test.get_size_per_variation()
28 |
29 | 6347
30 | ```
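
You can also estimate the **test duration** in days from your average daily traffic. This mirrors the `get_duration` example given in the `leSample` docstring (`avg_daily_total_visitor` is the total number of daily visitors, split across the variations):

```python
>>> ab_test.get_duration(avg_daily_total_visitor=1000)

13
```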
31 |
32 | After reaching the needed sample size, you can **compare means** obtained from A vs. B:
33 |
34 | ```python
35 | >>> from leab import after
36 | >>> from leab import leDataset
37 |
38 | >>> data = leDataset.SampleLeAverage()
39 |
40 | >>> ab_test = after.leAverage(data.A, data.B)
41 | >>> ab_test.get_verdict()
42 |
43 | 'Sample A mean is greater'
44 | ```
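
If your metric is a **rate of success** rather than an average, `leab.after` also provides `leSuccess` (the chi-squared style comparison referenced in the Thanks section below). Here is a minimal sketch, assuming `leSuccess` exposes the same interface as `leAverage`:

```python
>>> from leab import after
>>> from leab import leDataset

>>> data = leDataset.SampleLeSuccess()

>>> ab_test = after.leSuccess(data.A, data.B)
>>> ab_test.get_verdict()
```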
45 |
46 | ## 🛠 Installation
47 |
48 | 🐍 You need to install **Python 3.12** or above.
49 |
50 | Installation can be done by using `pip`.
51 | There are [wheels available](https://pypi.org/project/leab/#files) for **Linux**, **MacOS**, and **Windows**.
52 |
53 | ```bash
54 | pip install leab
55 | ```
56 |
57 | You can also install the latest development version as follows:
58 |
59 | ```bash
60 | pip install git+https://github.com/tlentali/leab
61 |
62 | # Or, through SSH:
63 | pip install git+ssh://git@github.com/tlentali/leab.git
64 | ```
65 |
66 | ## 🥄 Philosophy
67 |
68 | > "*Life is a sum of all our choices.*"
69 | > **Albert Camus**
70 |
71 | Get ready to make a decision!
72 |
73 | AB testing has never been more popular, especially at Internet-based companies.
74 | Even if each test is unique, some questions seem to come up again and again:
75 | 
76 | - when is my test going to be statistically significant?
77 | - is A more successful than B?
78 | - does A generate more than B?
79 | 
80 | Strong statistical knowledge is required to handle it correctly from start to end.
81 | To answer those questions in a simple and robust way, we built `le AB`.
82 | Let Python do the AB testing analysis!
83 |
84 | ## 🔥 Features
85 |
86 | Here are some benefits of using `le AB`:
87 |
88 | - **Sample size**: How many subjects are needed for my AB test?
89 | - **Test duration**: How many days are needed for my AB test?
90 | - **Rate of success**: Does the rate of success differ across two groups?
91 | - **Average value**: Does the average value differ across two groups?
92 |
93 | ## 🔗 Useful links
94 |
95 | - [Documentation](https://tlentali.github.io/leab/)
96 | - [API reference](https://tlentali.github.io/leab/reference/leAverage/)
97 | - [Issue tracker](https://github.com/tlentali/leab/issues)
98 |
99 | ## 🙏 Thanks
100 |
101 | This project takes its inspiration from [Evan Miller](https://www.evanmiller.org/)'s great work, especially the following:
102 |
103 | - [Sample Size Calculator](https://www.evanmiller.org/ab-testing/sample-size.html)
104 | - [Chi-Squared Test](https://www.evanmiller.org/ab-testing/chi-squared.html)
105 | - [2 Sample T-Test](https://www.evanmiller.org/ab-testing/t-test.html)
106 |
107 | Thank you so much, Evan M., for your work; it has saved our lives so many times!
108 |
109 | A big thanks to [Max Halford](https://maxhalford.github.io/) too, who inspired the structure of this project, particularly the docs and tests.
110 | Have a look at [Creme-ml](https://github.com/creme-ml/creme); it's just amazingly done!
111 |
112 | Finally, thanks to all of you who use or are going to use this lib; we hope it helps!
113 |
114 | ## 🖖 Contributing
115 |
116 | Feel free to contribute in any way you like, we're always open to new ideas and approaches. If you want to contribute to the code base please check out the [CONTRIBUTING.md](https://github.com/tlentali/leab/blob/master/CONTRIBUTING.md) file. Also take a look at the [issue tracker](https://github.com/tlentali/leab/issues) and see if anything takes your fancy.
117 |
118 | This project follows the [all-contributors](https://github.com/all-contributors/all-contributors) specification. Again, contributions of any kind are welcome!
119 |
120 |
121 |
122 |
123 |
130 |
131 |
132 |
133 |
134 | ## 📜 License
135 |
136 | `le AB` is free and open-source software licensed under the [3-clause BSD license](https://github.com/tlentali/leab/blob/master/LICENSE).
137 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing Guidelines
2 |
3 | *Pull requests, bug reports, and all other forms of contribution are welcomed and highly encouraged!* :octocat:
4 |
5 | ### Contents
6 |
7 | - [Code of Conduct](#book-code-of-conduct)
8 | - [Asking Questions](#bulb-asking-questions)
9 | - [Opening an Issue](#inbox_tray-opening-an-issue)
10 | - [Feature Requests](#love_letter-feature-requests)
11 | - [Triaging Issues](#mag-triaging-issues)
12 | - [Submitting Pull Requests](#repeat-submitting-pull-requests)
13 | - [Writing Commit Messages](#memo-writing-commit-messages)
14 | - [Code Review](#white_check_mark-code-review)
15 | - [Coding Style](#nail_care-coding-style)
16 | - [Certificate of Origin](#medal_sports-certificate-of-origin)
17 | - [Credits](#pray-credits)
18 |
19 | > **This guide serves to set clear expectations for everyone involved with the project so that we can improve it together while also creating a welcoming space for everyone to participate. Following these guidelines will help ensure a positive experience for contributors and maintainers.**
20 |
21 | ## :book: Code of Conduct
22 |
23 | Please review our [Code of Conduct](https://github.com/tlentali/leab/blob/master/CODE_OF_CONDUCT.md). It is in effect at all times. We expect it to be honored by everyone who contributes to this project. Acting like an asshole will not be tolerated.
24 |
25 | ## :inbox_tray: Opening an Issue
26 |
27 | Before [creating an issue](https://help.github.com/en/github/managing-your-work-on-github/creating-an-issue), check if you are using the latest version of the project. If you are not up-to-date, see if updating fixes your issue first.
28 |
29 | ### :lock: Reporting Security Issues
30 |
31 | Review our [Security Policy](https://github.com/tlentali/leab/blob/master/SECURITY.md). **Do not** file a public issue for security vulnerabilities.
32 |
33 | ### :beetle: Bug Reports and Other Issues
34 |
35 | A great way to contribute to the project is to send a detailed issue when you encounter a problem. We always appreciate a well-written, thorough bug report. :v:
36 |
37 | In short, since you are most likely a developer, **provide a ticket that you would like to receive**.
38 |
39 | - **Review the [documentation](https://tlentali.github.io/leab/)** before opening a new issue.
40 |
41 | - **Do not open a duplicate issue!** Search through existing issues to see if your issue has previously been reported. If your issue exists, comment with any additional information you have. You may simply note "I have this problem too", which helps prioritize the most common problems and requests.
42 |
43 | - **Prefer using [reactions](https://github.blog/2016-03-10-add-reactions-to-pull-requests-issues-and-comments/)**, not comments, if you simply want to "+1" an existing issue.
44 |
45 | - **Fully complete the provided issue template.** The bug report template requests all the information we need to quickly and efficiently address your issue. Be clear, concise, and descriptive. Provide as much information as you can, including steps to reproduce, stack traces, compiler errors, library versions, OS versions, and screenshots (if applicable).
46 |
47 | - **Use [GitHub-flavored Markdown](https://help.github.com/en/github/writing-on-github/basic-writing-and-formatting-syntax).** Especially put code blocks and console outputs in backticks (```). This improves readability.
48 |
49 | ## :love_letter: Feature Requests
50 |
51 | Feature requests are welcome! While we will consider all requests, we cannot guarantee your request will be accepted. We want to avoid [feature creep](https://en.wikipedia.org/wiki/Feature_creep). Your idea may be great, but also out-of-scope for the project. If accepted, we cannot make any commitments regarding the timeline for implementation and release. However, you are welcome to submit a pull request to help!
52 |
53 | - **Do not open a duplicate feature request.** Search for existing feature requests first. If you find your feature (or one very similar) previously requested, comment on that issue.
54 |
55 | - **Fully complete the provided issue template.** The feature request template asks for all necessary information for us to begin a productive conversation.
56 |
57 | - Be precise about the proposed outcome of the feature and how it relates to existing features. Include implementation details if possible.
58 |
59 | ## :mag: Triaging Issues
60 |
61 | You can triage issues which may include reproducing bug reports or asking for additional information, such as version numbers or reproduction instructions. Any help you can provide to quickly resolve an issue is very much appreciated!
62 |
63 | ## :repeat: Submitting Pull Requests
64 |
65 | We **love** pull requests! Before [forking the repo](https://help.github.com/en/github/getting-started-with-github/fork-a-repo) and [creating a pull request](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/proposing-changes-to-your-work-with-pull-requests) for non-trivial changes, it is usually best to first open an issue to discuss the changes, or discuss your intended approach for solving the problem in the comments for an existing issue.
66 |
67 | For most contributions, after your first pull request is accepted and merged, you will be [invited to the project](https://help.github.com/en/github/setting-up-and-managing-your-github-user-account/inviting-collaborators-to-a-personal-repository) and given **push access**. :tada:
68 |
69 | *Note: All contributions will be licensed under the project's license.*
70 |
71 | - **Smaller is better.** Submit **one** pull request per bug fix or feature. A pull request should contain isolated changes pertaining to a single bug fix or feature implementation. **Do not** refactor or reformat code that is unrelated to your change. It is better to **submit many small pull requests** rather than a single large one. Enormous pull requests will take enormous amounts of time to review, or may be rejected altogether.
72 |
73 | - **Coordinate bigger changes.** For large and non-trivial changes, open an issue to discuss a strategy with the maintainers. Otherwise, you risk doing a lot of work for nothing!
74 |
75 | - **Prioritize understanding over cleverness.** Write code clearly and concisely. Remember that source code usually gets written once and read often. Ensure the code is clear to the reader. The purpose and logic should be obvious to a reasonably skilled developer, otherwise you should add a comment that explains it.
76 |
77 | - **Follow existing coding style and conventions.** Keep your code consistent with the style, formatting, and conventions in the rest of the code base. When possible, these will be enforced with a linter. Consistency makes it easier to review and modify in the future.
78 |
79 | - **Include test coverage.** Add unit tests or UI tests when possible. Follow existing patterns for implementing tests.
80 |
81 | - **Update the example project** if one exists to exercise any new functionality you have added.
82 |
83 | - **Add documentation.** Document your changes with code doc comments or in existing guides.
84 |
85 | - **Update the CHANGELOG** for all enhancements and bug fixes. Include the corresponding issue number if one exists, and your GitHub username. (example: "- Fixed crash in profile view. #123 @jessesquires")
86 |
87 | - **Use the repo's default branch.** Branch from and [submit your pull request](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/creating-a-pull-request-from-a-fork) to the repo's default branch. Usually this is `main`, but it could be `dev`, `develop`, or `master`.
88 |
89 | - **[Resolve any merge conflicts](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/resolving-a-merge-conflict-on-github)** that occur.
90 |
91 | - **Promptly address any CI failures**. If your pull request fails to build or pass tests, please push another commit to fix it.
92 |
93 | - When writing comments, use properly constructed sentences, including punctuation.
94 |
95 | - Use spaces, not tabs.
96 |
97 | ## :memo: Writing Commit Messages
98 |
99 | Please [write a great commit message](https://chris.beams.io/posts/git-commit/).
100 |
101 | 1. Separate subject from body with a blank line
102 | 1. Limit the subject line to 50 characters
103 | 1. Capitalize the subject line
104 | 1. Do not end the subject line with a period
105 | 1. Use the imperative mood in the subject line (example: "Fix networking issue")
106 | 1. Wrap the body at about 72 characters
107 | 1. Use the body to explain **why**, *not what and how* (the code shows that!)
108 | 1. If applicable, prefix the title with the relevant component name. (examples: "[Docs] Fix typo", "[Profile] Fix missing avatar")
109 |
110 | ```
111 | [TAG] Short summary of changes in 50 chars or less
112 |
113 | Add a more detailed explanation here, if necessary. Possibly give
114 | some background about the issue being fixed, etc. The body of the
115 | commit message can be several paragraphs. Further paragraphs come
116 | after blank lines and please do proper word-wrap.
117 |
118 | Wrap it to about 72 characters or so. In some contexts,
119 | the first line is treated as the subject of the commit and the
120 | rest of the text as the body. The blank line separating the summary
121 | from the body is critical (unless you omit the body entirely);
122 | various tools like `log`, `shortlog` and `rebase` can get confused
123 | if you run the two together.
124 |
125 | Explain the problem that this commit is solving. Focus on why you
126 | are making this change as opposed to how or what. The code explains
127 | how or what. Reviewers and your future self can read the patch,
128 | but might not understand why a particular solution was implemented.
129 | Are there side effects or other unintuitive consequences of this
130 | change? Here's the place to explain them.
131 |
132 | - Bullet points are okay, too
133 |
134 | - A hyphen or asterisk should be used for the bullet, preceded
135 | by a single space, with blank lines in between
136 |
137 | Note the fixed or relevant GitHub issues at the end:
138 |
139 | Resolves: #123
140 | See also: #456, #789
141 | ```
142 |
143 | ## :white_check_mark: Code Review
144 |
145 | - **Review the code, not the author.** Look for and suggest improvements without disparaging or insulting the author. Provide actionable feedback and explain your reasoning.
146 |
147 | - **You are not your code.** When your code is critiqued, questioned, or constructively criticized, remember that you are not your code. Do not take code review personally.
148 |
149 | - **Always do your best.** No one writes bugs on purpose. Do your best, and learn from your mistakes.
150 |
151 | - Kindly note any violations to the guidelines specified in this document.
152 |
153 | ## :nail_care: Coding Style
154 |
155 | Consistency is most important. Follow the existing style, formatting, and naming conventions of the file you are modifying and of the overall project. Failure to do so will result in a prolonged review process that has to focus on updating the superficial aspects of your code, rather than improving its functionality and performance.
156 |
157 | For example, if all private properties are prefixed with an underscore `_`, then new ones you add should be prefixed in the same way. Or, if methods are named using camelcase, like `thisIsMyNewMethod`, then do not diverge from that by writing `this_is_my_new_method`. You get the idea. If in doubt, please ask or search the codebase for something similar.
158 |
159 | When possible, style and format will be enforced with a linter.
160 |
161 | ## :medal_sports: Certificate of Origin
162 |
163 | *Developer's Certificate of Origin 1.1*
164 |
165 | By making a contribution to this project, I certify that:
166 |
167 | > 1. The contribution was created in whole or in part by me and I have the right to submit it under the open source license indicated in the file; or
168 | > 1. The contribution is based upon previous work that, to the best of my knowledge, is covered under an appropriate open source license and I have the right under that license to submit that work with modifications, whether created in whole or in part by me, under the same open source license (unless I am permitted to submit under a different license), as indicated in the file; or
169 | > 1. The contribution was provided directly to me by some other person who certified (1), (2) or (3) and I have not modified it.
170 | > 1. I understand and agree that this project and the contribution are public and that a record of the contribution (including all personal information I submit with it, including my sign-off) is maintained indefinitely and may be redistributed consistent with this project or the open source license(s) involved.
171 |
172 | ## [No Brown M&M's](https://en.wikipedia.org/wiki/Van_Halen#Contract_riders)
173 |
174 | If you are reading this, bravo dear user and (hopefully) contributor for making it this far! You are awesome. :100:
175 |
176 | To confirm that you have read this guide and are following it as best as possible, **include this emoji at the top** of your issue or pull request: :black_heart: `:black_heart:`
177 |
178 | ## :pray: Credits
179 |
180 | **Please feel free to adopt this guide in your own projects. Fork it wholesale or remix it for your needs.**
181 |
182 | *Many of the ideas and prose for the statements in this document were based on or inspired by work from the following communities:*
183 |
184 | - [@jessesquires](https://github.com/jessesquires/.github/blob/main/CODE_OF_CONDUCT.md)
185 | - [Alamofire](https://github.com/Alamofire/Alamofire/blob/master/CONTRIBUTING.md)
186 | - [CocoaPods](https://github.com/CocoaPods/CocoaPods/blob/master/CONTRIBUTING.md)
187 | - [Docker](https://github.com/moby/moby/blob/master/CONTRIBUTING.md)
188 | - [Linux](https://elinux.org/Developer_Certificate_Of_Origin)
189 |
190 | *We commend them for their efforts to facilitate collaboration in their projects.*
191 |
--------------------------------------------------------------------------------
/leab/leDataset/data/data_chi2.csv:
--------------------------------------------------------------------------------
1 | groups;effect
2 | group_B;no
3 | group_A;no
4 | group_A;yes
5 | group_A;no
6 | group_A;no
7 | group_B;yes
8 | group_B;no
9 | group_B;yes
10 | group_B;yes
11 | group_B;yes
12 | group_B;yes
13 | group_B;no
14 | group_B;no
15 | group_B;yes
16 | group_B;no
17 | group_B;yes
18 | group_B;no
19 | group_B;no
20 | group_A;yes
21 | group_B;yes
22 | group_A;yes
23 | group_B;no
24 | group_A;no
25 | group_A;yes
26 | group_A;yes
27 | group_A;yes
28 | group_A;yes
29 | group_B;no
30 | group_B;yes
31 | group_A;yes
32 | group_B;no
33 | group_B;no
34 | group_A;yes
35 | group_B;yes
36 | group_B;no
37 | group_A;no
38 | group_B;yes
39 | group_B;no
40 | group_A;yes
41 | group_B;yes
42 | group_A;yes
43 | group_A;no
44 | group_B;yes
45 | group_A;yes
46 | group_B;no
47 | group_B;yes
48 | group_A;yes
49 | group_A;yes
50 | group_A;no
51 | group_B;no
52 | group_B;yes
53 | group_B;no
54 | group_B;yes
55 | group_B;no
56 | group_A;yes
57 | group_A;no
58 | group_A;no
59 | group_B;no
60 | group_A;no
61 | group_B;yes
62 | group_B;yes
63 | group_B;yes
64 | group_A;yes
65 | group_B;yes
66 | group_A;yes
67 | group_A;yes
68 | group_B;yes
69 | group_A;no
70 | group_B;no
71 | group_A;no
72 | group_B;no
73 | group_A;no
74 | group_B;yes
75 | group_A;no
76 | group_A;yes
77 | group_A;yes
78 | group_A;yes
79 | group_B;no
80 | group_A;no
81 | group_A;yes
82 | group_A;yes
83 | group_A;no
84 | group_A;no
85 | group_A;no
86 | group_B;no
87 | group_A;yes
88 | group_A;no
89 | group_A;no
90 | group_B;yes
91 | group_A;no
92 | group_B;yes
93 | group_A;yes
94 | group_B;yes
95 | group_B;no
96 | group_A;yes
97 | group_A;yes
98 | group_A;no
99 | group_B;yes
100 | group_A;no
101 | group_A;yes
102 | group_B;yes
103 | group_B;yes
104 | group_B;yes
105 | group_A;no
106 | group_B;yes
107 | group_A;no
108 | group_B;yes
109 | group_B;yes
110 | group_B;yes
111 | group_A;yes
112 | group_A;no
113 | group_B;no
114 | group_B;no
115 | group_B;no
116 | group_A;no
117 | group_A;yes
118 | group_B;yes
119 | group_B;yes
120 | group_A;yes
121 | group_A;no
122 | group_A;yes
123 | group_A;no
124 | group_B;yes
125 | group_A;yes
126 | group_B;yes
127 | group_A;no
128 | group_A;yes
129 | group_A;yes
130 | group_B;no
131 | group_A;no
132 | group_B;yes
133 | group_A;no
134 | group_A;no
135 | group_B;no
136 | group_B;no
137 | group_A;yes
138 | group_A;yes
139 | group_A;yes
140 | group_A;no
141 | group_A;no
142 | group_A;no
143 | group_A;no
144 | group_A;yes
145 | group_A;no
146 | group_A;no
147 | group_B;yes
148 | group_B;no
149 | group_B;no
150 | group_A;yes
151 | group_B;no
152 | group_A;no
153 | group_B;no
154 | group_B;no
155 | group_A;no
156 | group_A;yes
157 | group_B;yes
158 | group_B;yes
159 | group_A;no
160 | group_A;no
161 | group_B;no
162 | group_B;no
163 | group_A;yes
164 | group_B;yes
165 | group_B;yes
166 | group_B;no
167 | group_A;yes
168 | group_B;yes
169 | group_B;no
170 | group_A;no
171 | group_B;no
172 | group_B;no
173 | group_B;yes
174 | group_A;no
175 | group_A;no
176 | group_A;no
177 | group_B;no
178 | group_B;no
179 | group_A;yes
180 | group_B;yes
181 | group_A;yes
182 | group_A;yes
183 | group_B;no
184 | group_A;yes
185 | group_A;yes
186 | group_A;no
187 | group_B;no
188 | group_A;yes
189 | group_B;yes
190 | group_A;no
191 | group_B;yes
192 | group_A;yes
193 | group_B;no
194 | group_B;yes
195 | group_A;yes
196 | group_B;no
197 | group_B;yes
198 | group_B;no
199 | group_B;no
200 | group_A;no
201 | group_A;yes
202 | group_B;no
203 | group_A;no
204 | group_A;no
205 | group_B;yes
206 | group_B;no
207 | group_A;no
208 | group_A;yes
209 | group_B;yes
210 | group_B;no
211 | group_B;yes
212 | group_B;no
213 | group_B;yes
214 | group_A;no
215 | group_A;no
216 | group_A;no
217 | group_B;no
218 | group_A;yes
219 | group_A;yes
220 | group_B;yes
221 | group_B;no
222 | group_B;yes
223 | group_B;no
224 | group_A;yes
225 | group_B;no
226 | group_B;yes
227 | group_B;yes
228 | group_B;yes
229 | group_B;no
230 | group_A;no
231 | group_A;no
232 | group_B;yes
233 | group_A;no
234 | group_A;no
235 | group_B;yes
236 | group_B;no
237 | group_B;yes
238 | group_A;no
239 | group_B;yes
240 | group_B;no
241 | group_B;yes
242 | group_A;yes
243 | group_B;no
244 | group_A;no
245 | group_A;yes
246 | group_B;yes
247 | group_A;yes
248 | group_A;no
249 | group_B;no
250 | group_A;no
251 | group_B;yes
252 | group_B;yes
253 | group_B;yes
254 | group_A;no
255 | group_A;no
256 | group_B;yes
257 | group_B;yes
258 | group_A;no
259 | group_B;yes
260 | group_A;yes
261 | group_B;yes
262 | group_B;yes
263 | group_A;no
264 | group_A;no
265 | group_A;no
266 | group_B;no
267 | group_A;yes
268 | group_A;no
269 | group_B;no
270 | group_A;yes
271 | group_B;no
272 | group_A;yes
273 | group_A;yes
274 | group_B;yes
275 | group_A;yes
276 | group_A;yes
277 | group_B;no
278 | group_A;no
279 | group_A;no
280 | group_B;no
281 | group_A;yes
282 | group_A;no
283 | group_B;yes
284 | group_A;no
285 | group_B;no
286 | group_B;yes
287 | group_A;no
288 | group_A;yes
289 | group_B;yes
290 | group_A;no
291 | group_A;no
292 | group_A;no
293 | group_A;yes
294 | group_A;no
295 | group_A;yes
296 | group_A;yes
297 | group_B;yes
298 | group_A;yes
299 | group_A;no
300 | group_B;no
301 | group_B;yes
302 | group_B;no
303 | group_A;no
304 | group_B;yes
305 | group_A;no
306 | group_B;no
307 | group_A;yes
308 | group_A;no
309 | group_B;no
310 | group_A;yes
311 | group_B;no
312 | group_A;no
313 | group_B;no
314 | group_B;no
315 | group_A;no
316 | group_A;no
317 | group_A;yes
318 | group_A;no
319 | group_B;yes
320 | group_A;yes
321 | group_A;no
322 | group_B;yes
323 | group_A;yes
324 | group_A;no
325 | group_A;no
326 | group_A;yes
327 | group_B;no
328 | group_A;no
329 | group_B;no
330 | group_B;yes
331 | group_A;no
332 | group_B;yes
333 | group_B;no
334 | group_A;no
335 | group_B;yes
336 | group_B;no
337 | group_B;yes
338 | group_B;yes
339 | group_B;no
340 | group_A;no
341 | group_A;yes
342 | group_A;yes
343 | group_B;yes
344 | group_A;yes
345 | group_B;no
346 | group_B;no
347 | group_A;no
348 | group_A;yes
349 | group_B;no
350 | group_A;yes
351 | group_A;no
352 | group_A;yes
353 | group_B;yes
354 | group_A;no
355 | group_B;yes
356 | group_A;no
357 | group_B;no
358 | group_B;no
359 | group_B;yes
360 | group_A;yes
361 | group_A;no
362 | group_A;no
363 | group_A;no
364 | group_B;yes
365 | group_B;no
366 | group_A;yes
367 | group_A;yes
368 | group_A;yes
369 | group_B;yes
370 | group_B;no
371 | group_B;yes
372 | group_B;yes
373 | group_B;no
374 | group_B;yes
375 | group_A;yes
376 | group_A;no
377 | group_A;yes
378 | group_B;no
379 | group_A;yes
380 | group_A;no
381 | group_A;yes
382 | group_B;no
383 | group_A;yes
384 | group_B;yes
385 | group_B;yes
386 | group_A;no
387 | group_B;yes
388 | group_A;no
389 | group_A;no
390 | group_B;no
391 | group_A;no
392 | group_B;yes
393 | group_B;yes
394 | group_B;yes
395 | group_B;no
396 | group_B;yes
397 | group_A;yes
398 | group_B;no
399 | group_B;yes
400 | group_A;yes
401 | group_B;no
402 | group_A;no
403 | group_B;no
404 | group_A;no
405 | group_B;no
406 | group_A;yes
407 | group_A;no
408 | group_B;no
409 | group_A;no
410 | group_B;no
411 | group_B;yes
412 | group_A;yes
413 | group_A;no
414 | group_B;yes
415 | group_A;yes
416 | group_B;no
417 | group_A;yes
418 | group_B;no
419 | group_B;yes
420 | group_A;yes
421 | group_A;yes
422 | group_B;no
423 | group_B;no
424 | group_B;no
425 | group_B;no
426 | group_A;no
427 | group_B;no
428 | group_B;no
429 | group_A;yes
430 | group_B;yes
431 | group_A;no
432 | group_B;yes
433 | group_B;no
434 | group_A;no
435 | group_A;no
436 | group_B;no
437 | group_B;no
438 | group_B;yes
439 | group_B;no
440 | group_B;yes
441 | group_A;yes
442 | group_B;yes
443 | group_B;yes
444 | group_B;yes
445 | group_B;no
446 | group_A;yes
447 | group_A;yes
448 | group_B;yes
449 | group_A;yes
450 | group_A;no
451 | group_A;no
452 | group_B;yes
453 | group_A;yes
454 | group_B;no
455 | group_A;yes
456 | group_B;yes
457 | group_B;no
458 | group_A;yes
459 | group_B;no
460 | group_A;yes
461 | group_A;no
462 | group_A;yes
463 | group_B;yes
464 | group_B;yes
465 | group_A;no
466 | group_A;no
467 | group_A;yes
468 | group_B;no
469 | group_A;yes
470 | group_A;yes
471 | group_B;no
472 | group_A;no
473 | group_B;no
474 | group_A;yes
475 | group_A;no
476 | group_A;yes
477 | group_B;no
478 | group_B;yes
479 | group_A;no
480 | group_A;yes
481 | group_A;yes
482 | group_A;no
483 | group_A;no
484 | group_A;no
485 | group_A;no
486 | group_B;yes
487 | group_A;yes
488 | group_A;yes
489 | group_B;yes
490 | group_A;no
491 | group_B;no
492 | group_B;yes
493 | group_A;no
494 | group_B;yes
495 | group_A;yes
496 | group_A;yes
497 | group_B;yes
498 | group_B;yes
499 | group_B;yes
500 | group_B;no
501 | group_B;yes
502 | group_A;no
503 | group_B;yes
504 | group_B;yes
505 | group_B;yes
506 | group_B;no
507 | group_A;yes
508 | group_A;yes
509 | group_B;yes
510 | group_A;yes
511 | group_B;yes
512 | group_A;no
513 | group_B;yes
514 | group_B;yes
515 | group_A;yes
516 | group_A;yes
517 | group_B;no
518 | group_B;yes
519 | group_A;no
520 | group_B;yes
521 | group_B;yes
522 | group_B;yes
523 | group_B;no
524 | group_A;yes
525 | group_A;no
526 | group_B;yes
527 | group_A;no
528 | group_B;no
529 | group_A;no
530 | group_B;yes
531 | group_B;yes
532 | group_A;yes
533 | group_B;no
534 | group_B;no
535 | group_A;no
536 | group_B;yes
537 | group_A;no
538 | group_A;yes
539 | group_A;yes
540 | group_B;no
541 | group_B;no
542 | group_B;yes
543 | group_A;no
544 | group_B;no
545 | group_B;no
546 | group_B;no
547 | group_B;yes
548 | group_A;yes
549 | group_B;no
550 | group_A;yes
551 | group_A;no
552 | group_A;yes
553 | group_A;yes
554 | group_B;yes
555 | group_B;yes
556 | group_B;yes
557 | group_B;no
558 | group_B;yes
559 | group_A;yes
560 | group_B;no
561 | group_A;no
562 | group_B;yes
563 | group_A;yes
564 | group_A;yes
565 | group_B;no
566 | group_A;no
567 | group_B;yes
568 | group_A;yes
569 | group_B;no
570 | group_B;no
571 | group_B;yes
572 | group_A;yes
573 | group_B;no
574 | group_A;no
575 | group_A;yes
576 | group_B;yes
577 | group_A;yes
578 | group_B;yes
579 | group_A;no
580 | group_B;no
581 | group_B;yes
582 | group_B;yes
583 | group_B;no
584 | group_B;yes
585 | group_B;no
586 | group_B;yes
587 | group_A;yes
588 | group_B;yes
589 | group_B;no
590 | group_A;no
591 | group_A;no
592 | group_B;no
593 | group_B;yes
594 | group_B;yes
595 | group_A;yes
596 | group_A;no
597 | group_B;yes
598 | group_B;yes
599 | group_A;yes
600 | group_A;no
601 | group_A;no
602 | group_B;yes
603 | group_A;no
604 | group_A;no
605 | group_B;no
606 | group_A;no
607 | group_B;yes
608 | group_B;yes
609 | group_A;no
610 | group_B;no
611 | group_A;yes
612 | group_B;yes
613 | group_B;no
614 | group_A;no
615 | group_A;no
616 | group_A;yes
617 | group_A;no
618 | group_B;yes
619 | group_A;yes
620 | group_B;yes
621 | group_B;yes
622 | group_A;yes
623 | group_B;yes
624 | group_B;yes
625 | group_A;yes
626 | group_A;yes
627 | group_B;no
628 | group_B;no
629 | group_A;yes
630 | group_A;no
631 | group_B;no
632 | group_B;no
633 | group_A;no
634 | group_A;yes
635 | group_B;yes
636 | group_A;no
637 | group_A;no
638 | group_A;no
639 | group_A;yes
640 | group_A;yes
641 | group_A;yes
642 | group_B;yes
643 | group_B;no
644 | group_A;yes
645 | group_B;no
646 | group_B;yes
647 | group_B;no
648 | group_B;yes
649 | group_B;no
650 | group_A;yes
651 | group_A;yes
652 | group_B;no
653 | group_B;yes
654 | group_B;yes
655 | group_A;no
656 | group_B;yes
657 | group_A;no
658 | group_A;yes
659 | group_A;no
660 | group_A;yes
661 | group_A;no
662 | group_A;no
663 | group_A;no
664 | group_A;yes
665 | group_A;yes
666 | group_B;no
667 | group_A;no
668 | group_A;no
669 | group_B;no
670 | group_A;no
671 | group_B;yes
672 | group_A;no
673 | group_B;yes
674 | group_B;yes
675 | group_B;yes
676 | group_A;no
677 | group_B;yes
678 | group_A;yes
679 | group_B;no
680 | group_A;yes
681 | group_B;yes
682 | group_B;yes
683 | group_B;no
684 | group_B;no
685 | group_B;no
686 | group_B;no
687 | group_A;yes
688 | group_A;yes
689 | group_A;yes
690 | group_A;no
691 | group_B;no
692 | group_A;no
693 | group_B;no
694 | group_B;no
695 | group_B;no
696 | group_B;no
697 | group_B;yes
698 | group_A;yes
699 | group_A;yes
700 | group_A;no
701 | group_B;yes
702 | group_A;no
703 | group_B;yes
704 | group_A;no
705 | group_A;no
706 | group_A;no
707 | group_B;yes
708 | group_A;no
709 | group_B;no
710 | group_B;yes
711 | group_B;yes
712 | group_B;yes
713 | group_B;no
714 | group_B;yes
715 | group_B;yes
716 | group_A;no
717 | group_A;no
718 | group_A;no
719 | group_A;yes
720 | group_B;no
721 | group_A;no
722 | group_B;yes
723 | group_B;yes
724 | group_A;no
725 | group_B;yes
726 | group_B;no
727 | group_A;yes
728 | group_A;no
729 | group_A;no
730 | group_A;yes
731 | group_B;yes
732 | group_A;no
733 | group_B;no
734 | group_B;no
735 | group_A;no
736 | group_B;yes
737 | group_B;yes
738 | group_A;no
739 | group_A;no
740 | group_B;no
741 | group_A;yes
742 | group_B;yes
743 | group_B;no
744 | group_B;yes
745 | group_B;no
746 | group_A;no
747 | group_B;no
748 | group_A;no
749 | group_B;yes
750 | group_A;yes
751 | group_A;yes
752 | group_B;no
753 | group_A;yes
754 | group_A;yes
755 | group_B;yes
756 | group_A;no
757 | group_A;no
758 | group_B;no
759 | group_A;yes
760 | group_B;yes
761 | group_B;yes
762 | group_A;yes
763 | group_B;yes
764 | group_B;no
765 | group_A;yes
766 | group_A;yes
767 | group_B;no
768 | group_B;yes
769 | group_B;no
770 | group_B;yes
771 | group_A;no
772 | group_A;no
773 | group_A;no
774 | group_B;no
775 | group_B;no
776 | group_A;no
777 | group_A;yes
778 | group_B;yes
779 | group_B;no
780 | group_A;yes
781 | group_B;yes
782 | group_A;no
783 | group_B;no
784 | group_A;yes
785 | group_A;yes
786 | group_B;no
787 | group_A;no
788 | group_B;yes
789 | group_A;yes
790 | group_A;yes
791 | group_B;no
792 | group_A;yes
793 | group_A;yes
794 | group_A;no
795 | group_A;yes
796 | group_A;yes
797 | group_A;yes
798 | group_A;yes
799 | group_B;no
800 | group_A;yes
801 | group_A;yes
802 | group_B;no
803 | group_B;yes
804 | group_A;yes
805 | group_A;yes
806 | group_A;yes
807 | group_B;yes
808 | group_B;yes
809 | group_A;yes
810 | group_A;no
811 | group_A;yes
812 | group_B;yes
813 | group_B;yes
814 | group_A;no
815 | group_A;no
816 | group_B;no
817 | group_B;no
818 | group_B;no
819 | group_A;no
820 | group_B;no
821 | group_A;no
822 | group_A;yes
823 | group_B;no
824 | group_B;yes
825 | group_B;no
826 | group_A;yes
827 | group_B;yes
828 | group_A;no
829 | group_A;yes
830 | group_A;no
831 | group_B;no
832 | group_A;no
833 | group_B;yes
834 | group_A;no
835 | group_A;yes
836 | group_B;no
837 | group_B;no
838 | group_B;yes
839 | group_B;yes
840 | group_A;yes
841 | group_B;no
842 | group_B;no
843 | group_A;yes
844 | group_B;yes
845 | group_A;yes
846 | group_A;no
847 | group_A;yes
848 | group_B;yes
849 | group_B;no
850 | group_A;yes
851 | group_A;no
852 | group_B;yes
853 | group_B;no
854 | group_A;no
855 | group_B;yes
856 | group_A;yes
857 | group_B;no
858 | group_B;no
859 | group_B;no
860 | group_B;yes
861 | group_A;no
862 | group_B;yes
863 | group_B;yes
864 | group_A;yes
865 | group_B;no
866 | group_A;no
867 | group_B;yes
868 | group_B;yes
869 | group_B;yes
870 | group_A;no
871 | group_B;no
872 | group_A;yes
873 | group_B;no
874 | group_B;yes
875 | group_B;no
876 | group_B;yes
877 | group_B;yes
878 | group_A;yes
879 | group_A;no
880 | group_B;yes
881 | group_A;yes
882 | group_A;no
883 | group_B;yes
884 | group_B;no
885 | group_B;yes
886 | group_B;yes
887 | group_B;yes
888 | group_A;no
889 | group_B;no
890 | group_A;yes
891 | group_B;yes
892 | group_B;no
893 | group_A;no
894 | group_A;no
895 | group_B;no
896 | group_B;yes
897 | group_A;no
898 | group_A;yes
899 | group_B;yes
900 | group_B;yes
901 | group_A;no
902 | group_A;no
903 | group_A;no
904 | group_A;yes
905 | group_B;no
906 | group_B;yes
907 | group_B;no
908 | group_A;yes
909 | group_B;no
910 | group_B;yes
911 | group_B;yes
912 | group_B;no
913 | group_A;yes
914 | group_A;yes
915 | group_B;no
916 | group_B;no
917 | group_A;no
918 | group_A;no
919 | group_B;no
920 | group_B;yes
921 | group_B;no
922 | group_A;yes
923 | group_A;yes
924 | group_A;no
925 | group_A;yes
926 | group_B;no
927 | group_A;yes
928 | group_A;yes
929 | group_B;yes
930 | group_B;no
931 | group_A;yes
932 | group_B;no
933 | group_A;no
934 | group_A;no
935 | group_B;yes
936 | group_B;yes
937 | group_B;no
938 | group_B;yes
939 | group_A;no
940 | group_A;yes
941 | group_B;yes
942 | group_A;no
943 | group_B;no
944 | group_A;no
945 | group_B;no
946 | group_B;yes
947 | group_B;no
948 | group_B;no
949 | group_B;no
950 | group_A;yes
951 | group_B;yes
952 | group_B;no
953 | group_A;yes
954 | group_B;yes
955 | group_A;no
956 | group_A;yes
957 | group_A;no
958 | group_A;no
959 | group_B;yes
960 | group_A;yes
961 | group_B;no
962 | group_A;yes
963 | group_A;no
964 | group_A;yes
965 | group_B;yes
966 | group_B;yes
967 | group_A;yes
968 | group_A;no
969 | group_A;no
970 | group_A;no
971 | group_B;yes
972 | group_A;yes
973 | group_B;yes
974 | group_A;no
975 | group_B;yes
976 | group_A;no
977 | group_B;no
978 | group_B;yes
979 | group_A;yes
980 | group_A;yes
981 | group_B;no
982 | group_A;no
983 | group_B;yes
984 | group_B;no
985 | group_A;yes
986 | group_A;no
987 | group_B;yes
988 | group_A;yes
989 | group_B;no
990 | group_B;no
991 | group_A;yes
992 | group_B;yes
993 | group_A;no
994 | group_A;no
995 | group_A;no
996 | group_A;yes
997 | group_A;no
998 | group_B;yes
999 | group_B;no
1000 | group_B;yes
1001 | group_B;no
1002 | group_B;no
1003 | group_A;yes
1004 | group_B;no
1005 | group_A;no
1006 | group_B;yes
1007 | group_A;no
1008 | group_B;yes
1009 | group_B;yes
1010 | group_A;yes
1011 | group_A;yes
1012 | group_B;no
1013 | group_B;yes
1014 | group_B;yes
1015 | group_A;yes
1016 | group_A;no
1017 | group_A;yes
1018 | group_B;yes
1019 | group_A;yes
1020 | group_B;yes
1021 | group_A;yes
1022 | group_A;no
1023 | group_A;no
1024 | group_A;no
1025 | group_A;no
1026 | group_A;yes
1027 | group_B;no
1028 | group_A;yes
1029 | group_A;yes
1030 | group_A;no
1031 | group_A;yes
1032 | group_A;no
1033 | group_A;yes
1034 | group_B;yes
1035 | group_B;yes
1036 | group_B;no
1037 | group_B;yes
1038 | group_B;yes
1039 | group_A;yes
1040 | group_A;no
1041 | group_B;no
1042 | group_B;no
1043 | group_B;yes
1044 | group_A;no
1045 | group_B;yes
1046 | group_A;yes
1047 | group_B;no
1048 | group_A;yes
1049 | group_A;no
1050 | group_A;no
1051 | group_A;no
1052 | group_A;yes
1053 | group_A;no
1054 | group_A;no
1055 | group_A;no
1056 | group_B;yes
1057 | group_A;yes
1058 | group_B;no
1059 | group_B;no
1060 | group_A;yes
1061 | group_A;yes
1062 | group_A;no
1063 | group_A;yes
1064 | group_B;yes
1065 | group_B;no
1066 | group_A;yes
1067 | group_A;no
1068 | group_B;yes
1069 | group_A;yes
1070 | group_A;yes
1071 | group_B;no
1072 | group_B;no
1073 | group_A;no
1074 | group_B;yes
1075 | group_A;no
1076 | group_A;yes
1077 | group_A;yes
1078 | group_B;yes
1079 | group_B;yes
1080 | group_A;no
1081 | group_B;no
1082 | group_A;yes
1083 | group_B;no
1084 | group_A;no
1085 | group_A;no
1086 | group_B;no
1087 | group_A;no
1088 | group_B;yes
1089 | group_A;yes
1090 | group_B;yes
1091 | group_A;no
1092 | group_B;no
1093 | group_A;no
1094 | group_B;yes
1095 | group_A;yes
1096 | group_B;yes
1097 | group_A;no
1098 | group_A;no
1099 | group_B;no
1100 | group_A;yes
1101 | group_A;yes
1102 | group_B;yes
1103 | group_B;no
1104 | group_B;yes
1105 | group_A;yes
1106 | group_B;no
1107 | group_A;yes
1108 | group_B;no
1109 | group_A;no
1110 | group_B;no
1111 | group_B;no
1112 | group_A;yes
1113 | group_A;yes
1114 | group_B;no
1115 | group_B;yes
1116 | group_B;no
1117 | group_A;yes
1118 | group_B;yes
1119 | group_A;no
1120 | group_A;no
1121 | group_B;no
1122 | group_A;no
1123 | group_B;yes
1124 | group_B;yes
1125 | group_B;yes
1126 | group_A;no
1127 | group_B;yes
1128 | group_A;no
1129 | group_B;yes
1130 | group_A;no
1131 | group_B;yes
1132 | group_A;yes
1133 | group_A;no
1134 | group_B;yes
1135 | group_B;yes
1136 | group_A;no
1137 | group_A;yes
1138 | group_B;no
1139 | group_B;yes
1140 | group_B;yes
1141 | group_A;yes
1142 | group_B;yes
1143 | group_A;yes
1144 | group_B;yes
1145 | group_A;no
1146 | group_A;no
1147 | group_A;yes
1148 | group_A;no
1149 | group_A;no
1150 | group_A;yes
1151 | group_B;yes
1152 | group_A;yes
1153 | group_A;yes
1154 | group_B;yes
1155 | group_B;yes
1156 | group_B;yes
1157 | group_B;yes
1158 | group_B;no
1159 | group_A;no
1160 | group_A;yes
1161 | group_A;no
1162 | group_B;yes
1163 | group_A;no
1164 | group_B;yes
1165 | group_A;yes
1166 | group_B;yes
1167 | group_A;no
1168 | group_A;yes
1169 | group_B;no
1170 | group_B;yes
1171 | group_A;no
1172 | group_B;no
1173 | group_B;no
1174 | group_B;yes
1175 | group_A;yes
1176 | group_A;no
1177 | group_B;no
1178 | group_A;yes
1179 | group_B;no
1180 | group_B;yes
1181 | group_B;no
1182 | group_B;no
1183 | group_B;no
1184 | group_B;no
1185 | group_B;no
1186 | group_A;yes
1187 | group_A;yes
1188 | group_A;no
1189 | group_B;yes
1190 | group_A;no
1191 | group_B;no
1192 | group_A;yes
1193 | group_B;no
1194 | group_B;no
1195 | group_A;no
1196 | group_A;no
1197 | group_B;no
1198 | group_B;yes
1199 | group_B;no
1200 | group_B;no
1201 | group_A;no
1202 | group_A;no
1203 | group_B;yes
1204 | group_B;no
1205 | group_B;no
1206 | group_A;no
1207 | group_B;yes
1208 | group_B;no
1209 | group_A;no
1210 | group_B;no
1211 | group_B;yes
1212 | group_B;yes
1213 | group_B;yes
1214 | group_A;yes
1215 | group_A;no
1216 | group_B;no
1217 | group_A;no
1218 | group_A;no
1219 | group_B;yes
1220 | group_B;yes
1221 | group_B;yes
1222 | group_B;yes
1223 | group_A;yes
1224 | group_A;yes
1225 | group_A;yes
1226 | group_B;yes
1227 | group_A;yes
1228 | group_A;yes
1229 | group_B;yes
1230 | group_B;yes
1231 | group_A;yes
1232 | group_A;yes
1233 | group_A;no
1234 | group_A;yes
1235 | group_B;yes
1236 | group_B;no
1237 | group_A;no
1238 | group_B;no
1239 | group_A;no
1240 | group_A;yes
1241 | group_B;no
1242 | group_B;yes
1243 | group_A;yes
1244 | group_A;yes
1245 | group_A;yes
1246 | group_B;yes
1247 | group_A;yes
1248 | group_B;yes
1249 | group_B;yes
1250 | group_B;yes
1251 | group_A;no
1252 | group_A;no
1253 | group_B;no
1254 | group_B;no
1255 | group_B;no
1256 | group_A;no
1257 | group_B;no
1258 | group_A;yes
1259 | group_B;yes
1260 | group_B;no
1261 | group_A;no
1262 | group_B;no
1263 | group_B;no
1264 | group_A;no
1265 | group_A;yes
1266 | group_A;no
1267 | group_B;yes
1268 | group_A;yes
1269 | group_B;no
1270 | group_A;no
1271 | group_A;yes
1272 | group_B;yes
1273 | group_B;yes
1274 | group_B;no
1275 | group_A;no
1276 | group_A;yes
1277 | group_B;no
1278 | group_A;no
1279 | group_A;no
1280 | group_B;yes
1281 | group_A;no
1282 | group_B;no
1283 | group_A;yes
1284 | group_B;no
1285 | group_A;no
1286 | group_B;yes
1287 | group_A;no
1288 | group_B;yes
1289 | group_B;no
1290 | group_B;yes
1291 | group_A;no
1292 | group_A;no
1293 | group_B;yes
1294 | group_B;yes
1295 | group_B;no
1296 | group_A;yes
1297 | group_A;no
1298 | group_A;yes
1299 | group_A;yes
1300 | group_B;yes
1301 | group_A;no
1302 | group_A;no
1303 | group_A;yes
1304 | group_A;yes
1305 | group_B;yes
1306 | group_A;no
1307 | group_B;no
1308 | group_B;no
1309 | group_A;no
1310 | group_A;no
1311 | group_B;yes
1312 | group_A;yes
1313 | group_A;no
1314 | group_A;no
1315 | group_A;no
1316 | group_A;yes
1317 | group_B;no
1318 | group_B;no
1319 | group_B;no
1320 | group_A;yes
1321 | group_B;no
1322 | group_A;yes
1323 | group_A;no
1324 | group_A;yes
1325 | group_B;no
1326 | group_A;no
1327 | group_A;yes
1328 | group_A;yes
1329 | group_B;no
1330 | group_B;no
1331 | group_B;yes
1332 | group_B;no
1333 | group_B;yes
1334 | group_A;no
1335 | group_A;yes
1336 | group_B;yes
1337 | group_A;no
1338 | group_B;yes
1339 | group_B;no
1340 | group_A;yes
1341 | group_B;no
1342 | group_A;yes
1343 | group_A;yes
1344 | group_B;yes
1345 | group_A;yes
1346 | group_B;no
1347 | group_A;no
1348 | group_A;yes
1349 | group_A;yes
1350 | group_A;yes
1351 | group_A;yes
1352 | group_B;yes
1353 | group_B;no
1354 | group_B;no
1355 | group_B;no
1356 | group_B;no
1357 | group_A;yes
1358 | group_A;no
1359 | group_B;no
1360 | group_A;yes
1361 | group_A;yes
1362 | group_B;yes
1363 | group_B;no
1364 | group_A;yes
1365 | group_B;no
1366 | group_A;yes
1367 | group_A;no
1368 | group_B;no
1369 | group_B;yes
1370 | group_B;no
1371 | group_A;yes
1372 | group_B;no
1373 | group_A;yes
1374 | group_A;no
1375 | group_B;no
1376 | group_A;yes
1377 | group_A;no
1378 | group_B;no
1379 | group_B;no
1380 | group_B;no
1381 | group_B;yes
1382 | group_A;yes
1383 | group_B;no
1384 | group_B;yes
1385 | group_A;no
1386 | group_A;no
1387 | group_A;yes
1388 | group_A;yes
1389 | group_A;no
1390 | group_B;no
1391 | group_A;yes
1392 | group_A;yes
1393 | group_A;no
1394 | group_B;yes
1395 | group_A;no
1396 | group_B;no
1397 | group_A;no
1398 | group_B;no
1399 | group_B;no
1400 | group_B;yes
1401 | group_A;no
1402 | group_A;yes
1403 | group_B;no
1404 | group_B;no
1405 | group_B;no
1406 | group_A;yes
1407 | group_B;no
1408 | group_A;no
1409 | group_A;yes
1410 | group_A;no
1411 | group_A;no
1412 | group_A;yes
1413 | group_B;yes
1414 | group_B;yes
1415 | group_A;no
1416 | group_A;no
1417 | group_B;no
1418 | group_B;no
1419 | group_B;no
1420 | group_B;no
1421 | group_B;no
1422 | group_A;no
1423 | group_B;yes
1424 | group_B;no
1425 | group_A;yes
1426 | group_B;no
1427 | group_B;yes
1428 | group_A;no
1429 | group_A;no
1430 | group_B;yes
1431 | group_B;yes
1432 | group_B;no
1433 | group_A;no
1434 | group_B;no
1435 | group_B;no
1436 | group_B;no
1437 | group_B;no
1438 | group_A;no
1439 | group_B;yes
1440 | group_B;no
1441 | group_B;yes
1442 | group_A;yes
1443 | group_A;no
1444 | group_B;no
1445 | group_A;no
1446 | group_B;yes
1447 | group_A;yes
1448 | group_A;no
1449 | group_A;yes
1450 | group_A;yes
1451 | group_A;no
1452 | group_A;no
1453 | group_A;no
1454 | group_A;no
1455 | group_A;yes
1456 | group_A;no
1457 | group_A;no
1458 | group_B;yes
1459 | group_A;yes
1460 | group_B;yes
1461 | group_A;no
1462 | group_A;yes
1463 | group_B;no
1464 | group_A;no
1465 | group_A;no
1466 | group_A;yes
1467 | group_A;no
1468 | group_B;no
1469 | group_B;yes
1470 | group_B;yes
1471 | group_B;no
1472 | group_B;no
1473 | group_B;yes
1474 | group_A;no
1475 | group_B;no
1476 | group_B;yes
1477 | group_B;no
1478 | group_B;no
1479 | group_A;no
1480 | group_B;yes
1481 | group_A;yes
1482 | group_A;no
1483 | group_B;no
1484 | group_A;yes
1485 | group_A;yes
1486 | group_B;no
1487 | group_B;yes
1488 | group_B;yes
1489 | group_A;no
1490 | group_B;no
1491 | group_A;no
1492 | group_A;yes
1493 | group_A;no
1494 | group_A;no
1495 | group_A;yes
1496 | group_B;yes
1497 | group_B;yes
1498 | group_B;no
1499 | group_A;no
1500 | group_A;yes
1501 | group_A;yes
1502 | group_A;no
1503 | group_B;yes
1504 | group_B;no
1505 | group_B;no
1506 | group_A;yes
1507 | group_B;yes
1508 | group_B;no
1509 | group_A;no
1510 | group_B;yes
1511 | group_A;no
1512 | group_B;yes
1513 | group_B;no
1514 | group_B;yes
1515 | group_A;no
1516 | group_A;yes
1517 | group_B;yes
1518 | group_A;no
1519 | group_B;yes
1520 | group_A;no
1521 | group_B;yes
1522 | group_A;no
1523 | group_B;no
1524 | group_B;yes
1525 | group_B;yes
1526 | group_B;yes
1527 | group_A;yes
1528 | group_A;yes
1529 | group_B;yes
1530 | group_A;yes
1531 | group_B;no
1532 | group_A;yes
1533 | group_B;no
1534 | group_A;no
1535 | group_B;yes
1536 | group_A;no
1537 | group_A;no
1538 | group_B;no
1539 | group_B;no
1540 | group_B;yes
1541 | group_A;no
1542 | group_A;no
1543 | group_B;yes
1544 | group_B;yes
1545 | group_B;yes
1546 | group_B;no
1547 | group_A;yes
1548 | group_A;yes
1549 | group_B;yes
1550 | group_B;yes
1551 | group_A;no
1552 | group_A;no
1553 | group_A;yes
1554 | group_B;no
1555 | group_A;no
1556 | group_A;no
1557 | group_A;yes
1558 | group_B;yes
1559 | group_B;yes
1560 | group_A;yes
1561 | group_B;yes
1562 | group_A;no
1563 | group_A;no
1564 | group_A;yes
1565 | group_A;yes
1566 | group_A;no
1567 | group_B;yes
1568 | group_B;yes
1569 | group_A;no
1570 | group_A;yes
1571 | group_B;yes
1572 | group_B;no
1573 | group_A;no
1574 | group_A;yes
1575 | group_B;yes
1576 | group_A;no
1577 | group_B;yes
1578 | group_A;yes
1579 | group_B;yes
1580 | group_A;no
1581 | group_A;no
1582 | group_A;yes
1583 | group_A;yes
1584 | group_A;no
1585 | group_A;yes
1586 | group_A;no
1587 | group_A;no
1588 | group_A;no
1589 | group_B;no
1590 | group_A;no
1591 | group_A;no
1592 | group_A;no
1593 | group_B;no
1594 | group_A;yes
1595 | group_B;no
1596 | group_B;no
1597 | group_A;yes
1598 | group_A;no
1599 | group_A;no
1600 | group_B;yes
1601 | group_A;no
1602 | group_B;no
1603 | group_B;no
1604 | group_A;no
1605 | group_A;yes
1606 | group_A;yes
1607 | group_B;yes
1608 | group_B;no
1609 | group_A;no
1610 | group_A;yes
1611 | group_A;no
1612 | group_B;no
1613 | group_A;yes
1614 | group_B;yes
1615 | group_A;no
1616 | group_B;yes
1617 | group_B;yes
1618 | group_A;no
1619 | group_B;no
1620 | group_A;yes
1621 | group_A;yes
1622 | group_A;yes
1623 | group_B;no
1624 | group_B;yes
1625 | group_A;no
1626 | group_B;no
1627 | group_B;no
1628 | group_A;yes
1629 | group_A;no
1630 | group_B;yes
1631 | group_A;yes
1632 | group_B;yes
1633 | group_A;no
1634 | group_B;no
1635 | group_A;no
1636 | group_A;yes
1637 | group_B;yes
1638 | group_A;yes
1639 | group_A;yes
1640 | group_B;yes
1641 | group_B;yes
1642 | group_A;no
1643 | group_B;yes
1644 | group_B;no
1645 | group_B;yes
1646 | group_B;no
1647 | group_A;no
1648 | group_B;no
1649 | group_A;no
1650 | group_A;yes
1651 | group_B;no
1652 | group_B;yes
1653 | group_B;no
1654 | group_B;yes
1655 | group_A;no
1656 | group_B;yes
1657 | group_B;yes
1658 | group_B;yes
1659 | group_A;no
1660 | group_B;no
1661 | group_A;yes
1662 | group_B;yes
1663 | group_B;yes
1664 | group_B;yes
1665 | group_A;no
1666 | group_B;no
1667 | group_A;yes
1668 | group_A;yes
1669 | group_B;no
1670 | group_A;no
1671 | group_B;no
1672 | group_B;no
1673 | group_A;no
1674 | group_B;no
1675 | group_B;yes
1676 | group_B;no
1677 | group_B;yes
1678 | group_B;no
1679 | group_B;yes
1680 | group_B;yes
1681 | group_B;no
1682 | group_A;yes
1683 | group_B;no
1684 | group_B;no
1685 | group_B;no
1686 | group_B;yes
1687 | group_B;no
1688 | group_B;yes
1689 | group_B;yes
1690 | group_B;yes
1691 | group_A;no
1692 | group_B;yes
1693 | group_A;yes
1694 | group_B;yes
1695 | group_B;yes
1696 | group_B;no
1697 | group_B;no
1698 | group_B;yes
1699 | group_B;no
1700 | group_A;no
1701 | group_A;no
1702 | group_B;yes
1703 | group_A;no
1704 | group_A;yes
1705 | group_A;yes
1706 | group_B;yes
1707 | group_B;yes
1708 | group_B;no
1709 | group_A;yes
1710 | group_B;no
1711 | group_A;yes
1712 | group_A;yes
1713 | group_A;no
1714 | group_B;no
1715 | group_A;yes
1716 | group_A;yes
1717 | group_A;yes
1718 | group_B;yes
1719 | group_B;no
1720 | group_B;no
1721 | group_A;yes
1722 | group_B;no
1723 | group_B;yes
1724 | group_B;yes
1725 | group_B;yes
1726 | group_B;no
1727 | group_B;yes
1728 | group_A;no
1729 | group_A;no
1730 | group_B;no
1731 | group_B;yes
1732 | group_A;yes
1733 | group_B;no
1734 | group_A;yes
1735 | group_B;yes
1736 | group_A;yes
1737 | group_B;yes
1738 | group_B;no
1739 | group_A;yes
1740 | group_A;yes
1741 | group_A;no
1742 | group_A;yes
1743 | group_A;no
1744 | group_A;no
1745 | group_A;yes
1746 | group_B;no
1747 | group_A;no
1748 | group_B;yes
1749 | group_B;yes
1750 | group_B;yes
1751 | group_A;yes
1752 | group_B;no
1753 | group_A;yes
1754 | group_A;yes
1755 | group_B;yes
1756 | group_A;yes
1757 | group_B;no
1758 | group_A;no
1759 | group_B;no
1760 | group_A;no
1761 | group_B;no
1762 | group_A;no
1763 | group_A;yes
1764 | group_A;no
1765 | group_B;yes
1766 | group_B;yes
1767 | group_A;yes
1768 | group_A;no
1769 | group_B;no
1770 | group_B;no
1771 | group_A;yes
1772 | group_B;yes
1773 | group_A;yes
1774 | group_B;yes
1775 | group_B;no
1776 | group_A;no
1777 | group_A;yes
1778 | group_B;yes
1779 | group_A;yes
1780 | group_A;no
1781 | group_B;no
1782 | group_A;no
1783 | group_B;yes
1784 | group_B;yes
1785 | group_B;no
1786 | group_B;no
1787 | group_A;yes
1788 | group_B;yes
1789 | group_B;no
1790 | group_A;no
1791 | group_B;yes
1792 | group_B;no
1793 | group_A;no
1794 | group_B;no
1795 | group_A;yes
1796 | group_B;yes
1797 | group_A;no
1798 | group_B;yes
1799 | group_B;no
1800 | group_B;no
1801 | group_B;yes
1802 | group_B;yes
1803 | group_A;yes
1804 | group_A;no
1805 | group_B;no
1806 | group_B;no
1807 | group_A;no
1808 | group_A;no
1809 | group_A;no
1810 | group_A;yes
1811 | group_B;yes
1812 | group_B;yes
1813 | group_B;no
1814 | group_B;yes
1815 | group_A;no
1816 | group_B;yes
1817 | group_B;no
1818 | group_B;no
1819 | group_A;yes
1820 | group_B;yes
1821 | group_B;no
1822 | group_A;no
1823 | group_B;no
1824 | group_B;yes
1825 | group_B;yes
1826 | group_B;yes
1827 | group_B;no
1828 | group_B;yes
1829 | group_B;no
1830 | group_A;no
1831 | group_B;yes
1832 | group_B;yes
1833 | group_B;no
1834 | group_B;no
1835 | group_A;no
1836 | group_B;no
1837 | group_A;yes
1838 | group_A;yes
1839 | group_B;no
1840 | group_B;no
1841 | group_A;no
1842 | group_B;no
1843 | group_B;yes
1844 | group_A;yes
1845 | group_A;no
1846 | group_B;yes
1847 | group_B;no
1848 | group_B;yes
1849 | group_B;no
1850 | group_B;yes
1851 | group_B;no
1852 | group_B;no
1853 | group_B;no
1854 | group_B;yes
1855 | group_B;yes
1856 | group_A;no
1857 | group_A;no
1858 | group_A;yes
1859 | group_B;yes
1860 | group_A;no
1861 | group_B;no
1862 | group_A;yes
1863 | group_B;yes
1864 | group_A;yes
1865 | group_A;yes
1866 | group_A;no
1867 | group_B;no
1868 | group_A;yes
1869 | group_B;yes
1870 | group_B;no
1871 | group_A;no
1872 | group_B;no
1873 | group_B;no
1874 | group_B;yes
1875 | group_A;yes
1876 | group_B;yes
1877 | group_B;yes
1878 | group_A;no
1879 | group_A;yes
1880 | group_A;yes
1881 | group_A;no
1882 | group_B;no
1883 | group_A;yes
1884 | group_B;no
1885 | group_B;no
1886 | group_A;no
1887 | group_A;yes
1888 | group_A;yes
1889 | group_B;yes
1890 | group_B;yes
1891 | group_B;no
1892 | group_A;yes
1893 | group_B;no
1894 | group_B;no
1895 | group_A;no
1896 | group_A;no
1897 | group_A;yes
1898 | group_B;no
1899 | group_A;yes
1900 | group_B;no
1901 | group_B;yes
1902 | group_B;no
1903 | group_B;no
1904 | group_B;yes
1905 | group_B;no
1906 | group_B;no
1907 | group_A;no
1908 | group_B;yes
1909 | group_A;yes
1910 | group_B;no
1911 | group_B;yes
1912 | group_B;no
1913 | group_B;no
1914 | group_A;no
1915 | group_B;yes
1916 | group_A;no
1917 | group_A;yes
1918 | group_B;yes
1919 | group_B;no
1920 | group_A;yes
1921 | group_A;no
1922 | group_A;yes
1923 | group_B;no
1924 | group_A;no
1925 | group_B;no
1926 | group_B;yes
1927 | group_B;no
1928 | group_B;no
1929 | group_A;no
1930 | group_B;yes
1931 | group_B;no
1932 | group_A;yes
1933 | group_A;no
1934 | group_A;yes
1935 | group_A;no
1936 | group_B;yes
1937 | group_B;no
1938 | group_A;no
1939 | group_A;yes
1940 | group_B;yes
1941 | group_B;yes
1942 | group_B;yes
1943 | group_B;yes
1944 | group_B;no
1945 | group_A;no
1946 | group_B;no
1947 | group_B;yes
1948 | group_B;no
1949 | group_B;yes
1950 | group_A;no
1951 | group_B;no
1952 | group_A;no
1953 | group_A;no
1954 | group_A;no
1955 | group_A;no
1956 | group_A;no
1957 | group_A;yes
1958 | group_A;yes
1959 | group_B;yes
1960 | group_A;no
1961 | group_B;yes
1962 | group_A;yes
1963 | group_A;yes
1964 | group_B;yes
1965 | group_B;no
1966 | group_A;yes
1967 | group_A;no
1968 | group_A;yes
1969 | group_B;yes
1970 | group_A;yes
1971 | group_A;yes
1972 | group_B;no
1973 | group_A;no
1974 | group_A;yes
1975 | group_A;yes
1976 | group_A;no
1977 | group_B;yes
1978 | group_B;no
1979 | group_B;yes
1980 | group_B;yes
1981 | group_B;yes
1982 | group_A;yes
1983 | group_B;yes
1984 | group_A;no
1985 | group_B;yes
1986 | group_B;yes
1987 | group_A;no
1988 | group_B;yes
1989 | group_A;no
1990 | group_A;no
1991 | group_A;no
1992 | group_A;no
1993 | group_A;yes
1994 | group_A;no
1995 | group_A;yes
1996 | group_A;yes
1997 | group_B;yes
1998 | group_B;yes
1999 | group_A;no
2000 | group_A;no
2001 | group_B;no
2002 |
--------------------------------------------------------------------------------