├── setup.cfg
├── .gitignore
├── doc
├── _build
│ ├── html
│ │ ├── objects.inv
│ │ ├── _static
│ │ │ ├── up.png
│ │ │ ├── down.png
│ │ │ ├── file.png
│ │ │ ├── plus.png
│ │ │ ├── comment.png
│ │ │ ├── minus.png
│ │ │ ├── ajax-loader.gif
│ │ │ ├── down-pressed.png
│ │ │ ├── up-pressed.png
│ │ │ ├── comment-bright.png
│ │ │ ├── comment-close.png
│ │ │ ├── pygments.css
│ │ │ ├── default.css
│ │ │ ├── sidebar.js
│ │ │ ├── doctools.js
│ │ │ ├── underscore.js
│ │ │ └── basic.css
│ │ ├── .buildinfo
│ │ ├── _sources
│ │ │ └── index.txt
│ │ ├── _modules
│ │ │ ├── index.html
│ │ │ └── clintrials
│ │ │ │ └── coll.html
│ │ ├── search.html
│ │ ├── py-modindex.html
│ │ └── searchindex.js
│ └── doctrees
│ │ ├── index.doctree
│ │ └── environment.pickle
├── index.rst
├── Makefile
└── conf.py
├── tests
├── test_threeplusthree.py
├── test_wagestait.py
├── test_watu.py
├── test_recruitment.py
└── test_crm.py
├── requirements.txt
├── clintrials
├── phase2
│ ├── __init__.py
│ ├── bebop
│ │ ├── peps2v2.py
│ │ └── __init__.py
│ └── simple.py
├── __init__.py
├── coll.py
├── common.py
├── stats.py
├── tte.py
├── recruitment.py
└── simulation.py
├── setup.py
├── tutorials
└── matchpoint
│ ├── README.md
│ ├── Ambivalence.ipynb
│ └── DTPs.ipynb
├── README.rst
└── README.md
/setup.cfg:
--------------------------------------------------------------------------------
1 | [metadata]
2 | description-file = README.md
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.pyc
2 | .idea/
3 | dist
4 | *.egg-info/
5 | .ipynb_checkpoints/
--------------------------------------------------------------------------------
/doc/_build/html/objects.inv:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brockk/clintrials/HEAD/doc/_build/html/objects.inv
--------------------------------------------------------------------------------
/doc/_build/html/_static/up.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brockk/clintrials/HEAD/doc/_build/html/_static/up.png
--------------------------------------------------------------------------------
/doc/_build/html/_static/down.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brockk/clintrials/HEAD/doc/_build/html/_static/down.png
--------------------------------------------------------------------------------
/doc/_build/html/_static/file.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brockk/clintrials/HEAD/doc/_build/html/_static/file.png
--------------------------------------------------------------------------------
/doc/_build/html/_static/plus.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brockk/clintrials/HEAD/doc/_build/html/_static/plus.png
--------------------------------------------------------------------------------
/tests/test_threeplusthree.py:
--------------------------------------------------------------------------------
__author__ = 'Kristian Brock'
__contact__ = 'kristian.brock@gmail.com'

# TODO: tests for the 3+3 dose-escalation design are not yet implemented.
--------------------------------------------------------------------------------
/doc/_build/doctrees/index.doctree:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brockk/clintrials/HEAD/doc/_build/doctrees/index.doctree
--------------------------------------------------------------------------------
/doc/_build/html/_static/comment.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brockk/clintrials/HEAD/doc/_build/html/_static/comment.png
--------------------------------------------------------------------------------
/doc/_build/html/_static/minus.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brockk/clintrials/HEAD/doc/_build/html/_static/minus.png
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | ggplot>=0.6.5
2 | matplotlib>=1.4.3
3 | numpy>=1.9.2
4 | pandas>=0.15.2
5 | scipy>=0.15.1
6 | statsmodels>=0.6.1
--------------------------------------------------------------------------------
/doc/_build/doctrees/environment.pickle:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brockk/clintrials/HEAD/doc/_build/doctrees/environment.pickle
--------------------------------------------------------------------------------
/doc/_build/html/_static/ajax-loader.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brockk/clintrials/HEAD/doc/_build/html/_static/ajax-loader.gif
--------------------------------------------------------------------------------
/doc/_build/html/_static/down-pressed.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brockk/clintrials/HEAD/doc/_build/html/_static/down-pressed.png
--------------------------------------------------------------------------------
/doc/_build/html/_static/up-pressed.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brockk/clintrials/HEAD/doc/_build/html/_static/up-pressed.png
--------------------------------------------------------------------------------
/clintrials/phase2/__init__.py:
--------------------------------------------------------------------------------
__author__ = 'Kristian Brock'
__contact__ = 'kristian.brock@gmail.com'


# Submodules re-exported by `from clintrials.phase2 import *`.
# NOTE(review): the `bebop` subpackage under phase2 is not listed here, so it
# is excluded from star-imports -- confirm whether that is intentional.
__all__ = ["simple"]
--------------------------------------------------------------------------------
/doc/_build/html/_static/comment-bright.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brockk/clintrials/HEAD/doc/_build/html/_static/comment-bright.png
--------------------------------------------------------------------------------
/doc/_build/html/_static/comment-close.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/brockk/clintrials/HEAD/doc/_build/html/_static/comment-close.png
--------------------------------------------------------------------------------
/clintrials/__init__.py:
--------------------------------------------------------------------------------
__author__ = 'Kristian Brock'
__contact__ = 'kristian.brock@gmail.com'

# Submodules re-exported by `from clintrials import *`.
# NOTE(review): "dosefinding" and "util" are named here but are not visible in
# the package tree shown -- confirm those modules exist.
__all__ = ["dosefinding", "phase2", "coll", "common", "recruitment", "simulation", "stats", "tte", "util"]
5 |
--------------------------------------------------------------------------------
/doc/_build/html/.buildinfo:
--------------------------------------------------------------------------------
1 | # Sphinx build info version 1
2 | # This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
3 | config: 8d813b2368a5473396cda4428d1e92ab
4 | tags: fbb0d17656682115ca4d033fb2f83ba1
5 |
--------------------------------------------------------------------------------
/clintrials/coll.py:
--------------------------------------------------------------------------------
1 | __author__ = 'Kristian Brock'
2 | __contact__ = 'kristian.brock@gmail.com'
3 |
4 | """ Functions and classes for manipulating collections. """
5 |
6 |
def to_1d_list_gen(x):
    """ Recursively yield the scalar leaves of arbitrarily nested lists.

    A non-list argument is yielded as-is; a list is walked depth-first,
    left to right, so leaves come out in their original order.

    .. note:: this function is recursive.

    """

    if not isinstance(x, list):
        yield x
        return
    for item in x:
        for leaf in to_1d_list_gen(item):
            yield leaf
20 |
21 |
def to_1d_list(x):
    """ Flatten a scalar, list, or arbitrarily deep list-of-lists into one flat list.

    .. note:: the traversal itself is delegated to the generator
              :func:`to_1d_list_gen` because **we like yield**!

    E.g.

    >>> to_1d_list(0)
    [0]
    >>> to_1d_list([1])
    [1]
    >>> to_1d_list([[1,2],3,[4,5]])
    [1, 2, 3, 4, 5]
    >>> to_1d_list([[1,2],3,[4,5,[6,[7,8,[9]]]]])
    [1, 2, 3, 4, 5, 6, 7, 8, 9]

    """
    # Materialise the lazy generator into a concrete depth-1 list.
    return list(to_1d_list_gen(x))
--------------------------------------------------------------------------------
/clintrials/phase2/bebop/peps2v2.py:
--------------------------------------------------------------------------------
1 | __author__ = 'Kristian Brock'
2 | __contact__ = 'kristian.brock@gmail.com'
3 |
4 |
5 | """ Probability models for the BeBOP model used in PePS2 trial.
6 |
7 | In PePS2, the patient vector is x and D is a list of x instances, where:
8 |
9 | x[0] is efficacy event
10 | x[1] is toxicity event
11 | x[2] is pre-treated group membership dummy
12 | x[3] is low PD-L1 group membership dummy
13 | x[4] is middle PD-L1 group membership dummy
14 |
15 | The parameter vector is theta:
16 | theta[0] is efficacy model intercept
17 | theta[1] is efficacy model pre-treated group coeff
18 | theta[2] is efficacy model low PD-L1 group coeff
19 | theta[3] is efficacy model middle PD-L1 group coeff
20 | theta[4] is toxicity model intercept
21 | theta[5] is association param
22 |
23 | """
24 |
25 | import numpy
26 |
27 |
def pi_e(x, theta):
    """ Probability of efficacy for patient vector x under parameter samples theta.

    x[2], x[3], x[4] are the group-membership dummies (pre-treated, low PD-L1,
    middle PD-L1); theta columns 0-3 hold the efficacy intercept and the three
    matching group coefficients. theta is indexed as a 2-d array with one row
    per parameter sample, so a vector of probabilities is returned.
    """
    linear_predictor = theta[:, 0]
    # Add each group coefficient times its membership dummy, in the same
    # left-to-right order as the original sum.
    for coef_col, covariate_idx in ((1, 2), (2, 3), (3, 4)):
        linear_predictor = linear_predictor + theta[:, coef_col] * x[covariate_idx]
    return 1 / (1 + numpy.exp(-linear_predictor))
31 |
32 |
def pi_t(x, theta):
    """ Probability of toxicity under parameter samples theta.

    The toxicity model is intercept-only (theta column 4); x is unused but
    kept so all probability functions share the same call signature.
    """
    tox_intercept = theta[:, 4]
    return 1 / (1 + numpy.exp(-tox_intercept))
36 |
37 |
def pi_ab(x, theta):
    """ Joint probability of the observed (efficacy, toxicity) outcome pair.

    Combines the marginal Bernoulli likelihoods from pi_e and pi_t with an
    association adjustment scaled by (e^psi - 1) / (e^psi + 1), where psi is
    theta[:, 5]; psi = 0 makes the adjustment vanish, recovering independence.
    """
    eff = x[0]  # efficacy event indicator
    tox = x[1]  # toxicity event indicator
    psi = theta[:, 5]
    p_eff = pi_e(x, theta)
    p_tox = pi_t(x, theta)
    # Product of the two marginal Bernoulli likelihoods.
    likelihood = p_eff**eff * (1 - p_eff)**(1 - eff) * p_tox**tox * (1 - p_tox)**(1 - tox)
    # Association term: its sign flips with the parity of the outcome pair.
    likelihood = likelihood + (-1)**(tox + eff) * p_eff * (1 - p_eff) * p_tox * (1 - p_tox) * (numpy.exp(psi) - 1) / (numpy.exp(psi) + 1)
    return likelihood
47 |
48 |
49 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup, find_packages
2 |
# Single source of truth for the release version: it previously appeared both
# in the `version` argument and inside the tag-based `download_url`, so define
# it once to stop the two from drifting apart.
VERSION = '0.1.4'

setup(
    name='clintrials',

    # Exclude non-package directories from the distribution.
    packages=find_packages(exclude=['gh-pages', 'doc', 'tutorials']),

    version=VERSION,

    description='clintrials is a library of clinical trial designs and methods in Python',

    author='Kristian Brock',

    author_email='kristian.brock@gmail.com',

    url='https://github.com/brockk/clintrials',  # use the URL to the github repo

    download_url='https://github.com/brockk/clintrials/tarball/' + VERSION,  # Should match a git tag

    keywords=['clinical', 'trial', 'biostatistics', 'medical', 'statistics'],  # keywords

    classifiers=[

        # How mature is this project? Common values are
        #   3 - Alpha
        #   4 - Beta
        #   5 - Production/Stable
        'Development Status :: 3 - Alpha',

        # Indicate who your project is intended for
        'Intended Audience :: Science/Research',
        'Topic :: Scientific/Engineering :: Medical Science Apps.',

        # Pick your license as you wish (should match "license" above)
        'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',

        # Specify the Python versions you support here. In particular, ensure
        # that you indicate whether you support Python 2, Python 3 or both.
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.5',
    ],
)
--------------------------------------------------------------------------------
/tutorials/matchpoint/README.md:
--------------------------------------------------------------------------------
1 | # EffTox in Matchpoint tutorials #
2 |
3 | ## README ##
4 |
5 |
6 | The three tutorials
7 |
8 | DTPs.ipynb
9 |
10 | Utility.ipynb
11 |
12 | Ambivalence.ipynb
13 |
14 | are provided to complement the publication _Implementing the EffTox Dose-Finding Design in the Matchpoint Trial_ (Brock et al., in submission).
15 | Please consult the paper for the clinical background, the methodology details, and full explanation of the terminology.
16 |
17 | These notebooks can be viewed online at https://github.com/brockk/clintrials/tree/master/tutorials/matchpoint
18 | but to run them, you will need Python and Jupyter.
19 | We recommend you install Anaconda because it greatly simplifies the process of installing Python and the common add-ons like jupyter, numpy, scipy, pandas, etc.
20 | Install it from https://www.continuum.io/downloads.
21 |
22 | The notebooks with plots use ggplot. To get ggplot, run:
23 |
24 | `pip install ggplot`
25 |
26 | at the command line.
27 |
28 | Clone this repository by navigating to a directory where the code will live and running
29 |
30 | `git clone https://github.com/brockk/clintrials.git`
31 |
32 | `cd clintrials`
33 |
34 | You need to put clintrials on your path.
35 | An easy way to do this is to edit the PYTHONPATH environment variable.
36 | To do this in Mac or Linux, run
37 |
38 | `export PYTHONPATH=$PYTHONPATH:$(pwd)`
39 |
40 | Or, in Windows run
41 |
42 | `set PYTHONPATH=%PYTHONPATH%;%CD%`
43 |
44 | Then, load a jupyter notebook session for the tutorials using:
45 |
46 | `jupyter notebook --notebook-dir=tutorials/matchpoint`
47 |
48 | A browser window should appear and you should see the tutorials.
49 | "Test ggplot.ipynb" is a notebook to test whether ggplot is correctly installed.
50 |
51 | ## Plan B
52 | If adding clintrials to your path by editing environment variables is not an option for you (e.g. lack of admin rights), an alternative is to copy the notebooks you want to use to the root directory that contains the folders named `docs` and `tests` and the `README.md` file.
53 | Then navigate to that directory in console and run
54 |
55 | `jupyter notebook`
56 |
57 | clintrials should automatically be on your path because it resides in the executing directory.
58 |
59 |
60 |
61 |
62 |
63 |
64 |
--------------------------------------------------------------------------------
/doc/index.rst:
--------------------------------------------------------------------------------
1 | .. clintrials documentation master file, created by
2 | sphinx-quickstart on Sat Dec 13 19:54:47 2014.
3 | You can adapt this file completely to your liking, but it should at least
4 | contain the root `toctree` directive.
5 |
6 | Welcome to clintrials's documentation!
7 | ======================================
8 |
9 | Contents: BLAH
10 |
11 | .. toctree::
12 | :maxdepth: 2
13 |
14 |
15 |
16 | Common, helpful stuff
17 | ======================
18 |
19 | General functions
20 | __________________
21 |
22 | .. automodule:: clintrials.common
23 | :members: inverse_logit
24 |
25 | dfcrm-style link functions and their inverses
26 | ______________________________________________
27 |
28 | See http://cran.r-project.org/web/packages/dfcrm/dfcrm.pdf
29 |
30 | .. automodule:: clintrials.common
31 | :members: empiric, inverse_empiric, logistic, inverse_logistic, hyperbolic_tan, inverse_hyperbolic_tan
32 |
33 |
34 |
35 | Coll
36 | ____
37 |
38 | .. automodule:: clintrials.coll
39 | :members:
40 |
41 |
42 | Recruitment
43 | ____________
44 | .. autoclass:: clintrials.recruitment.RecruitmentStream
45 | :members:
46 | :special-members:
47 | .. autoclass:: clintrials.recruitment.ConstantRecruitmentStream
48 | :members:
49 | :special-members:
50 | .. autoclass:: clintrials.recruitment.QuadrilateralRecruitmentStream
51 | :members:
52 | :special-members:
53 |
54 |
55 | Util
56 | _____
57 | .. automodule:: clintrials.util
58 | :members:
59 |
60 |
61 |
62 | Phase I Trial Designs
63 | ======================
64 | Dose-finding based on toxicity
65 | _______________________________
66 |
67 | These designs are used to find the maximum tolerable dose (MTD) for cytotoxic agents.
68 |
69 |
70 | Dose-finding based on efficacy and toxicity
71 | ____________________________________________
72 |
73 | These designs are used to find the optimum biological dose (OBD) for cytotoxic and cytostatic agents.
74 |
75 |
76 |
77 | Phase II Trial Designs
78 | =======================
79 |
80 | .. automodule:: clintrials.phase2
81 | :members:
82 |
83 | Time-to-Event Designs
84 | ______________________
85 |
86 | Time-to-event outcomes are not typical in phase II clinical trials, but they do exist.
87 |
88 | .. automodule:: clintrials.tte
89 | :members:
90 |
91 |
92 |
93 | Indices and tables
94 | ==================
95 |
96 | * :ref:`genindex`
97 | * :ref:`modindex`
98 | * :ref:`search`
99 |
100 |
--------------------------------------------------------------------------------
/doc/_build/html/_sources/index.txt:
--------------------------------------------------------------------------------
1 | .. clintrials documentation master file, created by
2 | sphinx-quickstart on Sat Dec 13 19:54:47 2014.
3 | You can adapt this file completely to your liking, but it should at least
4 | contain the root `toctree` directive.
5 |
6 | Welcome to clintrials's documentation!
7 | ======================================
8 |
9 | Contents: BLAH
10 |
11 | .. toctree::
12 | :maxdepth: 2
13 |
14 |
15 |
16 | Common, helpful stuff
17 | ======================
18 |
19 | General functions
20 | __________________
21 |
22 | .. automodule:: clintrials.common
23 | :members: inverse_logit
24 |
25 | dfcrm-style link functions and their inverses
26 | ______________________________________________
27 |
28 | See http://cran.r-project.org/web/packages/dfcrm/dfcrm.pdf
29 |
30 | .. automodule:: clintrials.common
31 | :members: empiric, inverse_empiric, logistic, inverse_logistic, hyperbolic_tan, inverse_hyperbolic_tan
32 |
33 |
34 |
35 | Coll
36 | ____
37 |
38 | .. automodule:: clintrials.coll
39 | :members:
40 |
41 |
42 | Recruitment
43 | ____________
44 | .. autoclass:: clintrials.recruitment.RecruitmentStream
45 | :members:
46 | :special-members:
47 | .. autoclass:: clintrials.recruitment.ConstantRecruitmentStream
48 | :members:
49 | :special-members:
50 | .. autoclass:: clintrials.recruitment.QuadrilateralRecruitmentStream
51 | :members:
52 | :special-members:
53 |
54 |
55 | Util
56 | _____
57 | .. automodule:: clintrials.util
58 | :members:
59 |
60 |
61 |
62 | Phase I Trial Designs
63 | ======================
64 | Dose-finding based on toxicity
65 | _______________________________
66 |
67 | These designs are used to find the maximum tolerable dose (MTD) for cytotoxic agents.
68 |
69 |
70 | Dose-finding based on efficacy and toxicity
71 | ____________________________________________
72 |
73 | These designs are used to find the optimum biological dose (OBD) for cytotoxic and cytostatic agents.
74 |
75 |
76 |
77 | Phase II Trial Designs
78 | =======================
79 |
80 | .. automodule:: clintrials.phase2
81 | :members:
82 |
83 | Time-to-Event Designs
84 | ______________________
85 |
86 | Time-to-event outcomes are not typical in phase II clinical trials, but they do exist.
87 |
88 | .. automodule:: clintrials.tte
89 | :members:
90 |
91 |
92 |
93 | Indices and tables
94 | ==================
95 |
96 | * :ref:`genindex`
97 | * :ref:`modindex`
98 | * :ref:`search`
99 |
100 |
--------------------------------------------------------------------------------
/clintrials/common.py:
--------------------------------------------------------------------------------
1 | __author__ = 'Kristian Brock'
2 | __contact__ = 'kristian.brock@gmail.com'
3 |
4 |
5 | """ Common, useful functions in the statistics and mathematics of clinical trials. """
6 |
7 | import numpy as np
8 |
9 |
def inverse_logit(x):
    """ Compute the inverse logit (standard logistic sigmoid) of x:

    :math:`\\frac{1}{1 + e^{-x}}`,
    or equivalently, :math:`\\frac{e^x}{e^x+1}`

    :param x: x-variable
    :type x: float
    :return: Inverse logit function value.
    :rtype: float

    >>> inverse_logit(0)
    0.5

    """

    decay = np.exp(-x)
    return 1 / (1 + decay)
27 |
28 |
29 | # Two-parameter link functions used in CRM-style designs
30 | # They are written in pairs and all use the same call signature.
31 | # They take their lead from the same in the dfcrm R-package.
def empiric(x, a0=None, beta=0):
    """ Compute the empiric (power) link function value:

    :math:`x^{e^\\beta}`

    :param x: x-variable
    :type x: float
    :param a0: intercept parameter; unused here, present only so every link function shares one call signature.
    :type a0: float
    :param beta: slope parameter
    :type beta: float
    :return: Empiric function value
    :rtype: float

    >>> import math
    >>> empiric(0.5, beta=math.log(2))
    0.25

    """

    exponent = np.exp(beta)
    return x ** exponent
53 |
54 |
def inverse_empiric(x, a0=0, beta=0):
    """ Compute the inverse empiric (power) link function value:

    :math:`x^{e^{-\\beta}}`

    .. note:: this function is the inverse of :func:`clintrials.common.empiric`.

    :param x: x-variable
    :type x: float
    :param a0: intercept parameter; unused here, present only so every link function shares one call signature.
    :type a0: float
    :param beta: slope parameter
    :type beta: float
    :return: Inverse empiric function value
    :rtype: float

    >>> import math
    >>> inverse_empiric(0.25, beta=math.log(2))
    0.5

    """

    inv_exponent = np.exp(-beta)
    return x ** inv_exponent
78 |
79 |
def logistic(x, a0=0, beta=0):
    """ Compute the two-parameter logistic link function value:

    :math:`\\frac{1}{1 + e^{-a_0 - e^\\beta x}}`

    :param x: x-variable
    :type x: float
    :param a0: intercept parameter.
    :type a0: float
    :param beta: slope parameter
    :type beta: float
    :return: Logistic function value
    :rtype: float

    >>> logistic(0.25, -1, 1)
    0.42057106852688747

    """

    # Linear predictor on the logit scale; sigmoid of it gives the value.
    linear = a0 + np.exp(beta) * x
    return 1 / (1 + np.exp(-linear))
100 |
101 |
def inverse_logistic(x, a0=0, beta=0):
    """ Compute the inverse of the two-parameter logistic link function:

    :math:`\\frac{\\log(\\frac{x}{1-x}) - a_0}{e^\\beta}`

    .. note:: this function is the inverse of :func:`clintrials.common.logistic`.

    :param x: x-variable
    :type x: float
    :param a0: intercept parameter.
    :type a0: float
    :param beta: slope parameter
    :type beta: float
    :return: Inverse logistic function value
    :rtype: float

    >>> round(inverse_logistic(0.42057106852688747, -1, 1), 2)
    0.25

    """

    # Recover the linear predictor via the logit, then undo intercept and slope.
    log_odds = np.log(x / (1 - x))
    return (log_odds - a0) / np.exp(beta)
124 |
125 |
def hyperbolic_tan(x, a0=0, beta=0):
    """ Compute the hyperbolic-tangent link function value:

    :math:`(\\frac{\\tanh(x) + 1}{2})^{e^\\beta}`

    :param x: x-variable
    :type x: float
    :param a0: intercept parameter; unused here, present only so every link function shares one call signature.
    :type a0: float
    :param beta: slope parameter
    :type beta: float
    :return: Hyperbolic tangent link function value
    :rtype: float
    """
    # tanh maps to (-1, 1); shift and halve to land in (0, 1) before powering.
    shifted = (np.tanh(x) + 1) / 2
    return shifted ** np.exp(beta)
128 |
129 |
def inverse_hyperbolic_tan(x, a0=0, beta=0):
    """ Compute the inverse of the hyperbolic-tangent link function:

    :math:`\\tanh^{-1}(2 x^{e^{-\\beta}} - 1)`

    .. note:: this function is the inverse of :func:`clintrials.common.hyperbolic_tan`.

    :param x: x-variable
    :type x: float
    :param a0: intercept parameter; unused here, present only so every link function shares one call signature.
    :type a0: float
    :param beta: slope parameter
    :type beta: float
    :return: Inverse hyperbolic tangent link function value
    :rtype: float
    """
    # Undo the power first, then map (0, 1) back onto the real line.
    root = x ** np.exp(-beta)
    return np.arctanh(2 * root - 1)
132 |
--------------------------------------------------------------------------------
/doc/_build/html/_modules/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
5 |
6 |
7 |
8 |
9 |
10 |
11 | Overview: module code — clintrials 0.1.1 documentation
12 |
13 |
14 |
15 |
16 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
44 |
45 |
46 |
47 |
48 |
49 |
50 |
All modules for which code is available
51 |
58 |
59 |
60 |
61 |
62 |
79 |
80 |
81 |
93 |
97 |
98 |
--------------------------------------------------------------------------------
/doc/_build/html/search.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
5 |
6 |
7 |
8 |
9 |
10 |
11 | Search — clintrials 0.1.1 documentation
12 |
13 |
14 |
15 |
16 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
34 |
35 |
36 |
37 |
38 |
50 |
51 |
52 |
53 |
54 |
55 |
56 |
Search
57 |
58 |
59 |
60 | Please activate JavaScript to enable the search
61 | functionality.
62 |
63 |
64 |
65 | From here you can search these documents. Enter your search
66 | words into the box below and click "search". Note that the search
67 | function will automatically search for all of the words. Pages
68 | containing fewer words won't appear in the result list.
69 |
70 |
75 |
76 |
77 |
78 |
79 |
80 |
81 |
82 |
83 |
87 |
88 |
89 |
101 |
105 |
106 |
--------------------------------------------------------------------------------
/doc/_build/html/_static/pygments.css:
--------------------------------------------------------------------------------
1 | .highlight .hll { background-color: #ffffcc }
2 | .highlight { background: #eeffcc; }
3 | .highlight .c { color: #408090; font-style: italic } /* Comment */
4 | .highlight .err { border: 1px solid #FF0000 } /* Error */
5 | .highlight .k { color: #007020; font-weight: bold } /* Keyword */
6 | .highlight .o { color: #666666 } /* Operator */
7 | .highlight .cm { color: #408090; font-style: italic } /* Comment.Multiline */
8 | .highlight .cp { color: #007020 } /* Comment.Preproc */
9 | .highlight .c1 { color: #408090; font-style: italic } /* Comment.Single */
10 | .highlight .cs { color: #408090; background-color: #fff0f0 } /* Comment.Special */
11 | .highlight .gd { color: #A00000 } /* Generic.Deleted */
12 | .highlight .ge { font-style: italic } /* Generic.Emph */
13 | .highlight .gr { color: #FF0000 } /* Generic.Error */
14 | .highlight .gh { color: #000080; font-weight: bold } /* Generic.Heading */
15 | .highlight .gi { color: #00A000 } /* Generic.Inserted */
16 | .highlight .go { color: #333333 } /* Generic.Output */
17 | .highlight .gp { color: #c65d09; font-weight: bold } /* Generic.Prompt */
18 | .highlight .gs { font-weight: bold } /* Generic.Strong */
19 | .highlight .gu { color: #800080; font-weight: bold } /* Generic.Subheading */
20 | .highlight .gt { color: #0044DD } /* Generic.Traceback */
21 | .highlight .kc { color: #007020; font-weight: bold } /* Keyword.Constant */
22 | .highlight .kd { color: #007020; font-weight: bold } /* Keyword.Declaration */
23 | .highlight .kn { color: #007020; font-weight: bold } /* Keyword.Namespace */
24 | .highlight .kp { color: #007020 } /* Keyword.Pseudo */
25 | .highlight .kr { color: #007020; font-weight: bold } /* Keyword.Reserved */
26 | .highlight .kt { color: #902000 } /* Keyword.Type */
27 | .highlight .m { color: #208050 } /* Literal.Number */
28 | .highlight .s { color: #4070a0 } /* Literal.String */
29 | .highlight .na { color: #4070a0 } /* Name.Attribute */
30 | .highlight .nb { color: #007020 } /* Name.Builtin */
31 | .highlight .nc { color: #0e84b5; font-weight: bold } /* Name.Class */
32 | .highlight .no { color: #60add5 } /* Name.Constant */
33 | .highlight .nd { color: #555555; font-weight: bold } /* Name.Decorator */
34 | .highlight .ni { color: #d55537; font-weight: bold } /* Name.Entity */
35 | .highlight .ne { color: #007020 } /* Name.Exception */
36 | .highlight .nf { color: #06287e } /* Name.Function */
37 | .highlight .nl { color: #002070; font-weight: bold } /* Name.Label */
38 | .highlight .nn { color: #0e84b5; font-weight: bold } /* Name.Namespace */
39 | .highlight .nt { color: #062873; font-weight: bold } /* Name.Tag */
40 | .highlight .nv { color: #bb60d5 } /* Name.Variable */
41 | .highlight .ow { color: #007020; font-weight: bold } /* Operator.Word */
42 | .highlight .w { color: #bbbbbb } /* Text.Whitespace */
43 | .highlight .mf { color: #208050 } /* Literal.Number.Float */
44 | .highlight .mh { color: #208050 } /* Literal.Number.Hex */
45 | .highlight .mi { color: #208050 } /* Literal.Number.Integer */
46 | .highlight .mo { color: #208050 } /* Literal.Number.Oct */
47 | .highlight .sb { color: #4070a0 } /* Literal.String.Backtick */
48 | .highlight .sc { color: #4070a0 } /* Literal.String.Char */
49 | .highlight .sd { color: #4070a0; font-style: italic } /* Literal.String.Doc */
50 | .highlight .s2 { color: #4070a0 } /* Literal.String.Double */
51 | .highlight .se { color: #4070a0; font-weight: bold } /* Literal.String.Escape */
52 | .highlight .sh { color: #4070a0 } /* Literal.String.Heredoc */
53 | .highlight .si { color: #70a0d0; font-style: italic } /* Literal.String.Interpol */
54 | .highlight .sx { color: #c65d09 } /* Literal.String.Other */
55 | .highlight .sr { color: #235388 } /* Literal.String.Regex */
56 | .highlight .s1 { color: #4070a0 } /* Literal.String.Single */
57 | .highlight .ss { color: #517918 } /* Literal.String.Symbol */
58 | .highlight .bp { color: #007020 } /* Name.Builtin.Pseudo */
59 | .highlight .vc { color: #bb60d5 } /* Name.Variable.Class */
60 | .highlight .vg { color: #bb60d5 } /* Name.Variable.Global */
61 | .highlight .vi { color: #bb60d5 } /* Name.Variable.Instance */
62 | .highlight .il { color: #208050 } /* Literal.Number.Integer.Long */
--------------------------------------------------------------------------------
/README.rst:
--------------------------------------------------------------------------------
1 | clintrials
2 | ==========
3 |
4 | README
5 | ------
6 |
7 | clintrials is a library of clinical trial designs and methods in Python.
8 | This library is intended to facilitate research.
9 | It is provided "as-is" and the author accepts absolutely no responsibility whatsoever for the correctness or integrity of the calculations.
10 |
11 |
12 |
13 | What does clintrials do?
14 | ----
15 |
16 | * This library implements some designs used in clinical trials.
17 | * It has implementations of O'Quigley's CRM design, Thall & Cook's EffTox design, and Wages & Tait's efficacy+toxicity design.
18 | * There is also an implementation of my very own BEBOP trial design for the simultaneous study of bivariate binary outcomes (like efficacy and toxicity) in the presence of predictive variables, both continuous and binary.
19 | * There is a bias towards phase I and II trial designs because that is my research area.
20 | * I expect to add more designs in the future.
21 | * It is written in pure Python, intentionally. This library would be quicker if it was written in C++ or Java but it would not be as portable or readable.
22 | * Some of the code is fairly mature but the repo itself is young and in flux.
23 | * I use 64 bit Python 3.5 but endeavour to maintain 2.7 compatibility.
24 |
25 |
26 | Why Python?
27 | ----
28 | No biostatisticians use Python, they use R / Stata / SAS, so why is this in Python?
29 | Well, Python is used in lots of other sciences because it is rich and pleasant to work with.
30 | Python is object-orientated, which is important when you are writing a bunch of classes that do a similar job in fundamentally different ways, like clinical trial designs, say.
31 | It is nice to program in Python.
32 | I think it is sadly underused in clinical trials.
33 | Python also offers lots of extras and the parallel capabilities of IPython are having a positive impact on my work.
34 |
35 | If you have never used Python, I recommend you install Anaconda, a distribution of Python aimed at academics and researchers that includes the tools we need, switch to the tutorial directory of clintrials and then fire up jupyter notebook.
36 |
37 | Dependencies
38 | ----
39 |
40 | * numpy, scipy, pandas & statsmodels - all of these are installed by Anaconda so I highly recommend that
41 | * Some features also require matplotlib and ggplot. matplotlib also comes with Anaconda but ggplot will require a separate install.
42 | If you need ggplot, be nice to yourself and use pip:
43 | `pip install ggplot`
44 |
45 |
46 | How do I get set up?
47 | ----
48 |
49 | There are two ways.
50 | The first method uses pip and the Python package index.
51 | The extras like the tutorials are not provided.
52 | The second clones this repo using git.
53 | Tutorials are provided.
54 |
55 |
56 | Using pip to get just the clintrials code
57 | ----
58 | To get the latest milestone release, use pip.
59 | Open up a terminal or DOS session and fire off a:
60 |
61 | `pip install clintrials`
62 |
63 | The disadvantage of this method is that you don't get the nice tutorial workbooks that illustrate the methods. If you want those, use...
64 |
65 |
66 | Using git to clone this repo, including tutorial notebooks
67 | ----------------------------------------------------------
68 |
69 | Navigate in terminal or DOS to a directory where you want the code and run
70 |
71 | `git clone https://github.com/brockk/clintrials.git`
72 |
73 | `cd clintrials`
74 |
75 | You need to put clintrials on your path.
76 | An easy way to do this is to edit the PYTHONPATH environment variable.
77 | To do this in Mac or Linux, run
78 |
79 | `export PYTHONPATH=$PYTHONPATH:$(pwd)`
80 |
81 | Or, in Windows run
82 |
83 | `set PYTHONPATH=%PYTHONPATH%;%CD%`
84 |
85 | Then, load a jupyter notebook session for the tutorials using:
86 |
87 | `jupyter notebook --notebook-dir=tutorials`
88 |
89 | A browser window should appear and you should see the tutorials.
90 | Tutorials related to the _Implementing the EffTox Dose-Finding Design in the Matchpoint Trial_ publication
91 | are in the `matchpoint` directory.
92 |
93 | Documentation
94 | -------------
95 |
96 | Documentation will eventually appear at
97 |
98 |
99 |
100 | Contact
101 | -------
102 |
103 | The repo owner is Kristian Brock, @brockk.
104 | Feel free to get in contact through GitHub.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # clintrials #
2 |
3 | ## README ##
4 |
5 | clintrials is a library of clinical trial designs and methods in Python.
6 | This library is intended to facilitate research.
7 | It is provided "as-is" and the author accepts absolutely no responsibility whatsoever for the correctness or integrity of the calculations.
8 |
9 |
10 | ### What does clintrials do? ###
11 |
12 | * This library implements some designs used in clinical trials.
13 | * It has implementations of O'Quigley's CRM design, Thall & Cook's EffTox design, and Wages & Tait's efficacy+toxicity design.
14 | * There is also an implementation of my very own BEBOP trial design for the simultaneous study of bivariate binary outcomes (like efficacy and toxicity) in the presence of predictive variables, both continuous and binary.
15 | * There is a bias towards phase I and II trial designs because that is my research area.
16 | * I expect to add more designs in the future.
17 | * It is written in pure Python, intentionally. This library would be quicker if it was written in C++ or Java but it would not be as portable or readable.
18 | * Some of the code is fairly mature but the repo itself is young and in flux.
19 | * I use 64 bit Python 3.5 but endeavour to maintain 2.7 compatibility.
20 |
21 | Why Python?
22 | ----
23 | No biostatisticians use Python, they use R / Stata / SAS, so why is this in Python?
24 | Well, Python is used in lots of other sciences because it is rich and pleasant to work with.
25 | Python is object-orientated, which is important when you are writing a bunch of classes that do a similar job in fundamentally different ways, like clinical trial designs, say.
26 | It is nice to program in Python.
27 | I think it is sadly underused in clinical trials.
28 | Python also offers lots of extras and the parallel capabilities of IPython are having a positive impact on my work.
29 |
30 | If you have never used Python, I recommend you install Anaconda, a distribution of Python aimed at academics and researchers that includes the tools we need, switch to the tutorial directory of clintrials and then fire up jupyter notebook.
31 |
32 | ### Dependencies ###
33 |
34 | * numpy, scipy, pandas & statsmodels - all of these are installed by Anaconda so I highly recommend that.
35 | Install Anaconda from https://www.continuum.io/downloads
36 | * Some features also require matplotlib and ggplot. matplotlib also comes with Anaconda but ggplot will require a separate install.
37 | If you need ggplot, be nice to yourself and use pip:
38 | `pip install ggplot`
39 |
40 |
41 | ### How do I get set up? ###
42 |
43 | There are two ways.
44 | The first method uses pip and the Python package index.
45 | The extras like the tutorials are not provided.
46 | The second clones this repo using git.
47 | Tutorials are provided in the tutorials directory.
48 | The one complication is getting the clintrials package on your path.
49 |
50 | #### Using pip to get just the clintrials code
51 | To get the latest milestone release, use pip.
52 | Open up a terminal or DOS session and fire off a:
53 |
54 | `pip install clintrials`
55 |
56 | The advantage of this method is that clintrials is added to your path.
57 | The disadvantage is that you don't get the nice tutorial workbooks that illustrate the methods.
58 | If you want those, use...
59 |
60 | #### Using git to clone this repo, including tutorial notebooks
61 |
62 | Navigate in terminal or DOS to a directory where you want the code and run
63 |
64 | `git clone https://github.com/brockk/clintrials.git`
65 |
66 | `cd clintrials`
67 |
68 | You need to put clintrials on your path.
69 | An easy way to do this is to edit the PYTHONPATH environment variable.
70 | To do this in Mac or Linux, run
71 |
72 | `export PYTHONPATH=$PYTHONPATH:$(pwd)`
73 |
74 | Or, in Windows run
75 |
76 | `set PYTHONPATH=%PYTHONPATH%;%CD%`
77 |
78 | Then, load a jupyter notebook session for the tutorials using:
79 |
80 | `jupyter notebook --notebook-dir=tutorials`
81 |
82 | A browser window should appear and you should see the tutorials.
83 | Tutorials related to the _Implementing the EffTox Dose-Finding Design in the Matchpoint Trial_ publication
84 | are in the `matchpoint` directory.
85 |
86 |
87 | ### Documentation
88 |
89 | Documentation will eventually appear at
90 |
91 |
92 |
93 | ### Contact ###
94 | The repo owner is Kristian Brock, @brockk.
95 | Feel free to get in contact through GitHub.
--------------------------------------------------------------------------------
/tests/test_wagestait.py:
--------------------------------------------------------------------------------
1 | __author__ = 'Kristian Brock'
2 | __contact__ = 'kristian.brock@gmail.com'
3 |
4 | """ Tests of the clintrials.dosefinding.wagestait module. """
5 |
6 | from nose.tools import with_setup
7 | import numpy as np
8 | from scipy.stats import norm
9 |
10 | from clintrials.common import empiric, logistic, inverse_empiric, inverse_logistic
11 | from clintrials.dosefinding.wagestait import WagesTait
12 |
13 |
def setup_func():
    """No-op fixture: these tests need no shared state set up."""
16 |
17 |
def teardown_func():
    """No-op fixture: these tests leave no state to tear down."""
20 |
21 |
@with_setup(setup_func, teardown_func)
def test_wages_tait_1():
    """Check WagesTait posterior state after 8 patients on doses 1-3.

    The expected values were taken from Nolan's implementation in R.
    """

    def _close(actual, expected, tol):
        # Element-wise absolute-error check used on the posterior vectors below.
        return np.all(np.abs(np.asarray(actual) - np.asarray(expected)) < tol)

    # Toxicity prior, target and admissibility cut-offs
    tox_prior = [0.01, 0.08, 0.15, 0.22, 0.29, 0.36]
    tox_cutoff = 0.33
    eff_cutoff = 0.05
    tox_target = 0.30

    # One efficacy skeleton per candidate location of the most-effective dose
    skeletons = [
        [0.60, 0.50, 0.40, 0.30, 0.20, 0.10],
        [0.50, 0.60, 0.50, 0.40, 0.30, 0.20],
        [0.40, 0.50, 0.60, 0.50, 0.40, 0.30],
        [0.30, 0.40, 0.50, 0.60, 0.50, 0.40],
        [0.20, 0.30, 0.40, 0.50, 0.60, 0.50],
        [0.10, 0.20, 0.30, 0.40, 0.50, 0.60],
        [0.20, 0.30, 0.40, 0.50, 0.60, 0.60],
        [0.30, 0.40, 0.50, 0.60, 0.60, 0.60],
        [0.40, 0.50, 0.60, 0.60, 0.60, 0.60],
        [0.50, 0.60, 0.60, 0.60, 0.60, 0.60],
        [0.60, 0.60, 0.60, 0.60, 0.60, 0.60],
    ]

    first_dose, trial_size, ar_size = 1, 64, 16
    trial = WagesTait(skeletons, tox_prior, tox_target, tox_cutoff, eff_cutoff,
                      first_dose, trial_size, ar_size)

    # (dose, toxicity, efficacy) per patient
    cases = [
        (1, 1, 0), (1, 0, 0), (1, 0, 0),
        (2, 0, 0), (2, 0, 0), (2, 0, 1),
        (3, 1, 1), (3, 0, 1),
    ]

    # The next-dose recommendation is randomised at this stage, so only the
    # posterior state is checked, not the return value.
    trial.update(cases)

    # The first element varies a bit more, hence the looser tolerance.
    assert _close(trial.post_tox_probs,
                  [0.1376486, 0.3126617, 0.4095831, 0.4856057, 0.5506505, 0.6086650],
                  0.001)
    assert _close(trial.post_eff_probs,
                  [0.2479070, 0.3639813, 0.4615474, 0.5497718, 0.6321674, 0.7105235],
                  0.00001)
    assert _close(trial.w,
                  [0.01347890, 0.03951504, 0.12006585, 0.11798287, 0.11764227, 0.12346595,
                   0.11764227, 0.11798287, 0.12006585, 0.07073296, 0.04142517],
                  0.00001)
    assert trial.most_likely_model_index == 5
    assert trial.admissable_set() == [1, 2]
    assert abs(trial.dose_toxicity_lower_bound(1) - 0.008403759) < 0.00001
70 |
71 |
72 |
def test_wages_tait_2():
    """Check WagesTait recommendation and posterior state after 18 patients.

    The expected values were taken from Nolan's implementation in R.

    Fixed: the vector assertions previously compared ``estimate - expected < tol``
    without taking absolute values, so any under-estimate passed trivially
    (compare test_wages_tait_1, which uses np.abs).  The two scalar bound
    checks had the same one-sided defect.  All comparisons now bound the
    absolute error.
    """

    # Toxicity prior, target and admissibility cut-offs
    tox_prior = [0.01, 0.08, 0.15, 0.22, 0.29, 0.36]
    tox_cutoff = 0.33
    eff_cutoff = 0.05
    tox_target = 0.30

    # One efficacy skeleton per candidate location of the most-effective dose
    skeletons = [
        [0.60, 0.50, 0.40, 0.30, 0.20, 0.10],
        [0.50, 0.60, 0.50, 0.40, 0.30, 0.20],
        [0.40, 0.50, 0.60, 0.50, 0.40, 0.30],
        [0.30, 0.40, 0.50, 0.60, 0.50, 0.40],
        [0.20, 0.30, 0.40, 0.50, 0.60, 0.50],
        [0.10, 0.20, 0.30, 0.40, 0.50, 0.60],
        [0.20, 0.30, 0.40, 0.50, 0.60, 0.60],
        [0.30, 0.40, 0.50, 0.60, 0.60, 0.60],
        [0.40, 0.50, 0.60, 0.60, 0.60, 0.60],
        [0.50, 0.60, 0.60, 0.60, 0.60, 0.60],
        [0.60, 0.60, 0.60, 0.60, 0.60, 0.60],
    ]

    first_dose = 1
    trial_size = 64
    ar_size = 16

    trial = WagesTait(skeletons, tox_prior, tox_target, tox_cutoff, eff_cutoff, first_dose, trial_size, ar_size)

    # (dose, toxicity, efficacy) per patient
    cases = [
        (1, 1, 0), (1, 0, 0), (1, 0, 0),
        (2, 0, 0), (2, 0, 0), (2, 0, 1),
        (3, 1, 1), (3, 0, 1), (3, 1, 1),
        (2, 0, 0), (2, 0, 0), (2, 1, 1),
        (3, 0, 1), (3, 0, 0), (3, 1, 1),
        (4, 1, 1), (4, 0, 1), (4, 0, 1),
    ]

    next_dose = trial.update(cases)
    assert next_dose == 2
    assert np.all(np.abs(trial.post_tox_probs
                         - np.array([0.1292270, 0.3118713, 0.4124382, 0.4906020, 0.5569092, 0.6155877]))
                  < 0.00001)
    assert np.all(np.abs(trial.post_eff_probs
                         - np.array([0.3999842, 0.4935573, 0.5830683, 0.6697644, 0.5830683, 0.4935573]))
                  < 0.00001)
    assert np.all(np.abs(trial.w
                         - np.array([0.001653197, 0.006509789, 0.069328268, 0.156959090, 0.141296982, 0.144650706,
                                     0.141296982, 0.156959090, 0.117673776, 0.041764220, 0.021907900]))
                  < 0.00001)
    assert trial.most_likely_model_index == 3
    assert trial.admissable_set() == [1, 2]
    assert abs(trial.dose_toxicity_lower_bound(1) - 0.008403759) < 0.00001
    assert abs(trial.dose_efficacy_upper_bound(next_dose) - 0.7772219) < 0.00001
123 |
--------------------------------------------------------------------------------
/doc/_build/html/py-modindex.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
5 |
6 |
7 |
8 |
9 |
10 |
11 | Python Module Index — clintrials 0.1.1 documentation
12 |
13 |
14 |
15 |
16 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 |
34 |
35 |
47 |
48 |
49 |
50 |
51 |
52 |
53 |
54 |
Python Module Index
55 |
56 |
59 |
60 |
96 |
97 |
98 |
99 |
100 |
101 |
118 |
119 |
120 |
132 |
136 |
137 |
--------------------------------------------------------------------------------
/doc/_build/html/_static/default.css:
--------------------------------------------------------------------------------
1 | /*
2 | * default.css_t
3 | * ~~~~~~~~~~~~~
4 | *
5 | * Sphinx stylesheet -- default theme.
6 | *
7 | * :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS.
8 | * :license: BSD, see LICENSE for details.
9 | *
10 | */
11 |
12 | @import url("basic.css");
13 |
14 | /* -- page layout ----------------------------------------------------------- */
15 |
16 | body {
17 | font-family: sans-serif;
18 | font-size: 100%;
19 | background-color: #11303d;
20 | color: #000;
21 | margin: 0;
22 | padding: 0;
23 | }
24 |
25 | div.document {
26 | background-color: #1c4e63;
27 | }
28 |
29 | div.documentwrapper {
30 | float: left;
31 | width: 100%;
32 | }
33 |
34 | div.bodywrapper {
35 | margin: 0 0 0 230px;
36 | }
37 |
38 | div.body {
39 | background-color: #ffffff;
40 | color: #000000;
41 | padding: 0 20px 30px 20px;
42 | }
43 |
44 | div.footer {
45 | color: #ffffff;
46 | width: 100%;
47 | padding: 9px 0 9px 0;
48 | text-align: center;
49 | font-size: 75%;
50 | }
51 |
52 | div.footer a {
53 | color: #ffffff;
54 | text-decoration: underline;
55 | }
56 |
57 | div.related {
58 | background-color: #133f52;
59 | line-height: 30px;
60 | color: #ffffff;
61 | }
62 |
63 | div.related a {
64 | color: #ffffff;
65 | }
66 |
67 | div.sphinxsidebar {
68 | }
69 |
70 | div.sphinxsidebar h3 {
71 | font-family: 'Trebuchet MS', sans-serif;
72 | color: #ffffff;
73 | font-size: 1.4em;
74 | font-weight: normal;
75 | margin: 0;
76 | padding: 0;
77 | }
78 |
79 | div.sphinxsidebar h3 a {
80 | color: #ffffff;
81 | }
82 |
83 | div.sphinxsidebar h4 {
84 | font-family: 'Trebuchet MS', sans-serif;
85 | color: #ffffff;
86 | font-size: 1.3em;
87 | font-weight: normal;
88 | margin: 5px 0 0 0;
89 | padding: 0;
90 | }
91 |
92 | div.sphinxsidebar p {
93 | color: #ffffff;
94 | }
95 |
96 | div.sphinxsidebar p.topless {
97 | margin: 5px 10px 10px 10px;
98 | }
99 |
100 | div.sphinxsidebar ul {
101 | margin: 10px;
102 | padding: 0;
103 | color: #ffffff;
104 | }
105 |
106 | div.sphinxsidebar a {
107 | color: #98dbcc;
108 | }
109 |
110 | div.sphinxsidebar input {
111 | border: 1px solid #98dbcc;
112 | font-family: sans-serif;
113 | font-size: 1em;
114 | }
115 |
116 |
117 |
118 | /* -- hyperlink styles ------------------------------------------------------ */
119 |
120 | a {
121 | color: #355f7c;
122 | text-decoration: none;
123 | }
124 |
125 | a:visited {
126 | color: #355f7c;
127 | text-decoration: none;
128 | }
129 |
130 | a:hover {
131 | text-decoration: underline;
132 | }
133 |
134 |
135 |
136 | /* -- body styles ----------------------------------------------------------- */
137 |
138 | div.body h1,
139 | div.body h2,
140 | div.body h3,
141 | div.body h4,
142 | div.body h5,
143 | div.body h6 {
144 | font-family: 'Trebuchet MS', sans-serif;
145 | background-color: #f2f2f2;
146 | font-weight: normal;
147 | color: #20435c;
148 | border-bottom: 1px solid #ccc;
149 | margin: 20px -20px 10px -20px;
150 | padding: 3px 0 3px 10px;
151 | }
152 |
153 | div.body h1 { margin-top: 0; font-size: 200%; }
154 | div.body h2 { font-size: 160%; }
155 | div.body h3 { font-size: 140%; }
156 | div.body h4 { font-size: 120%; }
157 | div.body h5 { font-size: 110%; }
158 | div.body h6 { font-size: 100%; }
159 |
160 | a.headerlink {
161 | color: #c60f0f;
162 | font-size: 0.8em;
163 | padding: 0 4px 0 4px;
164 | text-decoration: none;
165 | }
166 |
167 | a.headerlink:hover {
168 | background-color: #c60f0f;
169 | color: white;
170 | }
171 |
172 | div.body p, div.body dd, div.body li {
173 | text-align: justify;
174 | line-height: 130%;
175 | }
176 |
177 | div.admonition p.admonition-title + p {
178 | display: inline;
179 | }
180 |
181 | div.admonition p {
182 | margin-bottom: 5px;
183 | }
184 |
185 | div.admonition pre {
186 | margin-bottom: 5px;
187 | }
188 |
189 | div.admonition ul, div.admonition ol {
190 | margin-bottom: 5px;
191 | }
192 |
193 | div.note {
194 | background-color: #eee;
195 | border: 1px solid #ccc;
196 | }
197 |
198 | div.seealso {
199 | background-color: #ffc;
200 | border: 1px solid #ff6;
201 | }
202 |
203 | div.topic {
204 | background-color: #eee;
205 | }
206 |
207 | div.warning {
208 | background-color: #ffe4e4;
209 | border: 1px solid #f66;
210 | }
211 |
212 | p.admonition-title {
213 | display: inline;
214 | }
215 |
216 | p.admonition-title:after {
217 | content: ":";
218 | }
219 |
220 | pre {
221 | padding: 5px;
222 | background-color: #eeffcc;
223 | color: #333333;
224 | line-height: 120%;
225 | border: 1px solid #ac9;
226 | border-left: none;
227 | border-right: none;
228 | }
229 |
230 | tt {
231 | background-color: #ecf0f3;
232 | padding: 0 1px 0 1px;
233 | font-size: 0.95em;
234 | }
235 |
236 | th {
237 | background-color: #ede;
238 | }
239 |
240 | .warning tt {
241 | background: #efc2c2;
242 | }
243 |
244 | .note tt {
245 | background: #d6d6d6;
246 | }
247 |
248 | .viewcode-back {
249 | font-family: sans-serif;
250 | }
251 |
252 | div.viewcode-block:target {
253 | background-color: #f4debf;
254 | border-top: 1px solid #ac9;
255 | border-bottom: 1px solid #ac9;
256 | }
--------------------------------------------------------------------------------
/doc/_build/html/_static/sidebar.js:
--------------------------------------------------------------------------------
1 | /*
2 | * sidebar.js
3 | * ~~~~~~~~~~
4 | *
5 | * This script makes the Sphinx sidebar collapsible.
6 | *
7 | * .sphinxsidebar contains .sphinxsidebarwrapper. This script adds
8 | * in .sphixsidebar, after .sphinxsidebarwrapper, the #sidebarbutton
9 | * used to collapse and expand the sidebar.
10 | *
11 | * When the sidebar is collapsed the .sphinxsidebarwrapper is hidden
12 | * and the width of the sidebar and the margin-left of the document
13 | * are decreased. When the sidebar is expanded the opposite happens.
14 | * This script saves a per-browser/per-session cookie used to
15 | * remember the position of the sidebar among the pages.
16 | * Once the browser is closed the cookie is deleted and the position
17 | * reset to the default (expanded).
18 | *
19 | * :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS.
20 | * :license: BSD, see LICENSE for details.
21 | *
22 | */
23 |
24 | $(function() {
25 | // global elements used by the functions.
26 | // the 'sidebarbutton' element is defined as global after its
27 | // creation, in the add_sidebar_button function
28 | var bodywrapper = $('.bodywrapper');
29 | var sidebar = $('.sphinxsidebar');
30 | var sidebarwrapper = $('.sphinxsidebarwrapper');
31 |
32 | // for some reason, the document has no sidebar; do not run into errors
33 | if (!sidebar.length) return;
34 |
35 | // original margin-left of the bodywrapper and width of the sidebar
36 | // with the sidebar expanded
37 | var bw_margin_expanded = bodywrapper.css('margin-left');
38 | var ssb_width_expanded = sidebar.width();
39 |
40 | // margin-left of the bodywrapper and width of the sidebar
41 | // with the sidebar collapsed
42 | var bw_margin_collapsed = '.8em';
43 | var ssb_width_collapsed = '.8em';
44 |
45 | // colors used by the current theme
46 | var dark_color = $('.related').css('background-color');
47 | var light_color = $('.document').css('background-color');
48 |
49 | function sidebar_is_collapsed() {
50 | return sidebarwrapper.is(':not(:visible)');
51 | }
52 |
53 | function toggle_sidebar() {
54 | if (sidebar_is_collapsed())
55 | expand_sidebar();
56 | else
57 | collapse_sidebar();
58 | }
59 |
60 | function collapse_sidebar() {
61 | sidebarwrapper.hide();
62 | sidebar.css('width', ssb_width_collapsed);
63 | bodywrapper.css('margin-left', bw_margin_collapsed);
64 | sidebarbutton.css({
65 | 'margin-left': '0',
66 | 'height': bodywrapper.height()
67 | });
68 | sidebarbutton.find('span').text('»');
69 | sidebarbutton.attr('title', _('Expand sidebar'));
70 | document.cookie = 'sidebar=collapsed';
71 | }
72 |
73 | function expand_sidebar() {
74 | bodywrapper.css('margin-left', bw_margin_expanded);
75 | sidebar.css('width', ssb_width_expanded);
76 | sidebarwrapper.show();
77 | sidebarbutton.css({
78 | 'margin-left': ssb_width_expanded-12,
79 | 'height': bodywrapper.height()
80 | });
81 | sidebarbutton.find('span').text('«');
82 | sidebarbutton.attr('title', _('Collapse sidebar'));
83 | document.cookie = 'sidebar=expanded';
84 | }
85 |
86 | function add_sidebar_button() {
87 | sidebarwrapper.css({
88 | 'float': 'left',
89 | 'margin-right': '0',
90 | 'width': ssb_width_expanded - 28
91 | });
92 | // create the button
93 | sidebar.append(
94 | ''
95 | );
96 | var sidebarbutton = $('#sidebarbutton');
97 | light_color = sidebarbutton.css('background-color');
98 | // find the height of the viewport to center the '<<' in the page
99 | var viewport_height;
100 | if (window.innerHeight)
101 | viewport_height = window.innerHeight;
102 | else
103 | viewport_height = $(window).height();
104 | sidebarbutton.find('span').css({
105 | 'display': 'block',
106 | 'margin-top': (viewport_height - sidebar.position().top - 20) / 2
107 | });
108 |
109 | sidebarbutton.click(toggle_sidebar);
110 | sidebarbutton.attr('title', _('Collapse sidebar'));
111 | sidebarbutton.css({
112 | 'color': '#FFFFFF',
113 | 'border-left': '1px solid ' + dark_color,
114 | 'font-size': '1.2em',
115 | 'cursor': 'pointer',
116 | 'height': bodywrapper.height(),
117 | 'padding-top': '1px',
118 | 'margin-left': ssb_width_expanded - 12
119 | });
120 |
121 | sidebarbutton.hover(
122 | function () {
123 | $(this).css('background-color', dark_color);
124 | },
125 | function () {
126 | $(this).css('background-color', light_color);
127 | }
128 | );
129 | }
130 |
131 | function set_position_from_cookie() {
132 | if (!document.cookie)
133 | return;
134 | var items = document.cookie.split(';');
135 | for(var k=0; k' where is one of"
21 | @echo " html to make standalone HTML files"
22 | @echo " dirhtml to make HTML files named index.html in directories"
23 | @echo " singlehtml to make a single large HTML file"
24 | @echo " pickle to make pickle files"
25 | @echo " json to make JSON files"
26 | @echo " htmlhelp to make HTML files and a HTML help project"
27 | @echo " qthelp to make HTML files and a qthelp project"
28 | @echo " devhelp to make HTML files and a Devhelp project"
29 | @echo " epub to make an epub"
30 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
31 | @echo " latexpdf to make LaTeX files and run them through pdflatex"
32 | @echo " text to make text files"
33 | @echo " man to make manual pages"
34 | @echo " texinfo to make Texinfo files"
35 | @echo " info to make Texinfo files and run them through makeinfo"
36 | @echo " gettext to make PO message catalogs"
37 | @echo " changes to make an overview of all changed/added/deprecated items"
38 | @echo " linkcheck to check all external links for integrity"
39 | @echo " doctest to run all doctests embedded in the documentation (if enabled)"
40 |
41 | clean:
42 | -rm -rf $(BUILDDIR)/*
43 |
44 | html:
45 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
46 | @echo
47 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
48 |
49 | dirhtml:
50 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
51 | @echo
52 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
53 |
54 | singlehtml:
55 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
56 | @echo
57 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
58 |
59 | pickle:
60 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
61 | @echo
62 | @echo "Build finished; now you can process the pickle files."
63 |
64 | json:
65 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
66 | @echo
67 | @echo "Build finished; now you can process the JSON files."
68 |
69 | htmlhelp:
70 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
71 | @echo
72 | @echo "Build finished; now you can run HTML Help Workshop with the" \
73 | ".hhp project file in $(BUILDDIR)/htmlhelp."
74 |
75 | qthelp:
76 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
77 | @echo
78 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \
79 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
80 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/clintrials.qhcp"
81 | @echo "To view the help file:"
82 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/clintrials.qhc"
83 |
84 | devhelp:
85 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
86 | @echo
87 | @echo "Build finished."
88 | @echo "To view the help file:"
89 | @echo "# mkdir -p $$HOME/.local/share/devhelp/clintrials"
90 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/clintrials"
91 | @echo "# devhelp"
92 |
93 | epub:
94 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
95 | @echo
96 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub."
97 |
98 | latex:
99 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
100 | @echo
101 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
102 | @echo "Run \`make' in that directory to run these through (pdf)latex" \
103 | "(use \`make latexpdf' here to do that automatically)."
104 |
105 | latexpdf:
106 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
107 | @echo "Running LaTeX files through pdflatex..."
108 | $(MAKE) -C $(BUILDDIR)/latex all-pdf
109 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
110 |
111 | text:
112 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
113 | @echo
114 | @echo "Build finished. The text files are in $(BUILDDIR)/text."
115 |
116 | man:
117 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
118 | @echo
119 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man."
120 |
121 | texinfo:
122 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
123 | @echo
124 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
125 | @echo "Run \`make' in that directory to run these through makeinfo" \
126 | "(use \`make info' here to do that automatically)."
127 |
128 | info:
129 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
130 | @echo "Running Texinfo files through makeinfo..."
131 | make -C $(BUILDDIR)/texinfo info
132 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
133 |
134 | gettext:
135 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
136 | @echo
137 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
138 |
139 | changes:
140 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
141 | @echo
142 | @echo "The overview file is in $(BUILDDIR)/changes."
143 |
144 | linkcheck:
145 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
146 | @echo
147 | @echo "Link check complete; look for any errors in the above output " \
148 | "or in $(BUILDDIR)/linkcheck/output.txt."
149 |
150 | doctest:
151 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
152 | @echo "Testing of doctests in the sources finished, look at the " \
153 | "results in $(BUILDDIR)/doctest/output.txt."
154 |
--------------------------------------------------------------------------------
/tests/test_crm.py:
--------------------------------------------------------------------------------
1 | __author__ = 'Kristian Brock'
2 | __contact__ = 'kristian.brock@gmail.com'
3 |
4 | """ Tests of the clintrials.dosefinding.crm module. """
5 |
6 | from nose.tools import with_setup
7 | import numpy as np
8 | from scipy.stats import norm
9 |
10 | from clintrials.common import empiric, logistic, inverse_empiric, inverse_logistic
11 | from clintrials.dosefinding.crm import CRM
12 |
13 |
def setup_func():
    """No-op fixture: these tests need no shared state set up."""
16 |
17 |
def teardown_func():
    """No-op fixture: these tests leave no state to tear down."""
20 |
21 |
@with_setup(setup_func, teardown_func)
def test_CRM_bayes():
    """Reproduce Table 3.2, p.26 of Cheung's book with the Bayesian CRM.

    Dose Finding By The Continual Reassessment Method (Chapman & Hall/CRC
    Biostatistics Series) walks through a simulated 20-patient trial,
    listing for each patient the dose given, whether a toxicity occurred,
    and the running estimate of beta.  Reproducing all three at every step
    suggests we are doing something right.
    """

    # Simulation inputs copied straight out of the book.
    true_toxicity = [0.02, 0.04, 0.10, 0.25, 0.50]
    doses = [3, 5, 5, 3, 4, 4, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4]
    toxicity_events = [0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0]
    tolerances = [0.571, 0.642, 0.466, 0.870, 0.634, 0.390, 0.524, 0.773, 0.175, 0.627,
                  0.321, 0.099, 0.383, 0.995, 0.628, 0.346, 0.919, 0.022, 0.647, 0.469]
    beta_hats = [0.60, 0.93, 0.04, 0.18, 0.28, 0.34, 0.41, 0.47, 0.31, 0.35,
                 0.25, 0.15, 0.18, 0.21, 0.24, 0.26, 0.28, 0.21, 0.22, 0.24]
    beta_hat_epsilon = 0.005

    # CRM configuration required to reproduce the book's numbers.
    prior = [0.05, 0.12, 0.25, 0.40, 0.55]
    toxicity_target = 0.25
    first_dose = 3
    beta_prior = norm(loc=0, scale=np.sqrt(1.34))

    crm = CRM(prior, toxicity_target, first_dose, max_size=len(tolerances),
              F_func=logistic, inverse_F=inverse_logistic, beta_prior=beta_prior,
              method="bayes", use_quick_integration=False, estimate_var=True)

    # Walk the 20 patients, checking dose (x_i), toxicity (y_i) and
    # hat(beta)_i against Ken Cheung's table at each step.
    dose = first_dose
    for expected_dose, expected_tox, u, expected_beta in zip(
            doses, toxicity_events, tolerances, beta_hats):
        assert dose == expected_dose
        tox = int(u < true_toxicity[dose - 1])
        assert tox == expected_tox
        dose = crm.update([(dose, tox)])
        assert abs(crm.beta_hat - expected_beta) <= beta_hat_epsilon
63 |
64 |
@with_setup(setup_func, teardown_func)
def test_CRM_mle():
    """Reproduce an MLE CRM trial that is verifiable in R with Cheung's dfcrm package."""

    # Patient-by-patient simulation data; all numbers checkable in R.
    doses = [3, 3, 1, 2, 2, 3, 3, 2, 3, 2, 1, 2, 1, 1, 1, 2, 2]
    toxicity_events = [0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0]
    beta_hats = [np.nan, np.nan, -0.312, -0.193, -0.099, -0.040, 0.030, -0.121, -0.084,
                 -0.177, -0.284, -0.256, -0.336, -0.308, -0.286, -0.266, -0.240]
    beta_hat_epsilon = 0.005

    # CRM configuration required to reproduce the numbers above.
    prior = [0.05, 0.12, 0.25, 0.40, 0.55]
    toxicity_target = 0.25
    first_dose = 3
    beta_prior = norm(loc=0, scale=np.sqrt(1.34))

    crm = CRM(prior, toxicity_target, first_dose, max_size=len(doses),
              F_func=logistic, inverse_F=inverse_logistic, beta_prior=beta_prior,
              method="mle")

    # The MLE needs at least one toxicity and one non-toxicity on record to
    # proceed sensibly; the first two cases supply one of each.
    dose = crm.update([(doses[0], toxicity_events[0]), (doses[1], toxicity_events[1])])

    # Walk the remaining 15 patients, checking the recommended dose (x_i)
    # and beta estimate (hat(beta)_i) at every step.
    for expected_dose, expected_beta, tox in zip(
            doses[2:], beta_hats[2:], toxicity_events[2:]):
        assert dose == expected_dose
        assert abs(crm.beta_hat - expected_beta) <= beta_hat_epsilon
        dose = crm.update([(dose, tox)])
102 |
103 |
def test_CRM_bayes_again():
    """Check plug-in-mean Bayesian CRM posterior toxicity probabilities.

    Four configurations are compared: empiric vs logistic link function,
    each with slow and quick integration.  Expected values are verifiable
    in R.

    Fixed: the assertions previously compared ``estimate - expected < tol``
    without taking absolute values, so they passed trivially whenever an
    estimate undershot the expectation; they now bound the absolute error.
    """
    prior = [0.1, 0.2, 0.4, 0.6]
    target = 0.4
    doses = [1, 1, 1, 2, 2, 2]
    tox = [0, 0, 0, 1, 0, 1]
    cases = list(zip(doses, tox))

    trial_plugin_1 = CRM(prior, target, 1, 30, F_func=empiric, inverse_F=inverse_empiric,
                         use_quick_integration=False, plugin_mean=True)
    trial_plugin_2 = CRM(prior, target, 1, 30, F_func=empiric, inverse_F=inverse_empiric,
                         use_quick_integration=True, plugin_mean=True)
    trial_plugin_3 = CRM(prior, target, 1, 30, F_func=logistic, inverse_F=inverse_logistic,
                         use_quick_integration=False, plugin_mean=True)
    trial_plugin_4 = CRM(prior, target, 1, 30, F_func=logistic, inverse_F=inverse_logistic,
                         use_quick_integration=True, plugin_mean=True)
    trial_plugin_1.update(cases)
    trial_plugin_2.update(cases)
    trial_plugin_3.update(cases)
    trial_plugin_4.update(cases)

    # Quick and slow integration should agree for a given link function.
    empiric_expected = np.array([0.240, 0.368, 0.566, 0.728])
    logistic_expected = np.array([0.274, 0.412, 0.598, 0.734])
    assert np.all(np.abs(np.array(trial_plugin_1.prob_tox()) - empiric_expected) < 0.001)
    assert np.all(np.abs(np.array(trial_plugin_2.prob_tox()) - empiric_expected) < 0.001)
    assert np.all(np.abs(np.array(trial_plugin_3.prob_tox()) - logistic_expected) < 0.001)
    assert np.all(np.abs(np.array(trial_plugin_4.prob_tox()) - logistic_expected) < 0.001)
    # These are verifiable in R
128 |
129 |
130 | # TODO: tests of full Bayes CRM, verified against bcrm in R
131 |
132 |
133 |
--------------------------------------------------------------------------------
/doc/_build/html/_modules/clintrials/coll.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
5 |
6 |
7 |
8 |
9 |
10 |
11 | clintrials.coll — clintrials 0.1.1 documentation
12 |
13 |
14 |
15 |
16 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 |
46 |
47 |
48 |
49 |
50 |
51 |
52 |
Source code for clintrials.coll
53 | __author__ = 'Kristian Brock'
54 | __contact__ = 'kristian.brock@gmail.com'
55 |
56 | """ Functions and classes for manipulating collections. """
57 |
58 |
59 | [docs] def to_1d_list_gen ( x ):
60 |
""" Generator function to reduce lists of lists of arbitrary depth (and scalars) to single depth-1 list.
61 |
62 |
.. note:: this function is recursive.
63 |
64 |
"""
65 |
66 |
if isinstance ( x , list ):
67 |
for y in x :
68 |
for z in to_1d_list_gen ( y ):
69 |
yield z
70 |
else :
71 |
yield x
72 |
73 |
74 | [docs] def to_1d_list ( x ):
75 |
""" Reshape scalars, lists and lists of lists of arbitrary depth as a single flat list, i.e. list of depth 1.
76 |
77 |
.. note:: this function basically offloads all its work to a generator function because **we like yield**!
78 |
79 |
E.g.
80 |
81 |
>>> to_1d_list(0)
82 |
[0]
83 |
>>> to_1d_list([1])
84 |
[1]
85 |
>>> to_1d_list([[1,2],3,[4,5]])
86 |
[1, 2, 3, 4, 5]
87 |
>>> to_1d_list([[1,2],3,[4,5,[6,[7,8,[9]]]]])
88 |
[1, 2, 3, 4, 5, 6, 7, 8, 9]
89 |
90 |
"""
91 |
return list ( to_1d_list_gen ( x ))
92 |
93 |
94 |
95 |
96 |
97 |
114 |
115 |
116 |
129 |
133 |
134 |
--------------------------------------------------------------------------------
/clintrials/phase2/simple.py:
--------------------------------------------------------------------------------
1 | __author__ = 'Kristian Brock'
2 | __contact__ = 'kristian.brock@gmail.com'
3 |
4 | """ Implementations of simple phase II clinical trial designs. Long, complicated designs belong in own modules. """
5 |
6 | from collections import OrderedDict
7 | from itertools import product
8 | import numpy as np
9 | from scipy.stats import beta, binom, chi2
10 |
11 |
def bayesian_2stage_dich_design(theta, p0, p1, N0, N1, p, q, prior_a=1, prior_b=1,
                                labels=('StopAtInterim', 'StopAtFinal', 'GoAtFinal')):
    """ Calculate the outcome probabilities for a two-stage Bayesian trial of a dichotomous variable.

    We test the hypotheses H0: theta < p0 vs H1: theta > p1, stopping at interim if
    Prob(theta < p0 | data) > p, approving at the final analysis if
    Prob(theta > p1 | data) > q, otherwise stopping at the final analysis.

    .. note:: this is Prof Lucinda Billingham's dichotomous design used in the National Lung Matrix trial.

    :param theta: the true efficacy
    :type theta: float
    :param p0: hypothesised lower bound probability
    :type p0: float
    :param p1: hypothesised upper bound probability
    :type p1: float
    :param N0: number of participants at interim stage
    :type N0: int
    :param N1: number of participants at final stage
    :type N1: int
    :param p: certainty needed to reject H0 at end of interim stage
    :type p: float
    :param q: certainty needed to accept H1 at end of final stage
    :type q: float
    :param prior_a: first parameter to Beta distribution to describe prior beliefs about theta
    :type prior_a: float
    :param prior_b: second parameter to Beta distribution to describe prior beliefs about theta
    :type prior_b: float
    :param labels: labels for the cases of stopping at interim, stopping at final, and approving at final analysis.
    :type labels: tuple or list
    :return: dict, mapping outcome label to probability
    :rtype: dict

    e.g.

    >>> res = bayesian_2stage_dich_design(0.35, 0.2, 0.4, 15, 30, 0.8, 0.6)
    >>> res == {'GoAtFinal': 0.21978663862560768, 'StopAtFinal': 0.76603457678233555,
    ... 'StopAtInterim': 0.014178784592056803}
    True

    """

    a, b = prior_a, prior_b
    # Enumerate every (interim successes, post-interim successes) outcome pair.
    n0, n1 = zip(*product(range(N0+1), range(N1-N0+1)))
    n0, n1 = np.array(n0), np.array(n1)
    # Likelihood of each outcome pair under the true efficacy theta.
    lik0 = binom.pmf(n0, n=N0, p=theta)
    lik1 = binom.pmf(n1, n=N1-N0, p=theta)
    joint_lik = lik0 * lik1
    # Posterior tail probabilities via conjugate Beta-Binomial updating.
    prob_lt_p0 = beta.cdf(p0, a+n0, b+N0-n0)
    prob_gt_p1 = 1 - beta.cdf(p1, a+n0+n1, b+N1-n0-n1)
    # Total probability of each trial decision, weighting outcomes by likelihood.
    stop_0 = np.sum(joint_lik[prob_lt_p0 > p])
    go_1 = np.sum(joint_lik[~(prob_lt_p0 > p) & (prob_gt_p1 > q)])
    stop_1 = 1 - stop_0 - go_1
    return {labels[0]: stop_0, labels[1]: stop_1, labels[2]: go_1}
65 |
66 |
67 | def bayesian_2stage_dich_design_df(theta, p0, p1, N0, N1, p, q, prior_a=1, prior_b=1,
68 | labels=['StopAtInterim', 'StopAtFinal', 'GoAtFinal']):
69 | """ Calculate the outcome probabilities for a two-stage Bayesian trial of a dichotomous variable.
70 |
71 | We test the hypotheses H0: thetap1, stopping at interim if Prob(theta < p0 | data) > p,
72 | stopping at final analysis if Prob(theta < p0 | data) > q, otherwise concluding that theta > p1.
73 |
74 | .. note:: this is Prof Lucinda Billingham's dichotomous design used in the National Lung Matrix trial.
75 |
76 | :param theta: the true efficacy
77 | :type theta: float
78 | :param p0: hypothesised lower bound probability
79 | :type p0: float
80 | :param p1: hypothesised upper bound probability
81 | :type p1: float
82 | :param N0: number of participants at interim stage
83 | :type N0: int
84 | :param N1: number of participants at final stage
85 | :type N1: int
86 | :param p: certainty needed to reject H0 at end of interim stage
87 | :type p: float
88 | :param q: certainty needed to accept H1 at end of final stage
89 | :type q: float
90 | :param prior_a: first parameter to Beta distribution to describe prior beliefs about theta
91 | :type prior_a: float
92 | :param prior_b: second parameter to Beta distribution to describe prior beliefs about theta
93 | :type prior_b: float
94 | :param labels: labels for the cases of stopping at interim, stopping at final, and approving at final analysis.
95 | :type labels: list
96 | :return: dict, mapping outcome label to probability
97 | :rtype: dict
98 |
99 | See bayesian_2stage_dich_design
100 |
101 | """
102 |
103 | a, b = prior_a, prior_b
104 | n0, n1 = zip(*product(range(N0+1), range(N1-N0+1)))
105 | n0, n1 = np.array(n0), np.array(n1)
106 | lik0 = binom.pmf(n0, n=N0, p=theta)
107 | lik1 = binom.pmf(n1, n=N1-N0, p=theta)
108 | joint_lik = lik0 * lik1
109 | prob_lt_p0 = beta.cdf(p0, a+n0, b+N0-n0)
110 | prob_gt_p1 = 1 - beta.cdf(p1, a+n0+n1, b+N1-n0-n1)
111 | stop_0 = prob_lt_p0 > p
112 | go_1 = ~stop_0 & (prob_gt_p1 > q)
113 | stop_1 = ~(stop_0 | go_1)
114 |
115 | dat = OrderedDict()
116 | dat['Successes0'] = n0
117 | dat['Pr(thetap1)'] = prob_gt_p1
121 | dat['Lik'] = joint_lik
122 | dat[labels[0]] = stop_0
123 | dat[labels[1]] = stop_1
124 | dat[labels[2]] = go_1
125 | import pandas as pd
126 | return pd.DataFrame(dat)
127 |
128 |
129 | def chisqu_two_arm_comparison(p0, p1, n, alpha):
130 | """ Test that p1 exceeds p0 with n patients per arm using the chi-squared distribution.
131 |
132 | :param p0: first proportion
133 | :type p0: float
134 | :param p1: second proportion
135 | :type p1: float
136 | :param n: n patients per arm
137 | :type n: int
138 | :param alpha: significance
139 | :type alpha: float
140 | :param to_pandas: True to get results as pandas.DataFrame else dict
141 | :type to_pandas: bool
142 | :return: tuple -- (probability of rejecting, probability of not-rejecting)
143 |
144 | E.g.
145 |
146 | >>> chisqu_two_arm_comparison(0.3, 0.5, 20, 0.05)
147 | (0.34534530091794574, 0.65465469908205098)
148 |
149 | """
150 |
151 | n0, n1 = zip(*list(product(range(n+1), range(n+1))))
152 | n0 = np.array(n0)
153 | n1 = np.array(n1)
154 | lik0 = binom.pmf(n0, n, p0)
155 | lik1 = binom.pmf(n1, n, p1)
156 | lik = lik0 * lik1
157 | observed = np.column_stack([n0, n-n0, n1, n-n1])
158 | success = n0 + n1
159 | fail = 2*n - n0 - n1
160 | expected = np.column_stack([success/2., fail/2., success/2., fail/2.])
161 | test_stat = ((observed-expected)**2 / expected).sum(axis=1)
162 | p = 1-chi2.cdf(test_stat, 1)
163 | reject = (p < alpha*2) & (n0= 0 && !jQuery(node.parentNode).hasClass(className)) {
86 | var span = document.createElement("span");
87 | span.className = className;
88 | span.appendChild(document.createTextNode(val.substr(pos, text.length)));
89 | node.parentNode.insertBefore(span, node.parentNode.insertBefore(
90 | document.createTextNode(val.substr(pos + text.length)),
91 | node.nextSibling));
92 | node.nodeValue = val.substr(0, pos);
93 | }
94 | }
95 | else if (!jQuery(node).is("button, select, textarea")) {
96 | jQuery.each(node.childNodes, function() {
97 | highlight(this);
98 | });
99 | }
100 | }
101 | return this.each(function() {
102 | highlight(this);
103 | });
104 | };
105 |
106 | /**
107 | * Small JavaScript module for the documentation.
108 | */
109 | var Documentation = {
110 |
111 | init : function() {
112 | this.fixFirefoxAnchorBug();
113 | this.highlightSearchWords();
114 | this.initIndexTable();
115 | },
116 |
117 | /**
118 | * i18n support
119 | */
120 | TRANSLATIONS : {},
121 | PLURAL_EXPR : function(n) { return n == 1 ? 0 : 1; },
122 | LOCALE : 'unknown',
123 |
124 | // gettext and ngettext don't access this so that the functions
125 | // can safely bound to a different name (_ = Documentation.gettext)
126 | gettext : function(string) {
127 | var translated = Documentation.TRANSLATIONS[string];
128 | if (typeof translated == 'undefined')
129 | return string;
130 | return (typeof translated == 'string') ? translated : translated[0];
131 | },
132 |
133 | ngettext : function(singular, plural, n) {
134 | var translated = Documentation.TRANSLATIONS[singular];
135 | if (typeof translated == 'undefined')
136 | return (n == 1) ? singular : plural;
137 | return translated[Documentation.PLURALEXPR(n)];
138 | },
139 |
140 | addTranslations : function(catalog) {
141 | for (var key in catalog.messages)
142 | this.TRANSLATIONS[key] = catalog.messages[key];
143 | this.PLURAL_EXPR = new Function('n', 'return +(' + catalog.plural_expr + ')');
144 | this.LOCALE = catalog.locale;
145 | },
146 |
147 | /**
148 | * add context elements like header anchor links
149 | */
150 | addContextElements : function() {
151 | $('div[id] > :header:first').each(function() {
152 | $('').
153 | attr('href', '#' + this.id).
154 | attr('title', _('Permalink to this headline')).
155 | appendTo(this);
156 | });
157 | $('dt[id]').each(function() {
158 | $('').
159 | attr('href', '#' + this.id).
160 | attr('title', _('Permalink to this definition')).
161 | appendTo(this);
162 | });
163 | },
164 |
165 | /**
166 | * workaround a firefox stupidity
167 | */
168 | fixFirefoxAnchorBug : function() {
169 | if (document.location.hash && $.browser.mozilla)
170 | window.setTimeout(function() {
171 | document.location.href += '';
172 | }, 10);
173 | },
174 |
175 | /**
176 | * highlight the search words provided in the url in the text
177 | */
178 | highlightSearchWords : function() {
179 | var params = $.getQueryParameters();
180 | var terms = (params.highlight) ? params.highlight[0].split(/\s+/) : [];
181 | if (terms.length) {
182 | var body = $('div.body');
183 | window.setTimeout(function() {
184 | $.each(terms, function() {
185 | body.highlightText(this.toLowerCase(), 'highlighted');
186 | });
187 | }, 10);
188 | $('' + _('Hide Search Matches') + '
')
190 | .appendTo($('#searchbox'));
191 | }
192 | },
193 |
194 | /**
195 | * init the domain index toggle buttons
196 | */
197 | initIndexTable : function() {
198 | var togglers = $('img.toggler').click(function() {
199 | var src = $(this).attr('src');
200 | var idnum = $(this).attr('id').substr(7);
201 | $('tr.cg-' + idnum).toggle();
202 | if (src.substr(-9) == 'minus.png')
203 | $(this).attr('src', src.substr(0, src.length-9) + 'plus.png');
204 | else
205 | $(this).attr('src', src.substr(0, src.length-8) + 'minus.png');
206 | }).css('display', '');
207 | if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) {
208 | togglers.click();
209 | }
210 | },
211 |
212 | /**
213 | * helper function to hide the search marks again
214 | */
215 | hideSearchWords : function() {
216 | $('#searchbox .highlight-link').fadeOut(300);
217 | $('span.highlighted').removeClass('highlighted');
218 | },
219 |
220 | /**
221 | * make the url absolute
222 | */
223 | makeURL : function(relativeURL) {
224 | return DOCUMENTATION_OPTIONS.URL_ROOT + '/' + relativeURL;
225 | },
226 |
227 | /**
228 | * get the current relative url
229 | */
230 | getCurrentURL : function() {
231 | var path = document.location.pathname;
232 | var parts = path.split(/\//);
233 | $.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() {
234 | if (this == '..')
235 | parts.pop();
236 | });
237 | var url = parts.join('/');
238 | return path.substring(url.lastIndexOf('/') + 1, path.length - 1);
239 | }
240 | };
241 |
242 | // quick alias for translations
243 | _ = Documentation.gettext;
244 |
245 | $(document).ready(function() {
246 | Documentation.init();
247 | });
248 |
--------------------------------------------------------------------------------
/doc/_build/html/_static/underscore.js:
--------------------------------------------------------------------------------
1 | // Underscore.js 0.5.5
2 | // (c) 2009 Jeremy Ashkenas, DocumentCloud Inc.
3 | // Underscore is freely distributable under the terms of the MIT license.
4 | // Portions of Underscore are inspired by or borrowed from Prototype.js,
5 | // Oliver Steele's Functional, and John Resig's Micro-Templating.
6 | // For all details and documentation:
7 | // http://documentcloud.github.com/underscore/
8 | (function(){var j=this,n=j._,i=function(a){this._wrapped=a},m=typeof StopIteration!=="undefined"?StopIteration:"__break__",b=j._=function(a){return new i(a)};if(typeof exports!=="undefined")exports._=b;var k=Array.prototype.slice,o=Array.prototype.unshift,p=Object.prototype.toString,q=Object.prototype.hasOwnProperty,r=Object.prototype.propertyIsEnumerable;b.VERSION="0.5.5";b.each=function(a,c,d){try{if(a.forEach)a.forEach(c,d);else if(b.isArray(a)||b.isArguments(a))for(var e=0,f=a.length;e=e.computed&&(e={value:f,computed:g})});return e.value};b.min=function(a,c,d){if(!c&&b.isArray(a))return Math.min.apply(Math,a);var e={computed:Infinity};b.each(a,function(f,g,h){g=c?c.call(d,f,g,h):f;gf?1:0}),"value")};b.sortedIndex=function(a,c,d){d=d||b.identity;for(var e=0,f=a.length;e>1;d(a[g])=0})})};b.zip=function(){for(var a=b.toArray(arguments),c=b.max(b.pluck(a,"length")),d=new Array(c),e=0;e0?f-c:c-f)>=0)return e;e[g++]=f}};b.bind=function(a,c){var d=b.rest(arguments,2);return function(){return a.apply(c||j,d.concat(b.toArray(arguments)))}};b.bindAll=function(a){var c=b.rest(arguments);if(c.length==0)c=b.functions(a);b.each(c,function(d){a[d]=b.bind(a[d],a)});
17 | return a};b.delay=function(a,c){var d=b.rest(arguments,2);return setTimeout(function(){return a.apply(a,d)},c)};b.defer=function(a){return b.delay.apply(b,[a,1].concat(b.rest(arguments)))};b.wrap=function(a,c){return function(){var d=[a].concat(b.toArray(arguments));return c.apply(c,d)}};b.compose=function(){var a=b.toArray(arguments);return function(){for(var c=b.toArray(arguments),d=a.length-1;d>=0;d--)c=[a[d].apply(this,c)];return c[0]}};b.keys=function(a){if(b.isArray(a))return b.range(0,a.length);
18 | var c=[];for(var d in a)q.call(a,d)&&c.push(d);return c};b.values=function(a){return b.map(a,b.identity)};b.functions=function(a){return b.select(b.keys(a),function(c){return b.isFunction(a[c])}).sort()};b.extend=function(a,c){for(var d in c)a[d]=c[d];return a};b.clone=function(a){if(b.isArray(a))return a.slice(0);return b.extend({},a)};b.tap=function(a,c){c(a);return a};b.isEqual=function(a,c){if(a===c)return true;var d=typeof a;if(d!=typeof c)return false;if(a==c)return true;if(!a&&c||a&&!c)return false;
19 | if(a.isEqual)return a.isEqual(c);if(b.isDate(a)&&b.isDate(c))return a.getTime()===c.getTime();if(b.isNaN(a)&&b.isNaN(c))return true;if(b.isRegExp(a)&&b.isRegExp(c))return a.source===c.source&&a.global===c.global&&a.ignoreCase===c.ignoreCase&&a.multiline===c.multiline;if(d!=="object")return false;if(a.length&&a.length!==c.length)return false;d=b.keys(a);var e=b.keys(c);if(d.length!=e.length)return false;for(var f in a)if(!b.isEqual(a[f],c[f]))return false;return true};b.isEmpty=function(a){return b.keys(a).length==
20 | 0};b.isElement=function(a){return!!(a&&a.nodeType==1)};b.isArray=function(a){return!!(a&&a.concat&&a.unshift)};b.isArguments=function(a){return a&&b.isNumber(a.length)&&!b.isArray(a)&&!r.call(a,"length")};b.isFunction=function(a){return!!(a&&a.constructor&&a.call&&a.apply)};b.isString=function(a){return!!(a===""||a&&a.charCodeAt&&a.substr)};b.isNumber=function(a){return p.call(a)==="[object Number]"};b.isDate=function(a){return!!(a&&a.getTimezoneOffset&&a.setUTCFullYear)};b.isRegExp=function(a){return!!(a&&
21 | a.test&&a.exec&&(a.ignoreCase||a.ignoreCase===false))};b.isNaN=function(a){return b.isNumber(a)&&isNaN(a)};b.isNull=function(a){return a===null};b.isUndefined=function(a){return typeof a=="undefined"};b.noConflict=function(){j._=n;return this};b.identity=function(a){return a};b.breakLoop=function(){throw m;};var s=0;b.uniqueId=function(a){var c=s++;return a?a+c:c};b.template=function(a,c){a=new Function("obj","var p=[],print=function(){p.push.apply(p,arguments);};with(obj){p.push('"+a.replace(/[\r\t\n]/g,
22 | " ").replace(/'(?=[^%]*%>)/g,"\t").split("'").join("\\'").split("\t").join("'").replace(/<%=(.+?)%>/g,"',$1,'").split("<%").join("');").split("%>").join("p.push('")+"');}return p.join('');");return c?a(c):a};b.forEach=b.each;b.foldl=b.inject=b.reduce;b.foldr=b.reduceRight;b.filter=b.select;b.every=b.all;b.some=b.any;b.head=b.first;b.tail=b.rest;b.methods=b.functions;var l=function(a,c){return c?b(a).chain():a};b.each(b.functions(b),function(a){var c=b[a];i.prototype[a]=function(){var d=b.toArray(arguments);
23 | o.call(d,this._wrapped);return l(c.apply(b,d),this._chain)}});b.each(["pop","push","reverse","shift","sort","splice","unshift"],function(a){var c=Array.prototype[a];i.prototype[a]=function(){c.apply(this._wrapped,arguments);return l(this._wrapped,this._chain)}});b.each(["concat","join","slice"],function(a){var c=Array.prototype[a];i.prototype[a]=function(){return l(c.apply(this._wrapped,arguments),this._chain)}});i.prototype.chain=function(){this._chain=true;return this};i.prototype.value=function(){return this._wrapped}})();
24 |
--------------------------------------------------------------------------------
/tutorials/matchpoint/Ambivalence.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {
6 | "collapsed": true
7 | },
8 | "source": [
9 | "# Implementing the EffTox Dose-Finding Design in the Matchpoint Trials"
10 | ]
11 | },
12 | {
13 | "cell_type": "markdown",
14 | "metadata": {},
15 | "source": [
16 |     "This tutorial complements the manuscript _Implementing the EffTox Dose-Finding Design in the Matchpoint Trial_ (Brock _et al_., in submission). Please consult the paper for the clinical background, the methodology details, and full explanation of the terminology."
17 | ]
18 | },
19 | {
20 | "cell_type": "markdown",
21 | "metadata": {},
22 | "source": [
23 | "## Dose Ambivalence"
24 | ]
25 | },
26 | {
27 | "cell_type": "markdown",
28 | "metadata": {},
29 | "source": [
30 | "In this notebook, we illustrate the phenomenon of _dose ambivalence_ using the EffTox design in the seamless phase I/II dose-finding clinical trial, Matchpoint."
31 | ]
32 | },
33 | {
34 | "cell_type": "code",
35 | "execution_count": 49,
36 | "metadata": {
37 | "collapsed": false
38 | },
39 | "outputs": [],
40 | "source": [
41 | "import numpy as np\n",
42 | "from scipy.stats import norm\n",
43 | "\n",
44 | "from clintrials.dosefinding.efftox import EffTox, LpNormCurve"
45 | ]
46 | },
47 | {
48 | "cell_type": "code",
49 | "execution_count": 2,
50 | "metadata": {
51 | "collapsed": true
52 | },
53 | "outputs": [],
54 | "source": [
55 | "real_doses = [7.5, 15, 30, 45]\n",
56 | "trial_size = 30\n",
57 | "cohort_size = 3\n",
58 | "first_dose = 3\n",
59 | "prior_tox_probs = (0.025, 0.05, 0.1, 0.25)\n",
60 | "prior_eff_probs = (0.2, 0.3, 0.5, 0.6)\n",
61 | "tox_cutoff = 0.40\n",
62 | "eff_cutoff = 0.45\n",
63 | "tox_certainty = 0.05\n",
64 | "eff_certainty = 0.03"
65 | ]
66 | },
67 | {
68 | "cell_type": "code",
69 | "execution_count": 3,
70 | "metadata": {
71 | "collapsed": true
72 | },
73 | "outputs": [],
74 | "source": [
75 | "mu_t_mean, mu_t_sd = -5.4317, 2.7643\n",
76 | "beta_t_mean, beta_t_sd = 3.1761, 2.7703\n",
77 | "mu_e_mean, mu_e_sd = -0.8442, 1.9786\n",
78 | "beta_e_1_mean, beta_e_1_sd = 1.9857, 1.9820\n",
79 | "beta_e_2_mean, beta_e_2_sd = 0, 0.2\n",
80 | "psi_mean, psi_sd = 0, 1\n",
81 | "efftox_priors = [\n",
82 | " norm(loc=mu_t_mean, scale=mu_t_sd),\n",
83 | " norm(loc=beta_t_mean, scale=beta_t_sd),\n",
84 | " norm(loc=mu_e_mean, scale=mu_e_sd),\n",
85 | " norm(loc=beta_e_1_mean, scale=beta_e_1_sd),\n",
86 | " norm(loc=beta_e_2_mean, scale=beta_e_2_sd),\n",
87 | " norm(loc=psi_mean, scale=psi_sd),\n",
88 | " ]"
89 | ]
90 | },
91 | {
92 | "cell_type": "markdown",
93 | "metadata": {},
94 | "source": [
95 | "The above parameters are explained in the manuscript."
96 | ]
97 | },
98 | {
99 | "cell_type": "code",
100 | "execution_count": 4,
101 | "metadata": {
102 | "collapsed": true
103 | },
104 | "outputs": [],
105 | "source": [
106 | "hinge_points = [(0.4, 0), (1, 0.7), (0.5, 0.4)]\n",
107 | "metric = LpNormCurve(hinge_points[0][0], hinge_points[1][1], hinge_points[2][0], hinge_points[2][1])"
108 | ]
109 | },
110 | {
111 | "cell_type": "code",
112 | "execution_count": 5,
113 | "metadata": {
114 | "collapsed": true
115 | },
116 | "outputs": [],
117 | "source": [
118 | "et = EffTox(real_doses, efftox_priors, tox_cutoff, eff_cutoff, tox_certainty, eff_certainty, metric, trial_size,\n",
119 | " first_dose)"
120 | ]
121 | },
122 | {
123 | "cell_type": "markdown",
124 | "metadata": {},
125 | "source": [
126 | "The EffTox class is an object-oriented implementation of the trial design by Thall & Cook (Thall, P. F., & Cook, J. D. (2004). Dose-Finding Based on Efficacy-Toxicity Trade-Offs. Biometrics, 60(3), 684–693.)"
127 | ]
128 | },
129 | {
130 | "cell_type": "markdown",
131 | "metadata": {},
132 | "source": [
133 | "## Dose ambivalence after 3NTE"
134 | ]
135 | },
136 | {
137 | "cell_type": "markdown",
138 | "metadata": {},
139 | "source": [
140 | "Outcomes for a patient are represented by a three item tuple, where:\n",
141 | "\n",
142 |     "- first item is 1-based dose-index given (i.e. 3 is dose-level 3);\n",
143 | "- second item is 1 if toxicity happened, else 0;\n",
144 | "- third item is 1 if efficacy happened, else 0.\n",
145 | "\n",
146 | "Outcomes for several patients are represented as lists:"
147 | ]
148 | },
149 | {
150 | "cell_type": "code",
151 | "execution_count": 42,
152 | "metadata": {
153 | "collapsed": true
154 | },
155 | "outputs": [],
156 | "source": [
157 | "outcomes = [(3, 0, 0), (3, 1, 0), (3, 0, 1)]"
158 | ]
159 | },
160 | {
161 | "cell_type": "code",
162 | "execution_count": 43,
163 | "metadata": {
164 | "collapsed": false
165 | },
166 | "outputs": [
167 | {
168 | "data": {
169 | "text/plain": [
170 | "3"
171 | ]
172 | },
173 | "execution_count": 43,
174 | "metadata": {},
175 | "output_type": "execute_result"
176 | }
177 | ],
178 | "source": [
179 | "et.reset()\n",
180 | "np.random.seed(123)\n",
181 | "et.update(outcomes)"
182 | ]
183 | },
184 | {
185 | "cell_type": "markdown",
186 | "metadata": {},
187 | "source": [
188 |     "So, using seed 123, dose-level 3 is recommended to be given to the next patient after observing 3NTE in the first cohort of patients. Fair enough."
189 | ]
190 | },
191 | {
192 | "cell_type": "code",
193 | "execution_count": 44,
194 | "metadata": {
195 | "collapsed": false
196 | },
197 | "outputs": [
198 | {
199 | "data": {
200 | "text/plain": [
201 | "4"
202 | ]
203 | },
204 | "execution_count": 44,
205 | "metadata": {},
206 | "output_type": "execute_result"
207 | }
208 | ],
209 | "source": [
210 | "et.reset()\n",
211 | "np.random.seed(321)\n",
212 | "et.update(outcomes)"
213 | ]
214 | },
215 | {
216 | "cell_type": "markdown",
217 | "metadata": {},
218 | "source": [
219 | "Wait...using seed 321, that advice is now dose-level 4. I need a single answer. What should I do?"
220 | ]
221 | },
222 | {
223 | "cell_type": "markdown",
224 | "metadata": {},
225 | "source": [
226 | "Let's define a simple function to calculate next dose based on some outcomes:"
227 | ]
228 | },
229 | {
230 | "cell_type": "code",
231 | "execution_count": 45,
232 | "metadata": {
233 | "collapsed": true
234 | },
235 | "outputs": [],
236 | "source": [
237 | "def get_next_dose(trial, outcomes, **kwargs):\n",
238 | " trial.reset()\n",
239 | " next_dose = trial.update(outcomes, **kwargs)\n",
240 | " return next_dose"
241 | ]
242 | },
243 | {
244 | "cell_type": "markdown",
245 | "metadata": {},
246 | "source": [
247 | "And then run that a number of times. For indication, 100 iterations will suffice (it takes a wee while...). In practice, you might use more iterations."
248 | ]
249 | },
250 | {
251 | "cell_type": "code",
252 | "execution_count": 46,
253 | "metadata": {
254 | "collapsed": false
255 | },
256 | "outputs": [],
257 | "source": [
258 | "np.random.seed(123)\n",
259 | "replicates = [get_next_dose(et, outcomes, n=10**5) for i in range(100)]"
260 | ]
261 | },
262 | {
263 | "cell_type": "code",
264 | "execution_count": 47,
265 | "metadata": {
266 | "collapsed": false
267 | },
268 | "outputs": [
269 | {
270 | "data": {
271 | "text/plain": [
272 | "[(3, 0.56000000000000005), (4, 0.44)]"
273 | ]
274 | },
275 | "execution_count": 47,
276 | "metadata": {},
277 | "output_type": "execute_result"
278 | }
279 | ],
280 | "source": [
281 | "doses, freq = np.unique(replicates, return_counts=True)\n",
282 | "list(zip(doses, 1.0 * freq / len(replicates)))"
283 | ]
284 | },
285 | {
286 | "cell_type": "markdown",
287 | "metadata": {},
288 | "source": [
289 | "So, dose 3 gets recommended in 56% of iterations; slightly more frequently dose 4. This is useful information. The lack of a strong consensus here would suggest that clinical opinion should be used to select the next dose from doses 3 and 4. Had the split been 90:10, we might have been more inclined to go with the majority decision."
290 | ]
291 | },
292 | {
293 | "cell_type": "code",
294 | "execution_count": null,
295 | "metadata": {
296 | "collapsed": true
297 | },
298 | "outputs": [],
299 | "source": []
300 | }
301 | ],
302 | "metadata": {
303 | "kernelspec": {
304 | "display_name": "Python 2",
305 | "language": "python",
306 | "name": "python2"
307 | },
308 | "language_info": {
309 | "codemirror_mode": {
310 | "name": "ipython",
311 | "version": 2
312 | },
313 | "file_extension": ".py",
314 | "mimetype": "text/x-python",
315 | "name": "python",
316 | "nbconvert_exporter": "python",
317 | "pygments_lexer": "ipython2",
318 | "version": "2.7.11"
319 | }
320 | },
321 | "nbformat": 4,
322 | "nbformat_minor": 0
323 | }
324 |
--------------------------------------------------------------------------------
/clintrials/tte.py:
--------------------------------------------------------------------------------
1 | __author__ = 'Kristian Brock'
2 | __contact__ = 'kristian.brock@gmail.com'
3 |
4 | """ Time-to-event trial designs """
5 |
6 | from collections import OrderedDict
7 | import numpy as np
8 | from scipy.stats import expon, poisson, invgamma
9 |
10 | from clintrials.util import atomic_to_json, iterable_to_json
11 |
12 |
class BayesianTimeToEvent():
    """ An object-oriented implementation of a simple adaptive Bayesian design for time-to-event endpoints using a
    model assuming exponentially distributed event times and inverse-gamma prior beliefs on median survival time.

    .. note:: See Thall, P.F., Wooten, L.H., & Tannir, N.M. (2005) - *Monitoring Event Times in Early Phase Clinical
        Trials: Some Practical Issues* for full information.

    This class satisfies the interface for a time-to-event trial in the clintrials package, i.e. it supports methods:

    - event_times()
    - recruitment_times()
    - update(cases)
    - test(time, kwargs)

    .. note:: the event times are time-deltas *relative to the recruitment times*. E.g. recruitment at t=1
        and event at t=2 means the event took place at absolute time t=3. Using deltas gets around
        the silly scenario where events might occur before recruitment.

    """

    def __init__(self, alpha_prior, beta_prior):
        """ Create an instance.

        :param alpha_prior: shape parameter (alpha) of the inverse-gamma prior on median time-to-event.
            (An earlier version of this docstring said "beta distribution"; ``test`` updates these
            parameters conjugately and evaluates them through ``scipy.stats.invgamma``, so the prior
            is inverse-gamma.)
        :type alpha_prior: float
        :param beta_prior: scale parameter (beta) of the inverse-gamma prior on median time-to-event
        :type beta_prior: float

        """

        self.alpha_prior = alpha_prior
        self.beta_prior = beta_prior
        # Event times are stored as deltas from each patient's recruitment time (see class note).
        self._times_to_event = []
        self._recruitment_times = []

    def event_times(self):
        """ Get list of the times at which events occurred.

        Times are deltas relative to each patient's recruitment time.

        :return: list of event times in the order they were provided
        :rtype: list

        """

        return self._times_to_event

    def recruitment_times(self):
        """ Get list of the times at which patients were recruited.

        :return: list of recruitment times in the order they were provided
        :rtype: list

        """

        return self._recruitment_times

    def update(self, cases):
        """ Update the trial with new patient cases.

        :param cases: list of cases expressed as 2-tuples, (event_time, recruitment_time), where
            event_time is a delta relative to recruitment_time
        :type cases: list
        :return: Nothing
        :rtype: None

        """

        for event_time, recruitment_time in cases:
            self._times_to_event.append(event_time)
            self._recruitment_times.append(recruitment_time)

    def test(self, time, cutoff, probability, less_than=True):
        """ Test posterior belief that median time-to-event parameter is less than or greater than some boundary value.

        Patients recruited after `time` are ignored. Patients recruited by `time` but without an
        event yet are treated as censored at their current follow-up time.

        :param time: test at this time
        :type time: float
        :param cutoff: test median time against this critical value
        :type cutoff: float
        :param probability: require at least this degree of posterior certainty to declare significance
        :type probability: float
        :param less_than: True, to test parameter is less than cut-off, a-posteriori. False to test greater than
        :type less_than: bool
        :return: JSON-able dict object reporting test output
        :rtype: dict

        """

        event_time = np.array(self._times_to_event)
        recruit_time = np.array(self._recruitment_times)

        # Filter to just patients who are registered by time
        registered_patients = recruit_time <= time
        # A patient has had an event if their follow-up at `time` exceeds their event-time delta.
        has_failed = time - recruit_time[registered_patients] > event_time[registered_patients]
        # Observed time-on-study per patient: the event delta if the event has occurred,
        # otherwise the censored follow-up time (time - recruitment).
        survival_time = np.array([min(x, y) for (x, y) in
                                  zip(time - recruit_time[registered_patients], event_time[registered_patients])
                                  ])
        # Update posterior beliefs for mu_E
        # Conjugate inverse-gamma update for exponential event times.
        # NOTE(review): the log(2) factor suggests the monitored parameter is the *median* event
        # time (median = mean * ln 2 for an exponential) — confirm against Thall et al. (2005).
        alpha_post = self.alpha_prior + sum(has_failed)
        beta_post = self.beta_prior + np.log(2) * sum(survival_time)
        # Posterior mean of an inverse-gamma(alpha, beta) variable is beta / (alpha - 1), for alpha > 1.
        mu_post = beta_post / (alpha_post-1)

        # Run test:
        # When testing "less than", we stop if we are sufficiently sure the parameter is below the
        # cut-off; when testing "greater than", we stop if the certainty drops below `probability`.
        test_probability = invgamma.cdf(cutoff, a=alpha_post, scale=beta_post) if less_than \
            else 1 - invgamma.cdf(cutoff, a=alpha_post, scale=beta_post)
        stop_trial = test_probability > probability if less_than else test_probability < probability

        # Assemble a JSON-able report of the analysis at `time`.
        test_report = OrderedDict()
        test_report['Time'] = time
        test_report['Patients'] = sum(registered_patients)
        test_report['Events'] = sum(has_failed)
        test_report['TotalEventTime'] = sum(survival_time)
        test_report['AlphaPosterior'] = alpha_post
        test_report['BetaPosterior'] = beta_post
        # NOTE(review): whether mu_post is a mean or median event time depends on the model's
        # parameterisation (see the log(2) note above) — verify these two labels against the paper.
        test_report['MeanEventTimePosterior'] = mu_post
        test_report['MedianEventTimePosterior'] = mu_post * np.log(2)
        test_report['Cutoff'] = cutoff
        test_report['Certainty'] = probability
        test_report['Probability'] = test_probability
        test_report['LessThan'] = atomic_to_json(less_than)
        test_report['Stop'] = atomic_to_json(stop_trial)
        return test_report
132 |
133 |
def matrix_cohort_analysis(n_simulations, n_patients, true_median, alpha_prior, beta_prior,
                           lower_cutoff, upper_cutoff, interim_certainty, final_certainty,
                           interim_analysis_after_patients, interim_analysis_time_delta,
                           final_analysis_time_delta, recruitment_stream):
    """ Simulate TTE outcomes in the National Lung Matrix trial.

    .. note:: See Thall, P.F., Wooten, L.H., & Tannir, N.M. (2005) - *Monitoring Event Times in Early Phase Clinical
        Trials: Some Practical Issues* for full information.

    :param n_simulations: number of simulated trial replicates to run
    :param n_patients: maximum number of patients recruited per simulated trial
    :param true_median: true median time-to-event used to simulate exponential outcomes
    :param alpha_prior: prior shape of the inverse-gamma belief on mean event time
    :param beta_prior: prior scale of the inverse-gamma belief on mean event time
    :param lower_cutoff: interim analyses test that median event time is less than this
    :param upper_cutoff: the final analysis tests that median event time is greater than this
    :param interim_certainty: posterior certainty required to stop at an interim analysis
    :param final_certainty: posterior certainty required at the final analysis
    :param interim_analysis_after_patients: iterable of patient counts after which interims run
    :param interim_analysis_time_delta: delay after the nth recruitment before its interim analysis
    :param final_analysis_time_delta: delay after the last recruitment before the final analysis
    :param recruitment_stream: recruitment-time source; must support reset() and next()
    :return: a single report dict if n_simulations == 1, else a list of report dicts
    :rtype: dict or list

    """

    reports = []
    for _ in range(n_simulations):
        trial = BayesianTimeToEvent(alpha_prior, beta_prior)
        recruitment_stream.reset()
        recruitment_times = np.array([recruitment_stream.next() for _ in range(n_patients)])
        true_mean = true_median / np.log(2)
        event_times = expon(scale=true_mean).rvs(n_patients)  # Exponential survival times
        trial.update(list(zip(event_times, recruitment_times)))
        # Distinct interim analysis times in chronological order. A bare set has
        # arbitrary iteration order, which would run the interims out of sequence.
        interim_analysis_times = sorted(set(recruitment_times[x - 1] + interim_analysis_time_delta
                                            for x in interim_analysis_after_patients if x < n_patients))

        trial_report = OrderedDict()
        # Call parameters
        trial_report['MaxPatients'] = n_patients
        trial_report['TrueMedianEventTime'] = true_median
        trial_report['PriorAlpha'] = alpha_prior
        trial_report['PriorBeta'] = beta_prior
        trial_report['LowerCutoff'] = lower_cutoff
        trial_report['UpperCutoff'] = upper_cutoff
        trial_report['InterimCertainty'] = interim_certainty
        trial_report['FinalCertainty'] = final_certainty
        trial_report['InterimAnalysisAfterPatients'] = interim_analysis_after_patients
        trial_report['InterimAnalysisTimeDelta'] = interim_analysis_time_delta
        trial_report['FinalAnalysisTimeDelta'] = final_analysis_time_delta
        # Simulated patient outcomes
        trial_report['RecruitmentTimes'] = iterable_to_json(recruitment_times)
        trial_report['EventTimes'] = iterable_to_json(event_times)
        trial_report['InterimAnalyses'] = []
        # Interim analyses. A stop signal ends THIS simulated trial only; the previous
        # code returned from the function here, silently discarding every remaining
        # simulation and breaking the list-return contract for n_simulations > 1.
        stopped_at_interim = False
        for time in interim_analysis_times:
            interim_outcome = trial.test(time, lower_cutoff, interim_certainty, less_than=True)
            trial_report['InterimAnalyses'].append(interim_outcome)
            if interim_outcome['Stop']:
                trial_report['Decision'] = 'StopAtInterim'
                trial_report['FinalAnalysis'] = interim_outcome
                trial_report['FinalPatients'] = interim_outcome['Patients']
                trial_report['FinalEvents'] = interim_outcome['Events']
                trial_report['FinalTotalEventTime'] = interim_outcome['TotalEventTime']
                stopped_at_interim = True
                break
        if not stopped_at_interim:
            # Final analysis, run after the last recruit has been followed up.
            final_analysis_time = max(recruitment_times) + final_analysis_time_delta
            final_outcome = trial.test(final_analysis_time, upper_cutoff, final_certainty, less_than=False)
            trial_report['FinalAnalysis'] = final_outcome
            trial_report['Decision'] = 'StopAtFinal' if final_outcome['Stop'] else 'GoAtFinal'
            trial_report['FinalPatients'] = final_outcome['Patients']
            trial_report['FinalEvents'] = final_outcome['Events']
            trial_report['FinalTotalEventTime'] = final_outcome['TotalEventTime']
        reports.append(trial_report)

    # Preserve the legacy contract: a bare dict for one simulation, a list otherwise.
    if n_simulations == 1:
        return reports[0]
    else:
        return reports
204 |
--------------------------------------------------------------------------------
/clintrials/phase2/bebop/__init__.py:
--------------------------------------------------------------------------------
1 | __author__ = 'Kristian Brock'
2 | __contact__ = 'kristian.brock@gmail.com'
3 |
4 | __all__ = ["peps2v1", "peps2v2"]
5 |
6 | """
7 |
8 | BeBOP: Bayesian design with Bivariate Outcomes and Predictive variables
9 | Brock, et al. To be published.
10 |
11 | BeBOP studies the dual primary outcomes efficacy and toxicity.
12 | The two events can be associated to reflect the potential for correlated
13 | outcomes. The design models the probabilities of efficacy and toxicity
14 | using logistic models so that the information in predictive variables
15 | can be incorporated to tailor the treatment acceptance / rejection
16 | decision.
17 |
18 | This is a generalisation of the design that was used in the PePS2 trial.
19 | PePS2 studies the efficacy and toxicity of a drug in a population of
20 | performance status 2 lung cancer patients. Patient outcomes may plausibly
be affected by whether or not they have been treated before, and the
22 | expression rate of PD-L1 in their cells.
23 |
24 | PePS2 uses Brock et al's BeBOP design to incorporate the potentially
25 | predictive data in PD-L1 expression rate and whether or not a patient has
26 | been pre-treated to find the sub-population(s) where the drug works
27 | and is tolerable.
28 |
29 | """
30 |
31 | import numpy
32 | import pandas as pd
33 |
34 | from clintrials.stats import ProbabilityDensitySample
35 |
36 |
class BeBOP():
    """ Bayesian design with Bivariate Outcomes and Predictive variables (BeBOP).

    Models the dual primary outcomes efficacy and toxicity, possibly associated,
    using caller-supplied probability models, and fits the posterior by Monte
    Carlo integration over a sample drawn from the priors' supports.
    """

    def __init__(self, theta_priors, efficacy_model, toxicity_model, joint_model):
        """

        Params:
        :param theta_priors: list of prior distributions for elements of parameter vector, theta.
        Each prior object should support obj.ppf(x) and obj.pdf(x) like classes in scipy
        :param efficacy_model: func with signature x, theta; where x is a case vector and theta a 2d array of
        parameter values, the first column containing values for the first parameter, the second column the
        second parameter, etc, so that each row in theta is a single parameter set. Function should return probability
        of efficacy of case x under each parameter set (i.e. each row of theta) so that a 1*len(theta) array should
        be returned.
        :param toxicity_model: func with signature x, theta; same convention as efficacy_model, returning the
        probability of toxicity of case x under each parameter set.
        :param joint_model: func with signature x, theta; same convention, returning the joint probability of
        efficacy and toxicity of case x under each parameter set. Generally this method would use efficacy_model
        and toxicity_model. For non-associated events, for instance, the simple product of efficacy_model(x, theta)
        and toxicity_model(x, theta) would do the job. For associated events, more complexity is required.

        In case vector x, the element x[0] should be boolean efficacy variable, with 1 showing efficacy occurred.
        In case vector x, the element x[1] should be boolean toxicity variable, with 1 showing toxicity occurred.

        See clintrials.phase2.bebop.peps2v2 for a working trio of efficacy_model, toxicity_model and joint_model that
        allow for associated efficacy and toxicity events.

        Note: efficacy_model, toxicity_model and joint_model should be vectorised to work with one case and many
        parameter sets (rather than just many cases and one parameter set) for quick integration using Monte Carlo.

        """

        self.priors = theta_priors
        self._pi_e = efficacy_model
        self._pi_t = toxicity_model
        self._pi_ab = joint_model
        # Initialise model
        self.reset()

    def reset(self):
        """ Discard all observed cases and the fitted posterior sample. """
        self.cases = []
        self._pds = None

    def _l_n(self, D, theta):
        """ Compound likelihood of cases D under each parameter set (row) of theta. """
        if len(D) > 0:
            # A list comprehension is required here: in Python 3, map returns an
            # iterator and numpy.array(<map>) yields a 0-d object array on which
            # prod(axis=0) fails.
            lik = numpy.array([self._pi_ab(x, theta) for x in D])
            return lik.prod(axis=0)
        else:
            # No data: likelihood is identically 1 for every parameter set.
            return numpy.ones(len(theta))

    def size(self):
        """ Number of cases observed so far. """
        return len(self.cases)

    def efficacies(self):
        """ List of observed efficacy indicators, in case order. """
        return [case[0] for case in self.cases]

    def toxicities(self):
        """ List of observed toxicity indicators, in case order. """
        return [case[1] for case in self.cases]

    def get_case_elements(self, i):
        """ List of element i from each observed case, in case order. """
        return [case[i] for case in self.cases]

    def update(self, cases, n=10**6, epsilon=0.00001, **kwargs):
        """ Add cases and refit the posterior by Monte Carlo integration.

        :param cases: list of case vectors; x[0] is the efficacy indicator, x[1] the toxicity indicator
        :param n: number of Monte Carlo points sampled from the priors' supports
        :param epsilon: tail mass excluded at each end of every prior's support

        """

        self.cases.extend(cases)
        limits = [(dist.ppf(epsilon), dist.ppf(1 - epsilon)) for dist in self.priors]
        samp = numpy.column_stack([numpy.random.uniform(*limit_pair, size=n) for limit_pair in limits])
        # Posterior integrand: likelihood of ALL cases seen so far times the prior density.
        # (The previous code used only the newly-passed cases, so incremental updates
        # silently discarded every earlier observation.)
        lik_integrand = lambda x: self._l_n(self.cases, x) * numpy.prod(
            numpy.array([dist.pdf(col) for (dist, col) in zip(self.priors, x.T)]), axis=0)
        self._pds = ProbabilityDensitySample(samp, lik_integrand)
        return

    def _predict_case(self, case, eff_cutoff, tox_cutoff, pds, samp, estimate_ci=False):
        """ Posterior event probabilities and acceptability for a single case. """
        x = case
        eff_probs = self._pi_e(x, samp)
        tox_probs = self._pi_t(x, samp)
        from collections import OrderedDict
        predictions = OrderedDict([
            ('Pr(Eff)', pds.expectation(eff_probs)),
            ('Pr(Tox)', pds.expectation(tox_probs)),
            ('Pr(AccEff)', pds.expectation((eff_probs > eff_cutoff))),
            ('Pr(AccTox)', pds.expectation((tox_probs < tox_cutoff))),
        ])

        if estimate_ci:
            # 90% posterior interval for each event probability.
            predictions['Pr(Eff) Lower'] = pds.quantile_vector(eff_probs, 0.05, start_value=0.05)
            predictions['Pr(Eff) Upper'] = pds.quantile_vector(eff_probs, 0.95, start_value=0.95)
            predictions['Pr(Tox) Lower'] = pds.quantile_vector(tox_probs, 0.05, start_value=0.05)
            predictions['Pr(Tox) Upper'] = pds.quantile_vector(tox_probs, 0.95, start_value=0.95)
        return predictions

    def predict(self, cases, eff_cutoff, tox_cutoff, to_pandas=False, estimate_ci=False):
        """ Predict posterior event probabilities for each case; None if not yet updated. """
        if self._pds is not None:
            pds = self._pds
            samp = pds._samp
            fitted = [self._predict_case(x, eff_cutoff, tox_cutoff, pds, samp, estimate_ci=estimate_ci) for x in cases]
            if to_pandas:
                if estimate_ci:
                    return pd.DataFrame(fitted, columns=['Pr(Eff)', 'Pr(Tox)', 'Pr(AccEff)', 'Pr(AccTox)',
                                                         'Pr(Eff) Lower', 'Pr(Eff) Upper', 'Pr(Tox) Lower', 'Pr(Tox) Upper'])
                else:
                    return pd.DataFrame(fitted, columns=['Pr(Eff)', 'Pr(Tox)', 'Pr(AccEff)', 'Pr(AccTox)'])
            else:
                return fitted
        else:
            return None

    def get_posterior_param_means(self):
        """ Posterior mean of each element of theta; empty list if not yet updated. """
        if self._pds is not None:
            return numpy.apply_along_axis(lambda x: self._pds.expectation(x), 0, self._pds._samp)
        else:
            return []

    def theta_estimate(self, i, alpha=0.05):
        """ Get posterior confidence interval and mean estimate of element i in parameter vector.

        Returns (lower, mean, upper)

        """

        # The previous code tested an undefined name `j` here, raising NameError.
        if i < len(self.priors):
            mu = self._pds.expectation(self._pds._samp[:, i])
            return numpy.array([self._pds.quantile(i, alpha/2), mu, self._pds.quantile(i, 1-alpha/2)])
        else:
            return (0, 0, 0)
--------------------------------------------------------------------------------
/doc/conf.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #
3 | # clintrials documentation build configuration file, created by
4 | # sphinx-quickstart on Sat Dec 13 19:54:47 2014.
5 | #
6 | # This file is execfile()d with the current directory set to its containing dir.
7 | #
8 | # Note that not all possible configuration values are present in this
9 | # autogenerated file.
10 | #
11 | # All configuration values have a default; values that are commented out
12 | # serve to show the default.
13 |
14 | import sys, os
15 |
16 | # If extensions (or modules to document with autodoc) are in another directory,
17 | # add these directories to sys.path here. If the directory is relative to the
18 | # documentation root, use os.path.abspath to make it absolute, like shown here.
19 | #sys.path.insert(0, os.path.abspath('.'))
20 |
21 | # -- General configuration -----------------------------------------------------
22 |
23 | # If your documentation needs a minimal Sphinx version, state it here.
24 | #needs_sphinx = '1.0'
25 |
26 | # Add any Sphinx extension module names here, as strings. They can be extensions
27 | # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# NOTE: sphinx.ext.pngmath and sphinx.ext.mathjax are mutually exclusive; Sphinx raises
# an ExtensionError when both are enabled. Keep mathjax (client-side math rendering).
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.mathjax', 'sphinx.ext.ifconfig', 'sphinx.ext.viewcode']
29 |
30 | # Add any paths that contain templates here, relative to this directory.
31 | templates_path = ['_templates']
32 |
33 | # The suffix of source filenames.
34 | source_suffix = '.rst'
35 |
36 | # The encoding of source files.
37 | #source_encoding = 'utf-8-sig'
38 |
39 | # The master toctree document.
40 | master_doc = 'index'
41 |
42 | # General information about the project.
43 | project = u'clintrials'
44 | copyright = u'2014, Kristian Brock'
45 |
46 | # The version info for the project you're documenting, acts as replacement for
47 | # |version| and |release|, also used in various other places throughout the
48 | # built documents.
49 | #
50 | # The short X.Y version.
51 | version = '0.1'
52 | # The full version, including alpha/beta/rc tags.
53 | release = '0.1.1'
54 |
55 | # The language for content autogenerated by Sphinx. Refer to documentation
56 | # for a list of supported languages.
57 | #language = None
58 |
59 | # There are two options for replacing |today|: either, you set today to some
60 | # non-false value, then it is used:
61 | #today = ''
62 | # Else, today_fmt is used as the format for a strftime call.
63 | #today_fmt = '%B %d, %Y'
64 |
65 | # List of patterns, relative to source directory, that match files and
66 | # directories to ignore when looking for source files.
67 | exclude_patterns = ['_build']
68 |
69 | # The reST default role (used for this markup: `text`) to use for all documents.
70 | #default_role = None
71 |
72 | # If true, '()' will be appended to :func: etc. cross-reference text.
73 | #add_function_parentheses = True
74 |
75 | # If true, the current module name will be prepended to all description
76 | # unit titles (such as .. function::).
77 | #add_module_names = True
78 |
79 | # If true, sectionauthor and moduleauthor directives will be shown in the
80 | # output. They are ignored by default.
81 | #show_authors = False
82 |
83 | # The name of the Pygments (syntax highlighting) style to use.
84 | pygments_style = 'sphinx'
85 |
86 | # A list of ignored prefixes for module index sorting.
87 | #modindex_common_prefix = []
88 |
89 |
90 | # -- Options for HTML output ---------------------------------------------------
91 |
92 | # The theme to use for HTML and HTML Help pages. See the documentation for
93 | # a list of builtin themes.
94 | html_theme = 'default'
95 |
96 | # Theme options are theme-specific and customize the look and feel of a theme
97 | # further. For a list of options available for each theme, see the
98 | # documentation.
99 | #html_theme_options = {}
100 |
101 | # Add any paths that contain custom themes here, relative to this directory.
102 | #html_theme_path = []
103 |
104 | # The name for this set of Sphinx documents. If None, it defaults to
105 | # " v documentation".
106 | #html_title = None
107 |
108 | # A shorter title for the navigation bar. Default is the same as html_title.
109 | #html_short_title = None
110 |
111 | # The name of an image file (relative to this directory) to place at the top
112 | # of the sidebar.
113 | #html_logo = None
114 |
115 | # The name of an image file (within the static path) to use as favicon of the
116 | # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
117 | # pixels large.
118 | #html_favicon = None
119 |
120 | # Add any paths that contain custom static files (such as style sheets) here,
121 | # relative to this directory. They are copied after the builtin static files,
122 | # so a file named "default.css" will overwrite the builtin "default.css".
123 | html_static_path = ['_static']
124 |
125 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
126 | # using the given strftime format.
127 | #html_last_updated_fmt = '%b %d, %Y'
128 |
129 | # If true, SmartyPants will be used to convert quotes and dashes to
130 | # typographically correct entities.
131 | #html_use_smartypants = True
132 |
133 | # Custom sidebar templates, maps document names to template names.
134 | #html_sidebars = {}
135 |
136 | # Additional templates that should be rendered to pages, maps page names to
137 | # template names.
138 | #html_additional_pages = {}
139 |
140 | # If false, no module index is generated.
141 | #html_domain_indices = True
142 |
143 | # If false, no index is generated.
144 | #html_use_index = True
145 |
146 | # If true, the index is split into individual pages for each letter.
147 | #html_split_index = False
148 |
149 | # If true, links to the reST sources are added to the pages.
150 | #html_show_sourcelink = True
151 |
152 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
153 | #html_show_sphinx = True
154 |
155 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
156 | #html_show_copyright = True
157 |
158 | # If true, an OpenSearch description file will be output, and all pages will
159 | # contain a tag referring to it. The value of this option must be the
160 | # base URL from which the finished HTML is served.
161 | #html_use_opensearch = ''
162 |
163 | # This is the file name suffix for HTML files (e.g. ".xhtml").
164 | #html_file_suffix = None
165 |
166 | # Output file base name for HTML help builder.
167 | htmlhelp_basename = 'clintrialsdoc'
168 |
169 |
170 | # -- Options for LaTeX output --------------------------------------------------
171 |
172 | latex_elements = {
173 | # The paper size ('letterpaper' or 'a4paper').
174 | #'papersize': 'letterpaper',
175 |
176 | # The font size ('10pt', '11pt' or '12pt').
177 | #'pointsize': '10pt',
178 |
179 | # Additional stuff for the LaTeX preamble.
180 | #'preamble': '',
181 | }
182 |
183 | # Grouping the document tree into LaTeX files. List of tuples
184 | # (source start file, target name, title, author, documentclass [howto/manual]).
185 | latex_documents = [
186 | ('index', 'clintrials.tex', u'clintrials Documentation',
187 | u'Kristian Brock', 'manual'),
188 | ]
189 |
190 | # The name of an image file (relative to this directory) to place at the top of
191 | # the title page.
192 | #latex_logo = None
193 |
194 | # For "manual" documents, if this is true, then toplevel headings are parts,
195 | # not chapters.
196 | #latex_use_parts = False
197 |
198 | # If true, show page references after internal links.
199 | #latex_show_pagerefs = False
200 |
201 | # If true, show URL addresses after external links.
202 | #latex_show_urls = False
203 |
204 | # Documents to append as an appendix to all manuals.
205 | #latex_appendices = []
206 |
207 | # If false, no module index is generated.
208 | #latex_domain_indices = True
209 |
210 |
211 | # -- Options for manual page output --------------------------------------------
212 |
213 | # One entry per manual page. List of tuples
214 | # (source start file, name, description, authors, manual section).
215 | man_pages = [
216 | ('index', 'clintrials', u'clintrials Documentation',
217 | [u'Kristian Brock'], 1)
218 | ]
219 |
220 | # If true, show URL addresses after external links.
221 | #man_show_urls = False
222 |
223 |
224 | # -- Options for Texinfo output ------------------------------------------------
225 |
226 | # Grouping the document tree into Texinfo files. List of tuples
227 | # (source start file, target name, title, author,
228 | # dir menu entry, description, category)
229 | texinfo_documents = [
230 | ('index', 'clintrials', u'clintrials Documentation',
231 | u'Kristian Brock', 'clintrials', 'One line description of project.',
232 | 'Miscellaneous'),
233 | ]
234 |
235 | # Documents to append as an appendix to all manuals.
236 | #texinfo_appendices = []
237 |
238 | # If false, no module index is generated.
239 | #texinfo_domain_indices = True
240 |
241 | # How to display URL addresses: 'footnote', 'no', or 'inline'.
242 | #texinfo_show_urls = 'footnote'
243 |
244 |
245 | # -- Options for Epub output ---------------------------------------------------
246 |
247 | # Bibliographic Dublin Core info.
248 | epub_title = u'clintrials'
249 | epub_author = u'Kristian Brock'
250 | epub_publisher = u'Kristian Brock'
251 | epub_copyright = u'2014, Kristian Brock'
252 |
253 | # The language of the text. It defaults to the language option
254 | # or en if the language is not set.
255 | #epub_language = ''
256 |
257 | # The scheme of the identifier. Typical schemes are ISBN or URL.
258 | #epub_scheme = ''
259 |
260 | # The unique identifier of the text. This can be a ISBN number
261 | # or the project homepage.
262 | #epub_identifier = ''
263 |
264 | # A unique identification for the text.
265 | #epub_uid = ''
266 |
267 | # A tuple containing the cover image and cover page html template filenames.
268 | #epub_cover = ()
269 |
270 | # HTML files that should be inserted before the pages created by sphinx.
271 | # The format is a list of tuples containing the path and title.
272 | #epub_pre_files = []
273 |
274 | # HTML files shat should be inserted after the pages created by sphinx.
275 | # The format is a list of tuples containing the path and title.
276 | #epub_post_files = []
277 |
278 | # A list of files that should not be packed into the epub file.
279 | #epub_exclude_files = []
280 |
281 | # The depth of the table of contents in toc.ncx.
282 | #epub_tocdepth = 3
283 |
284 | # Allow duplicate toc entries.
285 | #epub_tocdup = True
286 |
287 |
288 | # Example configuration for intersphinx: refer to the Python standard library.
289 | intersphinx_mapping = {'http://docs.python.org/': None}
290 |
--------------------------------------------------------------------------------
/doc/_build/html/_static/basic.css:
--------------------------------------------------------------------------------
1 | /*
2 | * basic.css
3 | * ~~~~~~~~~
4 | *
5 | * Sphinx stylesheet -- basic theme.
6 | *
7 | * :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS.
8 | * :license: BSD, see LICENSE for details.
9 | *
10 | */
11 |
12 | /* -- main layout ----------------------------------------------------------- */
13 |
14 | div.clearer {
15 | clear: both;
16 | }
17 |
18 | /* -- relbar ---------------------------------------------------------------- */
19 |
20 | div.related {
21 | width: 100%;
22 | font-size: 90%;
23 | }
24 |
25 | div.related h3 {
26 | display: none;
27 | }
28 |
29 | div.related ul {
30 | margin: 0;
31 | padding: 0 0 0 10px;
32 | list-style: none;
33 | }
34 |
35 | div.related li {
36 | display: inline;
37 | }
38 |
39 | div.related li.right {
40 | float: right;
41 | margin-right: 5px;
42 | }
43 |
44 | /* -- sidebar --------------------------------------------------------------- */
45 |
46 | div.sphinxsidebarwrapper {
47 | padding: 10px 5px 0 10px;
48 | }
49 |
50 | div.sphinxsidebar {
51 | float: left;
52 | width: 230px;
53 | margin-left: -100%;
54 | font-size: 90%;
55 | }
56 |
57 | div.sphinxsidebar ul {
58 | list-style: none;
59 | }
60 |
61 | div.sphinxsidebar ul ul,
62 | div.sphinxsidebar ul.want-points {
63 | margin-left: 20px;
64 | list-style: square;
65 | }
66 |
67 | div.sphinxsidebar ul ul {
68 | margin-top: 0;
69 | margin-bottom: 0;
70 | }
71 |
72 | div.sphinxsidebar form {
73 | margin-top: 10px;
74 | }
75 |
76 | div.sphinxsidebar input {
77 | border: 1px solid #98dbcc;
78 | font-family: sans-serif;
79 | font-size: 1em;
80 | }
81 |
82 | div.sphinxsidebar #searchbox input[type="text"] {
83 | width: 170px;
84 | }
85 |
86 | div.sphinxsidebar #searchbox input[type="submit"] {
87 | width: 30px;
88 | }
89 |
90 | img {
91 | border: 0;
92 | }
93 |
94 | /* -- search page ----------------------------------------------------------- */
95 |
96 | ul.search {
97 | margin: 10px 0 0 20px;
98 | padding: 0;
99 | }
100 |
101 | ul.search li {
102 | padding: 5px 0 5px 20px;
103 | background-image: url(file.png);
104 | background-repeat: no-repeat;
105 | background-position: 0 7px;
106 | }
107 |
108 | ul.search li a {
109 | font-weight: bold;
110 | }
111 |
112 | ul.search li div.context {
113 | color: #888;
114 | margin: 2px 0 0 30px;
115 | text-align: left;
116 | }
117 |
118 | ul.keywordmatches li.goodmatch a {
119 | font-weight: bold;
120 | }
121 |
122 | /* -- index page ------------------------------------------------------------ */
123 |
124 | table.contentstable {
125 | width: 90%;
126 | }
127 |
128 | table.contentstable p.biglink {
129 | line-height: 150%;
130 | }
131 |
132 | a.biglink {
133 | font-size: 1.3em;
134 | }
135 |
136 | span.linkdescr {
137 | font-style: italic;
138 | padding-top: 5px;
139 | font-size: 90%;
140 | }
141 |
142 | /* -- general index --------------------------------------------------------- */
143 |
144 | table.indextable {
145 | width: 100%;
146 | }
147 |
148 | table.indextable td {
149 | text-align: left;
150 | vertical-align: top;
151 | }
152 |
153 | table.indextable dl, table.indextable dd {
154 | margin-top: 0;
155 | margin-bottom: 0;
156 | }
157 |
158 | table.indextable tr.pcap {
159 | height: 10px;
160 | }
161 |
162 | table.indextable tr.cap {
163 | margin-top: 10px;
164 | background-color: #f2f2f2;
165 | }
166 |
167 | img.toggler {
168 | margin-right: 3px;
169 | margin-top: 3px;
170 | cursor: pointer;
171 | }
172 |
173 | div.modindex-jumpbox {
174 | border-top: 1px solid #ddd;
175 | border-bottom: 1px solid #ddd;
176 | margin: 1em 0 1em 0;
177 | padding: 0.4em;
178 | }
179 |
180 | div.genindex-jumpbox {
181 | border-top: 1px solid #ddd;
182 | border-bottom: 1px solid #ddd;
183 | margin: 1em 0 1em 0;
184 | padding: 0.4em;
185 | }
186 |
187 | /* -- general body styles --------------------------------------------------- */
188 |
189 | a.headerlink {
190 | visibility: hidden;
191 | }
192 |
193 | h1:hover > a.headerlink,
194 | h2:hover > a.headerlink,
195 | h3:hover > a.headerlink,
196 | h4:hover > a.headerlink,
197 | h5:hover > a.headerlink,
198 | h6:hover > a.headerlink,
199 | dt:hover > a.headerlink {
200 | visibility: visible;
201 | }
202 |
203 | div.body p.caption {
204 | text-align: inherit;
205 | }
206 |
207 | div.body td {
208 | text-align: left;
209 | }
210 |
211 | .field-list ul {
212 | padding-left: 1em;
213 | }
214 |
215 | .first {
216 | margin-top: 0 !important;
217 | }
218 |
219 | p.rubric {
220 | margin-top: 30px;
221 | font-weight: bold;
222 | }
223 |
224 | img.align-left, .figure.align-left, object.align-left {
225 | clear: left;
226 | float: left;
227 | margin-right: 1em;
228 | }
229 |
230 | img.align-right, .figure.align-right, object.align-right {
231 | clear: right;
232 | float: right;
233 | margin-left: 1em;
234 | }
235 |
236 | img.align-center, .figure.align-center, object.align-center {
237 | display: block;
238 | margin-left: auto;
239 | margin-right: auto;
240 | }
241 |
242 | .align-left {
243 | text-align: left;
244 | }
245 |
246 | .align-center {
247 | text-align: center;
248 | }
249 |
250 | .align-right {
251 | text-align: right;
252 | }
253 |
254 | /* -- sidebars -------------------------------------------------------------- */
255 |
256 | div.sidebar {
257 | margin: 0 0 0.5em 1em;
258 | border: 1px solid #ddb;
259 | padding: 7px 7px 0 7px;
260 | background-color: #ffe;
261 | width: 40%;
262 | float: right;
263 | }
264 |
265 | p.sidebar-title {
266 | font-weight: bold;
267 | }
268 |
269 | /* -- topics ---------------------------------------------------------------- */
270 |
271 | div.topic {
272 | border: 1px solid #ccc;
273 | padding: 7px 7px 0 7px;
274 | margin: 10px 0 10px 0;
275 | }
276 |
277 | p.topic-title {
278 | font-size: 1.1em;
279 | font-weight: bold;
280 | margin-top: 10px;
281 | }
282 |
283 | /* -- admonitions ----------------------------------------------------------- */
284 |
285 | div.admonition {
286 | margin-top: 10px;
287 | margin-bottom: 10px;
288 | padding: 7px;
289 | }
290 |
291 | div.admonition dt {
292 | font-weight: bold;
293 | }
294 |
295 | div.admonition dl {
296 | margin-bottom: 0;
297 | }
298 |
299 | p.admonition-title {
300 | margin: 0px 10px 5px 0px;
301 | font-weight: bold;
302 | }
303 |
304 | div.body p.centered {
305 | text-align: center;
306 | margin-top: 25px;
307 | }
308 |
309 | /* -- tables ---------------------------------------------------------------- */
310 |
311 | table.docutils {
312 | border: 0;
313 | border-collapse: collapse;
314 | }
315 |
316 | table.docutils td, table.docutils th {
317 | padding: 1px 8px 1px 5px;
318 | border-top: 0;
319 | border-left: 0;
320 | border-right: 0;
321 | border-bottom: 1px solid #aaa;
322 | }
323 |
324 | table.field-list td, table.field-list th {
325 | border: 0 !important;
326 | }
327 |
328 | table.footnote td, table.footnote th {
329 | border: 0 !important;
330 | }
331 |
332 | th {
333 | text-align: left;
334 | padding-right: 5px;
335 | }
336 |
337 | table.citation {
338 | border-left: solid 1px gray;
339 | margin-left: 1px;
340 | }
341 |
342 | table.citation td {
343 | border-bottom: none;
344 | }
345 |
346 | /* -- other body styles ----------------------------------------------------- */
347 |
348 | ol.arabic {
349 | list-style: decimal;
350 | }
351 |
352 | ol.loweralpha {
353 | list-style: lower-alpha;
354 | }
355 |
356 | ol.upperalpha {
357 | list-style: upper-alpha;
358 | }
359 |
360 | ol.lowerroman {
361 | list-style: lower-roman;
362 | }
363 |
364 | ol.upperroman {
365 | list-style: upper-roman;
366 | }
367 |
368 | dl {
369 | margin-bottom: 15px;
370 | }
371 |
372 | dd p {
373 | margin-top: 0px;
374 | }
375 |
376 | dd ul, dd table {
377 | margin-bottom: 10px;
378 | }
379 |
380 | dd {
381 | margin-top: 3px;
382 | margin-bottom: 10px;
383 | margin-left: 30px;
384 | }
385 |
386 | dt:target, .highlighted {
387 | background-color: #fbe54e;
388 | }
389 |
390 | dl.glossary dt {
391 | font-weight: bold;
392 | font-size: 1.1em;
393 | }
394 |
395 | .field-list ul {
396 | margin: 0;
397 | padding-left: 1em;
398 | }
399 |
400 | .field-list p {
401 | margin: 0;
402 | }
403 |
404 | .refcount {
405 | color: #060;
406 | }
407 |
408 | .optional {
409 | font-size: 1.3em;
410 | }
411 |
412 | .versionmodified {
413 | font-style: italic;
414 | }
415 |
416 | .system-message {
417 | background-color: #fda;
418 | padding: 5px;
419 | border: 3px solid red;
420 | }
421 |
422 | .footnote:target {
423 | background-color: #ffa;
424 | }
425 |
426 | .line-block {
427 | display: block;
428 | margin-top: 1em;
429 | margin-bottom: 1em;
430 | }
431 |
432 | .line-block .line-block {
433 | margin-top: 0;
434 | margin-bottom: 0;
435 | margin-left: 1.5em;
436 | }
437 |
438 | .guilabel, .menuselection {
439 | font-family: sans-serif;
440 | }
441 |
442 | .accelerator {
443 | text-decoration: underline;
444 | }
445 |
446 | .classifier {
447 | font-style: oblique;
448 | }
449 |
450 | abbr, acronym {
451 | border-bottom: dotted 1px;
452 | cursor: help;
453 | }
454 |
455 | /* -- code displays --------------------------------------------------------- */
456 |
457 | pre {
458 | overflow: auto;
459 | overflow-y: hidden; /* fixes display issues on Chrome browsers */
460 | }
461 |
462 | td.linenos pre {
463 | padding: 5px 0px;
464 | border: 0;
465 | background-color: transparent;
466 | color: #aaa;
467 | }
468 |
469 | table.highlighttable {
470 | margin-left: 0.5em;
471 | }
472 |
473 | table.highlighttable td {
474 | padding: 0 0.5em 0 0.5em;
475 | }
476 |
477 | tt.descname {
478 | background-color: transparent;
479 | font-weight: bold;
480 | font-size: 1.2em;
481 | }
482 |
483 | tt.descclassname {
484 | background-color: transparent;
485 | }
486 |
487 | tt.xref, a tt {
488 | background-color: transparent;
489 | font-weight: bold;
490 | }
491 |
492 | h1 tt, h2 tt, h3 tt, h4 tt, h5 tt, h6 tt {
493 | background-color: transparent;
494 | }
495 |
496 | .viewcode-link {
497 | float: right;
498 | }
499 |
500 | .viewcode-back {
501 | float: right;
502 | font-family: sans-serif;
503 | }
504 |
505 | div.viewcode-block:target {
506 | margin: -1px -10px;
507 | padding: 0 10px;
508 | }
509 |
510 | /* -- math display ---------------------------------------------------------- */
511 |
512 | img.math {
513 | vertical-align: middle;
514 | }
515 |
516 | div.body div.math p {
517 | text-align: center;
518 | }
519 |
520 | span.eqno {
521 | float: right;
522 | }
523 |
524 | /* -- printout stylesheet --------------------------------------------------- */
525 |
526 | @media print {
527 | div.document,
528 | div.documentwrapper,
529 | div.bodywrapper {
530 | margin: 0 !important;
531 | width: 100%;
532 | }
533 |
534 | div.sphinxsidebar,
535 | div.related,
536 | div.footer,
537 | #top-link {
538 | display: none;
539 | }
540 | }
--------------------------------------------------------------------------------
/tutorials/matchpoint/DTPs.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Implementing the EffTox Dose-Finding Design in the Matchpoint Trials"
8 | ]
9 | },
10 | {
11 | "cell_type": "markdown",
12 | "metadata": {},
13 | "source": [
    14 |     "This tutorial complements the manuscript _Implementing the EffTox Dose-Finding Design in the Matchpoint Trial_ (Brock _et al_., in submission). Please consult the paper for the clinical background, the methodology details, and full explanation of the terminology."
15 | ]
16 | },
17 | {
18 | "cell_type": "markdown",
19 | "metadata": {},
20 | "source": [
21 | "## Dose Transition Pathways"
22 | ]
23 | },
24 | {
25 | "cell_type": "markdown",
26 | "metadata": {},
27 | "source": [
28 | "In this notebook, we illustrate the calculation of _dose transition pathways_ (DTPs) in the seamless phase I/II dose-finding clinical trial, Matchpoint."
29 | ]
30 | },
31 | {
32 | "cell_type": "code",
33 | "execution_count": 1,
34 | "metadata": {
35 | "collapsed": true
36 | },
37 | "outputs": [],
38 | "source": [
39 | "import numpy as np\n",
40 | "from scipy.stats import norm\n",
41 | "\n",
42 | "from clintrials.dosefinding.efftox import EffTox, LpNormCurve, efftox_dtp_detail\n",
43 | "from clintrials.dosefinding.efficacytoxicity import dose_transition_pathways, print_dtps"
44 | ]
45 | },
46 | {
47 | "cell_type": "code",
48 | "execution_count": 2,
49 | "metadata": {
50 | "collapsed": true
51 | },
52 | "outputs": [],
53 | "source": [
54 | "real_doses = [7.5, 15, 30, 45]\n",
55 | "trial_size = 30\n",
56 | "cohort_size = 3\n",
57 | "first_dose = 3\n",
58 | "prior_tox_probs = (0.025, 0.05, 0.1, 0.25)\n",
59 | "prior_eff_probs = (0.2, 0.3, 0.5, 0.6)\n",
60 | "tox_cutoff = 0.40\n",
61 | "eff_cutoff = 0.45\n",
62 | "tox_certainty = 0.05\n",
63 | "eff_certainty = 0.03"
64 | ]
65 | },
66 | {
67 | "cell_type": "code",
68 | "execution_count": 3,
69 | "metadata": {
70 | "collapsed": true
71 | },
72 | "outputs": [],
73 | "source": [
74 | "mu_t_mean, mu_t_sd = -5.4317, 2.7643\n",
75 | "beta_t_mean, beta_t_sd = 3.1761, 2.7703\n",
76 | "mu_e_mean, mu_e_sd = -0.8442, 1.9786\n",
77 | "beta_e_1_mean, beta_e_1_sd = 1.9857, 1.9820\n",
78 | "beta_e_2_mean, beta_e_2_sd = 0, 0.2\n",
79 | "psi_mean, psi_sd = 0, 1\n",
80 | "efftox_priors = [\n",
81 | " norm(loc=mu_t_mean, scale=mu_t_sd),\n",
82 | " norm(loc=beta_t_mean, scale=beta_t_sd),\n",
83 | " norm(loc=mu_e_mean, scale=mu_e_sd),\n",
84 | " norm(loc=beta_e_1_mean, scale=beta_e_1_sd),\n",
85 | " norm(loc=beta_e_2_mean, scale=beta_e_2_sd),\n",
86 | " norm(loc=psi_mean, scale=psi_sd),\n",
87 | " ]"
88 | ]
89 | },
90 | {
91 | "cell_type": "markdown",
92 | "metadata": {},
93 | "source": [
94 | "The above parameters are explained in the manuscript."
95 | ]
96 | },
97 | {
98 | "cell_type": "code",
99 | "execution_count": 4,
100 | "metadata": {
101 | "collapsed": true
102 | },
103 | "outputs": [],
104 | "source": [
105 | "hinge_points = [(0.4, 0), (1, 0.7), (0.5, 0.4)]\n",
106 | "metric = LpNormCurve(hinge_points[0][0], hinge_points[1][1], hinge_points[2][0], hinge_points[2][1])"
107 | ]
108 | },
109 | {
110 | "cell_type": "code",
111 | "execution_count": 5,
112 | "metadata": {
113 | "collapsed": true
114 | },
115 | "outputs": [],
116 | "source": [
117 | "et = EffTox(real_doses, efftox_priors, tox_cutoff, eff_cutoff, tox_certainty, eff_certainty, metric, trial_size,\n",
118 | " first_dose)"
119 | ]
120 | },
121 | {
122 | "cell_type": "markdown",
123 | "metadata": {},
124 | "source": [
125 | "The EffTox class is an object-oriented implementation of the trial design by Thall & Cook (Thall, P. F., & Cook, J. D. (2004). Dose-Finding Based on Efficacy-Toxicity Trade-Offs. Biometrics, 60(3), 684–693.)"
126 | ]
127 | },
128 | {
129 | "cell_type": "markdown",
130 | "metadata": {},
131 | "source": [
132 | "Create patient outcomes 3TTT. Outcomes for a patient are represented by a three item tuple, where:\n",
133 | "\n",
134 | "- first item is 1-based dose-index give (i.e. 3 is dose-level 3);\n",
135 | "- second item is 1 if toxicity happened, else 0;\n",
136 | "- third item is 1 if efficacy happened, else 0.\n",
137 | "\n",
138 | "Outcomes for several patients are represented as lists:"
139 | ]
140 | },
141 | {
142 | "cell_type": "code",
143 | "execution_count": 6,
144 | "metadata": {
145 | "collapsed": true
146 | },
147 | "outputs": [],
148 | "source": [
149 | "outcomes = [(3, 1, 0), (3, 1, 0), (3, 1, 0)]"
150 | ]
151 | },
152 | {
153 | "cell_type": "code",
154 | "execution_count": 7,
155 | "metadata": {
156 | "collapsed": false
157 | },
158 | "outputs": [
159 | {
160 | "data": {
161 | "text/plain": [
162 | "2"
163 | ]
164 | },
165 | "execution_count": 7,
166 | "metadata": {},
167 | "output_type": "execute_result"
168 | }
169 | ],
170 | "source": [
171 | "np.random.seed(123)\n",
172 | "et.update(outcomes)"
173 | ]
174 | },
175 | {
176 | "cell_type": "markdown",
177 | "metadata": {},
178 | "source": [
179 | "The next dose recommended is 2.\n",
180 | "\n",
181 | "Calculate DTPs for the next cohort of three patients. n=100,000 will give a fairly quick and dirty estimate of the DTPs."
182 | ]
183 | },
184 | {
185 | "cell_type": "code",
186 | "execution_count": 8,
187 | "metadata": {
188 | "collapsed": false
189 | },
190 | "outputs": [],
191 | "source": [
192 | "np.random.seed(123)\n",
193 | "dtps = dose_transition_pathways(et, next_dose=2, cohort_sizes=[3], cohort_number=2, \n",
194 | " cases_already_observed=outcomes, custom_output_func=efftox_dtp_detail,\n",
195 | " n=10**5)"
196 | ]
197 | },
198 | {
199 | "cell_type": "code",
200 | "execution_count": 9,
201 | "metadata": {
202 | "collapsed": false
203 | },
204 | "outputs": [
205 | {
206 | "name": "stdout",
207 | "output_type": "stream",
208 | "text": [
209 | "2NNN -> Dose 3, Superiority=0.11 * tentative *\n",
210 | "2NNE -> Dose 1, Superiority=0.57 * tentative *\n",
211 | "2NNT -> Dose -1, Superiority=nan\n",
212 | "2NNB -> Dose 1, Superiority=0.47 * tentative *\n",
213 | "2NEE -> Dose 1, Superiority=0.64\n",
214 | "2NET -> Dose 1, Superiority=0.58 * tentative *\n",
215 | "2NEB -> Dose 1, Superiority=0.69\n",
216 | "2NTT -> Dose -1, Superiority=nan\n",
217 | "2NTB -> Dose 1, Superiority=0.6\n",
218 | "2NBB -> Dose 1, Superiority=0.86\n",
219 | "2EEE -> Dose 1, Superiority=0.86\n",
220 | "2EET -> Dose 1, Superiority=0.66\n",
221 | "2EEB -> Dose 1, Superiority=0.9\n",
222 | "2ETT -> Dose 1, Superiority=0.7\n",
223 | "2ETB -> Dose 1, Superiority=0.81\n",
224 | "2EBB -> Dose 1, Superiority=0.9\n",
225 | "2TTT -> Dose -1, Superiority=nan\n",
226 | "2TTB -> Dose 1, Superiority=0.65\n",
227 | "2TBB -> Dose 1, Superiority=0.8\n",
228 | "2BBB -> Dose 1, Superiority=0.87\n"
229 | ]
230 | }
231 | ],
232 | "source": [
233 | "print_dtps(dtps)"
234 | ]
235 | },
236 | {
237 | "cell_type": "markdown",
238 | "metadata": {},
239 | "source": [
240 | "These match Table 3 in the publication.\n",
241 | "\n",
242 | "_Superiority_ is the least of the three pairwise probabilities that the utility of the recommended dose is greater than each of the other doses.\n",
243 | "Ideally, we want Superiority to be high to be confident that the dose recommended genuinely has the highest utility.\n",
244 | "Scenarios where Superiority is less than 0.6 are marked as _tentative_.\n",
245 | "Dose ambivalence may be a problem here.\n",
246 | "Sometimes, the design may be restricted from recommending the best dose because of no-skipping rules and dose-inadmissibility.\n",
247 | "\n",
248 | "Using n=1,000,000 will be appreciably slower for 20 paths, but will be more accurate:"
249 | ]
250 | },
251 | {
252 | "cell_type": "code",
253 | "execution_count": 10,
254 | "metadata": {
255 | "collapsed": true
256 | },
257 | "outputs": [],
258 | "source": [
259 | "dtps2 = dose_transition_pathways(et, next_dose=2, cohort_sizes=[3], cohort_number=2, \n",
260 | " cases_already_observed=outcomes, custom_output_func=efftox_dtp_detail,\n",
261 | " n=10**6)"
262 | ]
263 | },
264 | {
265 | "cell_type": "code",
266 | "execution_count": 11,
267 | "metadata": {
268 | "collapsed": false
269 | },
270 | "outputs": [
271 | {
272 | "name": "stdout",
273 | "output_type": "stream",
274 | "text": [
275 | "2NNN -> Dose 3, Superiority=0.16 * tentative *\n",
276 | "2NNE -> Dose 1, Superiority=0.44 * tentative *\n",
277 | "2NNT -> Dose -1, Superiority=nan\n",
278 | "2NNB -> Dose 1, Superiority=0.52 * tentative *\n",
279 | "2NEE -> Dose 1, Superiority=0.62\n",
280 | "2NET -> Dose 1, Superiority=0.6\n",
281 | "2NEB -> Dose 1, Superiority=0.68\n",
282 | "2NTT -> Dose -1, Superiority=nan\n",
283 | "2NTB -> Dose 1, Superiority=0.76\n",
284 | "2NBB -> Dose 1, Superiority=0.81\n",
285 | "2EEE -> Dose 1, Superiority=0.78\n",
286 | "2EET -> Dose 1, Superiority=0.7\n",
287 | "2EEB -> Dose 1, Superiority=0.84\n",
288 | "2ETT -> Dose 1, Superiority=0.74\n",
289 | "2ETB -> Dose 1, Superiority=0.82\n",
290 | "2EBB -> Dose 1, Superiority=0.88\n",
291 | "2TTT -> Dose -1, Superiority=nan\n",
292 | "2TTB -> Dose 1, Superiority=0.74\n",
293 | "2TBB -> Dose 1, Superiority=0.8\n",
294 | "2BBB -> Dose 1, Superiority=0.89\n"
295 | ]
296 | }
297 | ],
298 | "source": [
299 | "print_dtps(dtps2)"
300 | ]
301 | },
302 | {
303 | "cell_type": "markdown",
304 | "metadata": {},
305 | "source": [
306 | "Again, these match Table 3."
307 | ]
308 | },
309 | {
310 | "cell_type": "markdown",
311 | "metadata": {
312 | "collapsed": true
313 | },
314 | "source": [
    315 |     "We use DTPs continuously in the running of the Matchpoint trial. They aid in planning, they help to overcome outcome ambiguity, they have highlighted hidden undesirable behaviour implicit in our parameter choices, etc."
316 | ]
317 | },
318 | {
319 | "cell_type": "code",
320 | "execution_count": null,
321 | "metadata": {
322 | "collapsed": true
323 | },
324 | "outputs": [],
325 | "source": []
326 | }
327 | ],
328 | "metadata": {
329 | "anaconda-cloud": {},
330 | "kernelspec": {
331 | "display_name": "Python 2",
332 | "language": "python",
333 | "name": "python2"
334 | },
335 | "language_info": {
336 | "codemirror_mode": {
337 | "name": "ipython",
338 | "version": 2
339 | },
340 | "file_extension": ".py",
341 | "mimetype": "text/x-python",
342 | "name": "python",
343 | "nbconvert_exporter": "python",
344 | "pygments_lexer": "ipython2",
345 | "version": "2.7.11"
346 | }
347 | },
348 | "nbformat": 4,
349 | "nbformat_minor": 0
350 | }
351 |
--------------------------------------------------------------------------------
/clintrials/recruitment.py:
--------------------------------------------------------------------------------
1 | __author__ = 'Kristian Brock'
2 | __contact__ = 'kristian.brock@gmail.com'
3 |
4 |
5 | import abc
6 | import copy
7 | import numpy as np
8 |
9 |
10 | """ Classes and functions for modelling recruitment to clinical trials. """
11 |
12 |
class RecruitmentStream(object):
    """ Abstract base class for recruitment streams.

    A recruitment stream models the arrival times of patients in a clinical
    trial. Concrete subclasses implement next() to yield successive arrival
    times and reset() to restart the stream from time zero.
    """

    # NOTE(review): __metaclass__ is the Python 2 ABC mechanism; under
    # Python 3 this attribute is ignored and the class is not abstract.
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def reset(self):
        """ Reset the recruitment stream to start anew.

        :return: None
        :rtype: None

        """
        pass

    @abc.abstractmethod
    def next(self):
        """ Get the time that the next patient is recruited.

        :return: The time that the next patient is recruited.
        :rtype: float

        """
        pass
36 |
37 |
class ConstantRecruitmentStream(RecruitmentStream):
    """ Recruitment stream with a fixed gap between successive patients.

    The simplest possible stream: one patient arrives every delta units of
    time, so arrival times are delta, 2*delta, 3*delta, ...

    E.g.

    >>> s = ConstantRecruitmentStream(2.5)
    >>> s.next()
    2.5
    >>> s.next()
    5.0
    >>> s.next()
    7.5
    >>> s.reset()
    >>> s.next()
    2.5


    """

    def __init__(self, intrapatient_gap):
        """ Create instance

        :param intrapatient_gap: the gap between recruitment times, aka delta.
        :type intrapatient_gap: float

        """

        # delta is the constant inter-arrival time; cursor tracks the time
        # of the most recently recruited patient.
        self.delta = intrapatient_gap
        self.cursor = 0

    def reset(self):
        """ Reset the recruitment stream to start anew.

        :return: None
        :rtype: None

        """

        self.cursor = 0

    def next(self):
        """ Get the time that the next patient is recruited.

        :return: The time that the next patient is recruited.
        :rtype: float

        """
        self.cursor = self.cursor + self.delta
        return self.cursor
89 |
90 |
class QuadrilateralRecruitmentStream(RecruitmentStream):
    """ Recruitment stream that allows recruitment potential to vary as a function of time using vertices.
    Between two vertices, recruitment potential is represented by areas of quadrilaterals. Recruitment potential
    may change linearly using interpolation, or instantaneously using steps. In the former case, the quadrilaterals
    are trapeziums; in the latter, rectangles.

    I started by calling this class DampenedRecruitmentStream because recruitment typically opens at something
    like 50% potency where half recruitment centres are open and then increases linearly to 100% after about a year.
    However, I settled on the name QuadrilateralRecruitmentStream because of the important role quadrilaterals play in
    calculating the cumulative recruitment mass between two times.

    Let's do an example. Imagine a hypothetical trial that will recruit using several recruitment centres. When all
    recruitment centres are open, the trial expects to recruit a patient every four days, thus the intrapatient gap
    is 4.0. The trial will open with initial recruitment potential of 50% (i.e. half of the recruiting sites are open).
    Recruitment potential is expected to reach 100% after 20 days, linearly increasing from 50% to 100% over the first
    20 days, i.e. recruitment centres will be continually opened at a constant rate. The first patient will be recruited
    at time t where t satisfies the integral equation

    :math:`\\int_0^t 0.5 + \\frac{1.0 - 0.5}{20 - 0}s ds = \\int_0^t 0.5 + \\frac{s}{40} ds
    = \\frac{t}{2} + \\frac{t^2}{80} = 4`

    i.e. solving the quadratic

    :math:`t = \\frac{-\\frac{1}{2} + \\sqrt{\\frac{1}{2}^2 - 4 \\times \\frac{1}{80} \\times -4}}{\\frac{2}{80}}
    = 6.83282`

    , and so on. The root of the quadratic yielded by :math:`-b - \\sqrt{b^2-4ac}` is ignored because it makes no sense.

    E.g.

    >>> s1 = QuadrilateralRecruitmentStream(4.0, 0.5, [(20, 1.0)], interpolate=True)
    >>> s1.next()
    6.8328157299974768
    >>> s1.next()
    12.2490309931942
    >>> s1.next()
    16.878177829171548
    >>> s1.next()
    21.0
    >>> s1.next()
    25.0

    Now, let's consider the same scenario again, with stepped transition rather than interpolated transition. In this
    scenario, a patient is recruited after each 4 / 0.5 = 8 days for times from 0 to 20 when recruitment potential is
    at 50%. After time=20, a patient is recruited after every 4 days because recruitment potential is at 100%. For the
    patient that straddles the time t=20, the time to recruit is 4 days at 50% potential plus 2 days at 100% = 4 days,
    as required.

    E.g.

    >>> s2 = QuadrilateralRecruitmentStream(4.0, 0.5, [(20, 1.0)], interpolate=False)
    >>> s2.next()
    8.0
    >>> s2.next()
    16.0
    >>> s2.next()
    22.0
    >>> s2.next()
    26.0

    """

    def __init__(self, intrapatient_gap, initial_intensity, vertices, interpolate=True):
        """ Create instance

        :param intrapatient_gap: time to recruit one patient at 100% recruitment intensity, i.e. the gap between
        recruitment times when recruitment is at 100% intensity.
        :type intrapatient_gap: float
        :param initial_intensity: recruitment commences at this % of total power.
                                    E.g. if it takes 2 days to recruit a patient at full recruitment power,
                                    at intensity 0.1 it will take 20 days to recruit a patient.
                                    TODO: zero? negative?
        :type initial_intensity: float
        :param vertices: list of additional vertices as (time t, intensity r) tuples, where recruitment power is r% at t
                            Recruitment intensity is linearly extrapolated between vertex times, including the origin, t=0.
                            .. note::
                                - intensity can dampen (e.g. intensity=50%) or amplify (e.g. intensity=150%) average recruitment;
                                - intensity should not be negative. Any negative values will yield a TypeError
        :type vertices: list of (float, float) tuples
        :param interpolate: True to linearly interpolate between vertices; False to use steps.
        :type interpolate: bool

        """

        self.delta = intrapatient_gap
        self.initial_intensity = initial_intensity
        self.interpolate = interpolate

        # Work on a sorted copy so that the caller's vertices list is not
        # mutated (the previous implementation sorted the argument in place).
        v = sorted(vertices, key=lambda x: x[0])
        self.shapes = {}  # t1 -> t0, t1, y0, y1 vertex parameters
        # NOTE(review): attribute name retains its historical misspelling
        # ("recruiment") for backward compatibility with existing callers.
        self.recruiment_mass = {}  # t1 -> recruitment mass available (i.e. area of quadrilateral) to left of t1
        if len(v) > 0:
            t0 = 0
            y0 = initial_intensity
            for x in v:
                t1, y1 = x
                if interpolate:
                    mass = 0.5 * (t1-t0) * (y0+y1)  # Area of trapezium
                else:
                    mass = (t1-t0) * y0  # Area of rectangle
                self.recruiment_mass[t1] = mass
                self.shapes[t1] = (t0, t1, y0, y1)
                t0, y0 = t1, y1
            self.available_mass = copy.copy(self.recruiment_mass)
        else:
            self.available_mass = {}
        self.vertices = v
        self.cursor = 0

    def reset(self):
        """ Reset the recruitment stream to start anew.

        :return: None
        :rtype: None

        """

        self.cursor = 0
        self.available_mass = copy.copy(self.recruiment_mass)

    def next(self):
        """ Get the time that the next patient is recruited.

        :return: The time that the next patient is recruited.
        :rtype: float

        """

        sought_mass = self.delta
        t = sorted(self.available_mass.keys())
        for t1 in t:
            avail_mass = self.available_mass[t1]
            t0, _, y0, y1 = self.shapes[t1]
            if avail_mass >= sought_mass:
                # This quadrilateral has enough remaining mass; advance the
                # cursor within it by inverting the area function.
                if self.interpolate:
                    y_at_cursor = self._linearly_interpolate_y(self.cursor, t0, t1, y0, y1)
                    new_cursor = self._invert(self.cursor, t1, y_at_cursor, y1, sought_mass)
                    self.cursor = new_cursor
                else:
                    y_at_cursor = y0
                    new_cursor = self._invert(self.cursor, t1, y_at_cursor, y1, sought_mass, as_rectangle=True)
                    self.cursor = new_cursor

                self.available_mass[t1] -= sought_mass
                return self.cursor
            else:
                # Exhaust this quadrilateral and carry the shortfall forward.
                sought_mass -= avail_mass
                self.available_mass[t1] = 0.0
                if t1 > self.cursor:
                    self.cursor = t1

        # Got here? Satisfy outstanding sought mass using terminal recruitment intensity.
        # When vertices exist, y1 holds the intensity of the last (largest-t) vertex
        # because the loop above iterates the vertex times in ascending order.
        terminal_rate = y1 if len(self.vertices) else self.initial_intensity
        if terminal_rate > 0:
            self.cursor += sought_mass / terminal_rate
            return self.cursor
        else:
            return np.nan

    def _linearly_interpolate_y(self, t, t0, t1, y0, y1):
        """ Linearly interpolate y-value at t using line through (t0, y0) and (t1, y1) """
        if t1 == t0:
            # The line either has infinite gradient or is not a line at all, but a point. No logical response
            return np.nan
        else:
            m = (y1-y0) / (t1-t0)
            return y0 + m * (t-t0)

    def _invert(self, t0, t1, y0, y1, mass, as_rectangle=False):
        """ Returns time t at which the area of quadrilateral with vertices at t0, t, f(t), f(t0) equals mass. """
        if t1 == t0:
            # The quadrilateral has no area
            return np.nan
        elif y0 == y1 and y0 <= 0:
            # The quadrilateral has no area or is badly defined
            return np.nan
        elif (y0 == y1 and y0 > 0) or as_rectangle:
            # We require area of a rectangle; easy!
            return t0 + 1.0 * mass / y0
        else:
            # We require area of a trapezium. That requires solving a quadratic.
            m = (y1-y0) / (t1-t0)
            discriminant = y0**2 + 2 * m * mass
            if discriminant < 0:
                raise TypeError('Discriminant is negative')
            z = np.sqrt(discriminant)
            tau0 = (-y0 + z) / m
            tau1 = (-y0 - z) / m
            if tau0 + t0 > 0:
                return t0 + tau0
            else:
                assert(t0 + tau1 > 0)
                return t0 + tau1
--------------------------------------------------------------------------------
/clintrials/simulation.py:
--------------------------------------------------------------------------------
1 | __author__ = 'Kristian Brock'
2 | __contact__ = 'kristian.brock@gmail.com'
3 |
4 |
5 | from collections import OrderedDict
6 | from datetime import datetime
7 | import glob
8 | import itertools
9 | import json
10 |
11 |
def run_sims(sim_func, n1=1, n2=1, out_file=None, **kwargs):
    """ Run simulations using a delegate function.

    :param sim_func: Delegate function to be called to yield single simulation.
    :type sim_func: func
    :param n1: Number of batches
    :type n1: int
    :param n2: Number of iterations per batch
    :type n2: int
    :param out_file: Location of file for incremental saving after completion of each batch.
    :type out_file: str
    :param kwargs: key-word args for sim_func
    :type kwargs: dict

    .. note::

        - n1 * n2 simulations are performed, in all.
        - sim_func is expected to return a JSON-able object
        - file is saved after each of n1 iterations, where applicable.

    """

    all_sims = []
    for batch in range(n1):
        # Run one batch of n2 simulations and accumulate the results.
        all_sims.extend(sim_func(**kwargs) for _ in range(n2))
        if out_file:
            # Persist everything so far; a write failure should not abort the run.
            try:
                with open(out_file, 'w') as fh:
                    json.dump(all_sims, fh)
            except Exception as e:
                print('Error writing: %s' % e)
        # Progress line: batch index, timestamp, cumulative count.
        print('{} {} {}'.format(batch, datetime.now(), len(all_sims)))
    return all_sims
46 |
47 |
def sim_parameter_space(sim_func, ps, n1=1, n2=None, out_file=None):
    """ Run simulations using a function and a ParameterSpace.

    :param sim_func: function to be called to yield single simulation. Parameters are provided via ps as unpacked kwargs
    :type sim_func: func
    :param ps: Parameter space to explore via simulation
    :type ps: clintrials.util.ParameterSpace
    :param n1: Number of batches
    :type n1: int
    :param n2: Number of iterations per batch; defaults to ps.size() when falsy or non-positive
    :type n2: int
    :param out_file: Location of file for incremental saving after completion of each batch.
    :type out_file: str

    .. note::

        - n1 * n2 simulations are performed, in all.
        - sim_func is expected to return a JSON-able object
        - file is saved after each of n1 iterations, where applicable.

    """

    if not n2 or n2 <= 0:
        n2 = ps.size()

    sims = []
    params_iterator = ps.get_cyclical_iterator()
    for j in range(n1):
        # Use the builtin next() rather than iterator.next() so this works
        # under Python 3 as well as Python 2 (.next() is Python-2-only).
        sims1 = [sim_func(**next(params_iterator)) for i in range(n2)]
        sims += sims1
        if out_file:
            # Persist everything so far; a write failure should not abort the run.
            try:
                with open(out_file, 'w') as outfile:
                    json.dump(sims, outfile)
            except Exception as e:
                print('Error writing: %s' % e)
        print('{} {} {}'.format(j, datetime.now(), len(sims)))
    return sims
86 |
87 |
88 | def _open_json_local(file_loc):
89 | return json.load(open(file_loc, 'r'))
90 |
91 |
def _open_json_url(url):
    """ Fetch a URL and return its deserialised JSON content.

    Works under both Python 2 (urllib2.urlopen) and Python 3
    (urllib.request.urlopen).

    :param url: URL of a JSON resource
    :type url: str
    :returns: the deserialised JSON content
    """
    try:
        from urllib2 import urlopen
    except ImportError:
        # Python 3: urllib2 was merged into urllib.request. The previous
        # fallback (from urllib import urlopen) does not exist in Python 3.
        from urllib.request import urlopen
    return json.load(urlopen(url))
98 |
99 |
def go_fetch_json_sims(file_pattern):
    """ Load and concatenate simulations from every JSON file matching a glob pattern.

    :param file_pattern: glob pattern of files containing JSON lists of sims
    :type file_pattern: str
    :returns: concatenated list of sims from all matched files
    :rtype: list
    """
    sims = []
    for file_loc in glob.glob(file_pattern):
        chunk = _open_json_local(file_loc)
        # Report each file and how many sims it contributed.
        print('{} {}'.format(file_loc, len(chunk)))
        sims.extend(chunk)
    print('Fetched %s sims' % len(sims))
    return sims
109 |
110 |
def filter_sims(sims, filter):
    """ Filter a list of simulations.

    :param sims: list of simulations (probably in JSON format)
    :type sims: list
    :param filter: map of item -> value pairs that forms the filter. Exact matches are retained.
    :type filter: dict
    :returns: the sims that match every item in the filter
    :rtype: list

    """

    # Use items() rather than iteritems() so this works under Python 3 as
    # well as Python 2 (dict.iteritems() was removed in Python 3).
    for key, val in filter.items():
        # In JSON, tuples are masked as lists. In this filter, we treat them as equivalent:
        if isinstance(val, tuple):
            sims = [x for x in sims if x[key] == val or x[key] == list(val)]
        else:
            sims = [x for x in sims if x[key] == val]
    return sims
128 |
129 |
def summarise_sims(sims, ps, func_map, var_map=None, to_pandas=True):
    """ Summarise a list of simulations.

    Method partitions simulations into subsets that used the same set of parameters, and then invokes
    a collection of summary functions on each subset; outputs a pandas DataFrame with a multi-index.

    :param sims: list of simulations (probably in JSON format)
    :type sims: list
    :param ps: ParameterSpace that will explain how to filter simulations
    :type ps: ParameterSpace
    :param var_map: map from variable name in simulation JSON to arg name in ParameterSpace
    :type var_map: dict
    :param func_map: map from item name to function that takes list of sims and parameter map as args and returns
                        a summary statistic or object.
    :type func_map: dict
    :param to_pandas: True, to get a pandas.DataFrame; False, to get several lists
    :type to_pandas: bool

    """

    if var_map is None:
        # No mapping given: assume sim variable names equal parameter names.
        var_names = ps.keys()
        var_map = {}
        for var_name in var_names:
            var_map[var_name] = var_name
    else:
        var_names = var_map.keys()

    z = [(var_name, ps[var_map[var_name]]) for var_name in var_names]
    labels, val_arrays = zip(*z)
    # Every combination of parameter values defines one candidate subset.
    param_combinations = list(itertools.product(*val_arrays))
    index_tuples = []
    row_tuples = []
    for param_combo in param_combinations:
        these_params = dict(zip(labels, param_combo))
        these_sims = filter_sims(sims, these_params)
        if len(these_sims):
            # Use items() rather than iteritems() so this works under
            # Python 3 as well as Python 2.
            these_metrics = dict([(label, func(these_sims, these_params)) for label, func in func_map.items()])
            index_tuples.append(param_combo)
            row_tuples.append(these_metrics)
    if len(row_tuples):
        if to_pandas:
            import pandas as pd
            return pd.DataFrame(row_tuples, pd.MultiIndex.from_tuples(index_tuples, names=var_names))
        else:
            # TODO
            return row_tuples, index_tuples
    else:
        # No sims matched any parameter combination; return an empty result.
        if to_pandas:
            import pandas as pd
            return pd.DataFrame(columns=func_map.keys())
        else:
            # TODO
            return [], []
184 |
185 |
186 | # Map-Reduce methods for summarising sims in memory-efficient ways
def map_reduce_files(files, map_func, reduce_func):
    """
    Invoke map_func on each file in files and reduce results using reduce_func.

    :param files: list of files that contain simulations
    :type files: list
    :param map_func: function to create summary content for object x
    :type map_func: function
    :param reduce_func: function to reduce summary content of objects x & y
    :type reduce_func: function

    :returns: the result of folding reduce_func over the mapped values
    :raises TypeError: if files is empty

    """
    # reduce is a builtin in Python 2 but lives in functools in Python 3;
    # importing from functools works under both.
    from functools import reduce
    if len(files):
        x = map(map_func, files)
        return reduce(reduce_func, x)
    else:
        raise TypeError('No files')
207 |
208 |
def invoke_map_reduce_function_map(sims, function_map):
    """ Invokes map/reduce pattern for many items on a list of simulations.
    Functions are specified as "item name" -> (map_func, reduce_func) pairs in function_map.
    In each iteration, map_func is invoked on sims, and then reduce_func is invoked on result.
    As usual, map_func takes iterable as single argument and reduce_func takes x and y as args.

    Returns a dict with keys function_map.keys() and values the result of reduce_func
    """

    # reduce is a builtin in Python 2 but lives in functools in Python 3;
    # importing from functools works under both.
    from functools import reduce
    response = OrderedDict()
    # Use items() rather than iteritems() so this works under Python 3
    # as well as Python 2.
    for item, function_tuple in function_map.items():
        map_func, reduce_func = function_tuple
        x = reduce(reduce_func, map(map_func, sims))
        response[item] = x

    return response
225 |
226 |
def reduce_maps_by_summing(x, y):
    """ Combine maps x and y by summing, for every key of x, the x-value with the matching y-value.

    :param x: first map
    :type x: dict
    :param y: second map
    :type y: dict
    :returns: map of summed values, keyed like x
    :rtype: dict

    """

    # Iterating x directly preserves its key order in the result.
    return OrderedDict((key, x[key] + y[key]) for key in x)
243 |
244 |
245 | # I wrote the functions below during a specific analysis.
246 | # TODO: Do they make sense in a general package?
def partition_and_aggregate(sims, ps, function_map):
    """ Partition simulations into subsets that used the same set of parameters,
    then invoke a collection of map/reduce function pairs on each subset.

    :param sims: list of simulations (probably in JSON format)
    :type sims: list
    :param ps: ParameterSpace that will explain how to filter simulations
    :type ps: ParameterSpace
    :param function_map: map of item -> (map_func, reduce_func) pairs
    :type function_map: dict

    :returns: map of parameter combination to reduced object
    :rtype: dict

    """

    pairs = [(name, ps[name]) for name in ps.keys()]
    labels, value_lists = zip(*pairs)
    results = OrderedDict()
    # Visit every combination in the cross-product of the parameter value lists.
    for combo in itertools.product(*value_lists):
        combo_params = dict(zip(labels, combo))
        matching_sims = filter_sims(sims, combo_params)
        results[combo] = invoke_map_reduce_function_map(matching_sims, function_map)

    return results
276 |
277 |
def fetch_partition_and_aggregate(f, ps, function_map, verbose=False):
    """ Load JSON sims from file f, then hand off to partition_and_aggregate.

    :param f: file location
    :type f: str
    :param ps: ParameterSpace that will explain how to filter simulations
    :type ps: ParameterSpace
    :param function_map: map of item -> (map_func, reduce_func) pairs
    :type function_map: dict
    :param verbose: True to report how many sims were fetched
    :type verbose: bool

    :returns: map of parameter combination to reduced object
    :rtype: dict

    """

    fetched = _open_json_local(f)
    if verbose:
        print('Fetched {} sims from {}'.format(len(fetched), f))
    return partition_and_aggregate(fetched, ps, function_map)
297 |
298 |
def reduce_product_of_two_files_by_summing(x, y):
    """ Reduce the summaries of two files by summing.

    :param x: summary of first file, keyed by parameter combination
    :type x: dict
    :param y: summary of second file, keyed like x
    :type y: dict
    :returns: map of key -> summed inner maps
    :rtype: dict

    """
    combined = OrderedDict()
    # Iterating x directly yields the same keys, in the same order, as x.keys().
    for key in x:
        combined[key] = reduce_maps_by_summing(x[key], y[key])
    return combined
305 |
306 |
def multiindex_dataframe_from_tuple_map(x, labels):
    """ Create pandas.DataFrame from map of param-tuple -> value

    :param x: map of parameter-tuple -> value pairs
    :type x: dict
    :param labels: list of index-level labels, one per element of the key tuples
    :type labels: list
    :returns: DataFrame indexed by a MultiIndex built from the keys of x
    :rtype: pandas.DataFrame

    """
    import pandas as pd
    if not x:
        # Guard the empty case: MultiIndex.from_tuples cannot infer the number
        # of levels from an empty list, so build the empty index explicitly.
        index = pd.MultiIndex.from_arrays([[]] * len(labels), names=labels)
        return pd.DataFrame(index=index)
    # .items() instead of the Python-2-only .iteritems() so this runs on
    # Python 3; the original identity list-comprehension was redundant.
    keys, values = zip(*x.items())
    index = pd.MultiIndex.from_tuples(keys, names=labels)
    return pd.DataFrame(list(values), index=index)
322 |
--------------------------------------------------------------------------------