├── .gitignore
├── .travis.yml
├── LICENSE.txt
├── README.md
├── docker
│   ├── Dockerfile
│   ├── Dockerfile-gpu
│   ├── Makefile
│   └── README.md
├── docs
│   ├── CNAME
│   ├── README.md
│   ├── compile.sh
│   ├── css
│   │   ├── normalize.css
│   │   └── skeleton.css
│   ├── deploy.sh
│   ├── generate_api_navbar_and_symbols.py
│   ├── generate_api_toc.py
│   ├── icons
│   │   ├── android-chrome-144x144.png
│   │   ├── android-chrome-192x192.png
│   │   ├── android-chrome-36x36.png
│   │   ├── android-chrome-48x48.png
│   │   ├── android-chrome-72x72.png
│   │   ├── android-chrome-96x96.png
│   │   ├── apple-touch-icon-114x114.png
│   │   ├── apple-touch-icon-120x120.png
│   │   ├── apple-touch-icon-144x144.png
│   │   ├── apple-touch-icon-152x152.png
│   │   ├── apple-touch-icon-180x180.png
│   │   ├── apple-touch-icon-57x57.png
│   │   ├── apple-touch-icon-60x60.png
│   │   ├── apple-touch-icon-72x72.png
│   │   ├── apple-touch-icon-76x76.png
│   │   ├── apple-touch-icon-precomposed.png
│   │   ├── apple-touch-icon.png
│   │   ├── browserconfig.xml
│   │   ├── favicon-16x16.png
│   │   ├── favicon-32x32.png
│   │   ├── favicon-96x96.png
│   │   ├── favicon.ico
│   │   ├── manifest.json
│   │   ├── mstile-144x144.png
│   │   ├── mstile-150x150.png
│   │   ├── mstile-310x150.png
│   │   ├── mstile-310x310.png
│   │   ├── mstile-70x70.png
│   │   └── safari-pinned-tab.svg
│   ├── images
│   │   ├── automated-transformations-0.png
│   │   ├── automated-transformations-1.png
│   │   ├── beta_bernoulli.png
│   │   ├── decoder.png
│   │   ├── dirichlet-process-fig0.png
│   │   ├── dirichlet-process-fig1.png
│   │   ├── dynamic_graph.png
│   │   ├── edward.png
│   │   ├── edward_200.png
│   │   ├── edward_logo.pdf
│   │   ├── gan-fig0.png
│   │   ├── gan-fig1.png
│   │   ├── getting-started-fig0.png
│   │   ├── getting-started-fig1.png
│   │   ├── github-mark.svg
│   │   ├── hierarchical_model_subgraph.png
│   │   ├── inference_structure.png
│   │   ├── linear-mixed-effects-models-fig0.png
│   │   ├── linear-mixed-effects-models-fig1.png
│   │   ├── linear-mixed-effects-models-fig2.png
│   │   ├── linear-mixed-effects-models-fig3.png
│   │   ├── mixture-density-network-fig0.png
│   │   ├── mixture-density-network-fig1.png
│   │   ├── mixture-density-network-fig2.png
│   │   ├── mixture-density-network-fig3.png
│   │   ├── model_infer_criticize.png
│   │   ├── ppc.png
│   │   ├── probabilistic-pca-fig0.png
│   │   ├── probabilistic-pca-fig1.png
│   │   ├── random_variable_ops.png
│   │   ├── supervised-regression-fig0.png
│   │   ├── supervised-regression-fig1.png
│   │   ├── tensorboard-distributions.png
│   │   ├── tensorboard-graphs-0.png
│   │   ├── tensorboard-graphs-1.png
│   │   ├── tensorboard-histograms.png
│   │   ├── tensorboard-scalars.png
│   │   ├── unsupervised-fig0.png
│   │   └── unsupervised-fig1.png
│   ├── pandoc-code2raw.py
│   ├── parser
│   │   ├── README.md
│   │   ├── doc_generator_visitor.py
│   │   ├── generate.py
│   │   ├── generate_lib.py
│   │   ├── parser.py
│   │   ├── pretty_docs.py
│   │   ├── public_api.py
│   │   ├── py_guide_parser.py
│   │   └── traverse.py
│   ├── strip_p_in_li.py
│   └── tex
│       ├── apa.csl
│       ├── api
│       │   ├── criticism.tex
│       │   ├── data.tex
│       │   ├── index.tex
│       │   ├── inference-classes.tex
│       │   ├── inference-compositionality.tex
│       │   ├── inference-data-subsampling.tex
│       │   ├── inference-development.tex
│       │   ├── inference.tex
│       │   ├── model-compositionality.tex
│       │   ├── model-development.tex
│       │   ├── model.tex
│       │   └── reference.tex
│       ├── bib.bib
│       ├── community.tex
│       ├── contributing.tex
│       ├── getting-started.tex
│       ├── iclr2017.tex
│       ├── index.tex
│       ├── license.tex
│       ├── template-api.pandoc
│       ├── template.pandoc
│       ├── troubleshooting.tex
│       └── tutorials
│           ├── automated-transformations.tex
│           ├── batch-training.tex
│           ├── bayesian-neural-network.tex
│           ├── criticism.tex
│           ├── decoder.tex
│           ├── gan.tex
│           ├── index.tex
│           ├── inference-networks.tex
│           ├── inference.tex
│           ├── klpq.tex
│           ├── klqp.tex
│           ├── latent-space-models.tex
│           ├── linear-mixed-effects-models.tex
│           ├── map-laplace.tex
│           ├── map.tex
│           ├── mixture-density-network.tex
│           ├── model.tex
│           ├── probabilistic-pca.tex
│           ├── supervised-classification.tex
│           ├── supervised-regression.tex
│           ├── tensorboard.tex
│           ├── unsupervised.tex
│           └── variational-inference.tex
├── edward
│   ├── __init__.py
│   ├── criticisms
│   │   ├── __init__.py
│   │   ├── evaluate.py
│   │   ├── ppc.py
│   │   └── ppc_plots.py
│   ├── inferences
│   │   ├── __init__.py
│   │   ├── bigan_inference.py
│   │   ├── conjugacy
│   │   │   ├── __init__.py
│   │   │   ├── conjugacy.py
│   │   │   ├── conjugate_log_probs.py
│   │   │   └── simplify.py
│   │   ├── gan_inference.py
│   │   ├── gibbs.py
│   │   ├── hmc.py
│   │   ├── implicit_klqp.py
│   │   ├── inference.py
│   │   ├── klpq.py
│   │   ├── klqp.py
│   │   ├── laplace.py
│   │   ├── map.py
│   │   ├── metropolis_hastings.py
│   │   ├── monte_carlo.py
│   │   ├── replica_exchange_mc.py
│   │   ├── sghmc.py
│   │   ├── sgld.py
│   │   ├── variational_inference.py
│   │   ├── wake_sleep.py
│   │   └── wgan_inference.py
│   ├── models
│   │   ├── __init__.py
│   │   ├── dirichlet_process.py
│   │   ├── empirical.py
│   │   ├── param_mixture.py
│   │   ├── point_mass.py
│   │   ├── random_variable.py
│   │   └── random_variables.py
│   ├── util
│   │   ├── __init__.py
│   │   ├── graphs.py
│   │   ├── metrics.py
│   │   ├── progbar.py
│   │   ├── random_variables.py
│   │   └── tensorflow.py
│   └── version.py
├── examples
│   ├── bayesian_linear_regression.py
│   ├── bayesian_linear_regression_implicitklqp.py
│   ├── bayesian_logistic_regression.py
│   ├── bayesian_nn.py
│   ├── beta_bernoulli.py
│   ├── beta_bernoulli_conjugate.py
│   ├── bigan.py
│   ├── cox_process.py
│   ├── deep_exponential_family.py
│   ├── dirichlet_categorical.py
│   ├── eight_schools
│   │   ├── eight_schools.py
│   │   ├── eight_schools.stan
│   │   └── eight_schools_pystan.py
│   ├── factor_analysis.py
│   ├── gan_synthetic_data.py
│   ├── gan_wasserstein.py
│   ├── gan_wasserstein_synthetic.py
│   ├── invgamma_normal_mh.py
│   ├── irt.py
│   ├── iwvi.py
│   ├── lstm.py
│   ├── mixture_gaussian_gibbs.py
│   ├── mixture_gaussian_mh.py
│   ├── normal.py
│   ├── normal_normal.py
│   ├── normal_sgld.py
│   ├── pp_dirichlet_process.py
│   ├── pp_dynamic_shape.py
│   ├── pp_persistent_randomness.py
│   ├── pp_stochastic_control_flow.py
│   ├── pp_stochastic_recursion.py
│   ├── probabilistic_matrix_factorization.py
│   ├── probabilistic_pca_subsampling.py
│   ├── rasch_model.py
│   ├── sigmoid_belief_network.py
│   ├── stochastic_block_model.py
│   ├── vae.py
│   └── vae_convolutional.py
├── notebooks
│   ├── automated_transformations.ipynb
│   ├── batch_training.ipynb
│   ├── data
│   │   ├── insteval_dept_ranefs_r.csv
│   │   ├── insteval_instructor_ranefs_r.csv
│   │   └── insteval_student_ranefs_r.csv
│   ├── eight_schools.ipynb
│   ├── gan.ipynb
│   ├── getting_started.ipynb
│   ├── iclr2017.ipynb
│   ├── latent_space_models.ipynb
│   ├── linear_mixed_effects_models.ipynb
│   ├── mixture_density_network.ipynb
│   ├── probabilistic_pca.ipynb
│   ├── supervised_classification.ipynb
│   ├── supervised_regression.ipynb
│   ├── tensorboard.ipynb
│   └── unsupervised.ipynb
├── setup.cfg
├── setup.py
└── tests
    ├── criticisms
    │   ├── evaluate_test.py
    │   ├── metrics_test.py
    │   ├── ppc_plots_test.py
    │   └── ppc_test.py
    ├── data
    │   ├── generate_test_saver.py
    │   ├── generate_toy_data_tfrecords.py
    │   ├── strip_markdown.tpl
    │   ├── test_saver.data-00000-of-00001
    │   ├── test_saver.index
    │   ├── test_saver.meta
    │   └── toy_data.tfrecords
    ├── inferences
    │   ├── ar_process_test.py
    │   ├── bayesian_nn_test.py
    │   ├── conjugacy_test.py
    │   ├── gan_inference_test.py
    │   ├── gibbs_test.py
    │   ├── hmc_test.py
    │   ├── implicitklqp_test.py
    │   ├── inference_auto_transform_test.py
    │   ├── inference_data_test.py
    │   ├── inference_debug_test.py
    │   ├── inference_integer_test.py
    │   ├── inference_reset_test.py
    │   ├── inference_scale_test.py
    │   ├── klpq_test.py
    │   ├── klqp_test.py
    │   ├── laplace_test.py
    │   ├── map_test.py
    │   ├── metropolishastings_test.py
    │   ├── replicaexchangemc_test.py
    │   ├── saver_test.py
    │   ├── sghmc_test.py
    │   ├── sgld_test.py
    │   ├── simplify_test.py
    │   ├── wakesleep_test.py
    │   └── wgan_inference_test.py
    ├── models
    │   ├── bernoulli_doc_test.py
    │   ├── bernoulli_log_prob_test.py
    │   ├── bernoulli_sample_test.py
    │   ├── dirichlet_process_sample_test.py
    │   ├── empirical_sample_test.py
    │   ├── keras_core_layers_test.py
    │   ├── param_mixture_sample_test.py
    │   ├── param_mixture_stats_test.py
    │   ├── point_mass_sample_test.py
    │   ├── random_variable_gradients_test.py
    │   ├── random_variable_operators_test.py
    │   ├── random_variable_session_test.py
    │   ├── random_variable_shape_test.py
    │   └── random_variable_value_test.py
    ├── notebooks
    │   └── notebooks_test.py
    └── util
        ├── check_data_test.py
        ├── check_latent_vars_test.py
        ├── copy_test.py
        ├── dot_test.py
        ├── get_ancestors_test.py
        ├── get_blanket_test.py
        ├── get_children_test.py
        ├── get_control_variate_coef_test.py
        ├── get_descendants_test.py
        ├── get_parents_test.py
        ├── get_siblings_test.py
        ├── get_variables_test.py
        ├── is_independent_test.py
        ├── random_variables_test.py
        ├── rbf_test.py
        ├── to_simplex_test.py
        └── transform_test.py
/.gitignore:
--------------------------------------------------------------------------------
1 | .DS_Store
2 | *.fls
3 | *.acn
4 | *.acr
5 | *.alg
6 | *.aux
7 | *.bbl
8 | *.blg
9 | *.dvi
10 | *.fdb_latexmk
11 | *.glg
12 | *.glo
13 | *.gls
14 | *.idx
15 | *.ilg
16 | *.ind
17 | *.ist
18 | *.lof
19 | *.log
20 | *.lot
21 | *.maf
22 | *.mtc
23 | *.mtc0
24 | *.nav
25 | *.nlo
26 | *.out
27 | *.pdfsync
28 | *.ps
29 | *.snm
30 | *.synctex.gz
31 | *.toc
32 | *.vrb
33 | *.xdy
34 | *.tdo
35 | *.pyc
36 | *.swp
37 | # Byte-compiled / optimized / DLL files
38 | __pycache__/
39 | *.py[cod]
40 |
41 | *.csv
42 |
43 | # C extensions
44 | *.so
45 |
46 | # Distribution / packaging
47 | .Python
48 | env/
49 | build/
50 | develop-eggs/
51 | dist/
52 | downloads/
53 | eggs/
54 | .eggs/
55 | lib/
56 | lib64/
57 | parts/
58 | sdist/
59 | var/
60 | *.egg-info/
61 | .installed.cfg
62 | *.egg
63 |
64 | # Installer logs
65 | pip-log.txt
66 | pip-delete-this-directory.txt
67 |
68 | # Unit test / coverage reports
69 | htmlcov/
70 | .tox/
71 | .coverage
72 | .coverage.*
73 | .cache
74 | nosetests.xml
75 | coverage.xml
76 | *,cover
77 |
78 | # Translations
79 | *.mo
80 | *.pot
81 |
82 | # Django stuff:
83 | *.log
84 |
85 | # MkDocs documentation
86 | site/
87 |
88 | # PyBuilder
89 | target/
90 |
91 | examples/data/mnist
92 | img/
93 | *.ipynb_checkpoints/
94 |
95 | # Web development
96 | docs/api/
97 | docs/tutorials/
98 | docs/*.html
99 |
100 | # IDE related
101 | .idea/
102 | .vscode/
103 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | sudo: required
2 | dist: trusty
3 | language: python
4 | matrix:
5 | include:
6 | - python: 2.7
7 | - python: 3.4
8 | notifications:
9 | email: false
10 | before_install:
11 | # To avoid matplotlib error about X11:
12 | # 'no display name and no $DISPLAY environment variable'
13 | # source: http://docs.travis-ci.com/user/gui-and-headless-browsers/#Starting-a-Web-Server
14 | - "export DISPLAY=:99.0"
15 | - "sh -e /etc/init.d/xvfb start"
16 | install:
17 | # source: http://conda.pydata.org/docs/travis.html
18 | # We do this conditionally because it saves us some downloading if the
19 | # version is the same.
20 | - if [[ "$TRAVIS_PYTHON_VERSION" == "2.7" ]]; then
21 | wget https://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh -O miniconda.sh;
22 | else
23 | wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh;
24 | fi
25 | - bash miniconda.sh -b -p $HOME/miniconda
26 | - export PATH="$HOME/miniconda/bin:$PATH"
27 | - hash -r
28 | - conda config --set always_yes yes --set changeps1 no
29 | - conda update -q conda
30 | # Useful for debugging any issues with conda
31 | - conda info -a
32 |
33 | - conda create -q -n test-environment python=$TRAVIS_PYTHON_VERSION numpy pytest
34 | - source activate test-environment
35 | - pip install pep8 pytest-pep8 pytest-xdist
36 | - pip install python-coveralls pytest-cov
37 | - pip install keras
38 | - pip install matplotlib seaborn scipy
39 | - pip install networkx==1.9.1 observations sklearn
40 | - pip install tensorflow==1.5.0
41 | - pip install pystan
42 | - pip install nbformat nbconvert jupyter_client jupyter
43 | - python setup.py install
44 | script:
45 | # Convert and preprocess Jupyter notebooks to Python files for PEP8.
46 | - for file in notebooks/*.ipynb; do
47 | if [ "$file" != "notebooks/iclr2017.ipynb" ]; then
48 | jupyter nbconvert --template=tests/data/strip_markdown.tpl --to python --stdout $file | grep -v '^get_ipython' | sed '$d' > ${file%.*}.py;
49 | fi
50 | done;
51 | - PYTHONPATH=$PWD:$PYTHONPATH pytest --cov=edward --pep8;
52 |
53 | after_success:
54 | - coveralls
55 |
--------------------------------------------------------------------------------
/LICENSE.txt:
--------------------------------------------------------------------------------
1 | Copyright 2016 Dustin Tran
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | [Edward](http://edwardlib.org)
2 |
3 | [Build Status](https://travis-ci.org/blei-lab/edward)
4 | [Coverage Status](https://coveralls.io/github/blei-lab/edward?branch=master)
5 | [Gitter](https://gitter.im/blei-lab/edward?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
6 |
7 | [Edward](http://edwardlib.org) is a Python library for probabilistic modeling,
8 | inference, and criticism. It is a testbed for fast experimentation and research
9 | with probabilistic models, ranging from classical hierarchical models on small
10 | data sets to complex deep probabilistic models on large data sets. Edward fuses
11 | three fields: Bayesian statistics and machine learning, deep learning, and
12 | probabilistic programming.
13 |
14 | It supports __modeling__ with
15 |
16 | + Directed graphical models
17 | + Neural networks (via libraries such as
18 | [`tf.layers`](https://www.tensorflow.org/api_docs/python/tf/layers)
19 | and
20 | [Keras](http://keras.io))
21 | + Implicit generative models
22 | + Bayesian nonparametrics and probabilistic programs
23 |
24 | It supports __inference__ with
25 |
26 | + Variational inference
27 | + Black box variational inference
28 | + Stochastic variational inference
29 | + Generative adversarial networks
30 | + Maximum a posteriori estimation
31 | + Monte Carlo
32 | + Gibbs sampling
33 | + Hamiltonian Monte Carlo
34 | + Stochastic gradient Langevin dynamics
35 | + Compositions of inference
36 | + Expectation-Maximization
37 | + Pseudo-marginal and ABC methods
38 | + Message passing algorithms
39 |
40 | It supports __criticism__ of the model and inference with
41 |
42 | + Point-based evaluations
43 | + Posterior predictive checks
44 |
45 | Edward is built on top of [TensorFlow](https://www.tensorflow.org).
46 | It enables features such as computational graphs, distributed
47 | training, CPU/GPU integration, automatic differentiation, and
48 | visualization with TensorBoard.
49 |
50 | ## Resources
51 |
52 | + [Edward website](http://edwardlib.org)
53 | + [Edward Forum](http://discuss.edwardlib.org)
54 | + [Edward Gitter channel](http://gitter.im/blei-lab/edward)
55 | + [Edward releases](https://github.com/blei-lab/edward/releases)
56 | + [Edward papers, posters, and slides](https://github.com/edwardlib/papers)
57 |
58 | See [Getting Started](http://edwardlib.org/getting-started) for how to install Edward.
59 |
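60 | ## A minimal example
61 |
62 | The sketch below runs one pass of the model-infer-criticize loop on
63 | simulated data. It follows the Getting Started material; treat it as
64 | illustrative rather than canonical, since the exact API may shift
65 | across Edward releases.
66 |
67 | ```python
68 | import edward as ed
69 | import numpy as np
70 | import tensorflow as tf
71 | from edward.models import Normal
72 |
73 | # Model: 50 observations from a Normal whose mean has a Normal prior.
74 | mu = Normal(loc=0.0, scale=1.0)
75 | x = Normal(loc=mu, scale=1.0, sample_shape=50)
76 |
77 | # Inference: fit a variational approximation q(mu) with KLqp.
78 | qmu = Normal(loc=tf.Variable(0.0), scale=tf.nn.softplus(tf.Variable(0.0)))
79 | x_data = np.random.normal(3.0, 1.0, size=50).astype(np.float32)
80 | ed.KLqp({mu: qmu}, data={x: x_data}).run(n_iter=1000)
81 |
82 | # Criticism: score posterior predictive draws against the observed data.
83 | x_post = ed.copy(x, {mu: qmu})
84 | print(ed.evaluate('mean_squared_error', data={x_post: x_data}))
85 | ```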
--------------------------------------------------------------------------------
/docker/Dockerfile:
--------------------------------------------------------------------------------
1 | #################################################################################################################
2 | # Base Images
3 | #################################################################################################################
4 | FROM ubuntu:14.04
5 |
6 | #################################################################################################################
7 | # ENV Setting
8 | #################################################################################################################
9 | ENV CONDA_DIR /opt/conda
10 | ENV PATH $CONDA_DIR/bin:$PATH
11 | ENV LANG C.UTF-8
12 |
13 | #################################################################################################################
14 | # Initial Setting
15 | #################################################################################################################
16 | RUN mkdir -p $CONDA_DIR && \
17 | echo export PATH=$CONDA_DIR/bin:'$PATH' > /etc/profile.d/conda.sh && \
18 | apt-get update && \
19 | apt-get install -y wget git libhdf5-dev g++ graphviz && \
20 | wget --quiet https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh && \
21 | /bin/bash /Miniconda3-latest-Linux-x86_64.sh -f -b -p $CONDA_DIR && \
22 | rm Miniconda3-latest-Linux-x86_64.sh
23 |
24 | #################################################################################################################
25 | # User Setting
26 | #################################################################################################################
27 | ENV NB_USER edward
28 | ENV NB_UID 1000
29 |
30 | RUN useradd -m -s /bin/bash -N -u $NB_UID $NB_USER && \
31 | mkdir -p $CONDA_DIR && \
32 | chown edward $CONDA_DIR -R && \
33 | mkdir -p /src && \
34 | chown edward /src
35 |
36 | USER edward
37 |
38 | #################################################################################################################
39 | # Python Setting
40 | #################################################################################################################
41 | # Python
42 | ARG python_version=3.5.3-0
43 | ARG python_qt_version=4
44 | RUN conda install -y python=${python_version} && \
45 | pip install observations numpy six tensorflow keras prettytensor && \
46 | pip install ipdb pytest pytest-cov python-coveralls coverage==3.7.1 pytest-xdist pep8 pytest-pep8 pydot_ng && \
47 | conda install Pillow scikit-learn matplotlib notebook pandas seaborn pyyaml h5py && \
48 | conda install -y pyqt=${python_qt_version} && \
49 | pip install edward && \
50 | conda clean -yt
51 |
52 | ENV PYTHONPATH='/src/:$PYTHONPATH'
53 |
54 | #################################################################################################################
55 | # WORK Jupyter
56 | #################################################################################################################
57 | WORKDIR /src
58 |
59 | EXPOSE 8888
60 |
61 | CMD jupyter notebook --port=8888 --ip=0.0.0.0
62 |
--------------------------------------------------------------------------------
/docker/Dockerfile-gpu:
--------------------------------------------------------------------------------
1 | #################################################################################################################
2 | # Reference
3 | # https://github.com/NVIDIA/nvidia-docker
4 | #################################################################################################################
5 | FROM nvidia/cuda:8.0-cudnn5-devel
6 |
7 | #################################################################################################################
8 | # ENV Setting
9 | #################################################################################################################
10 | ENV CONDA_DIR /opt/conda
11 | ENV PATH $CONDA_DIR/bin:$PATH
12 | ENV LANG C.UTF-8
13 |
14 | #################################################################################################################
15 | # Initial Setting
16 | #################################################################################################################
17 | RUN mkdir -p $CONDA_DIR && \
18 | echo export PATH=$CONDA_DIR/bin:'$PATH' > /etc/profile.d/conda.sh && \
19 | apt-get update && \
20 | apt-get install -y wget git libhdf5-dev g++ graphviz && \
21 | wget --quiet https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh && \
22 | /bin/bash /Miniconda3-latest-Linux-x86_64.sh -f -b -p $CONDA_DIR && \
23 | rm Miniconda3-latest-Linux-x86_64.sh
24 |
25 | #################################################################################################################
26 | # User Setting
27 | #################################################################################################################
28 | ENV NB_USER edward
29 | ENV NB_UID 1000
30 |
31 | RUN useradd -m -s /bin/bash -N -u $NB_UID $NB_USER && \
32 | mkdir -p $CONDA_DIR && \
33 | chown edward $CONDA_DIR -R && \
34 | mkdir -p /src && \
35 | chown edward /src
36 |
37 | USER edward
38 |
39 | #################################################################################################################
40 | # Python Setting
41 | #################################################################################################################
42 | # Python
43 | ARG python_version=3.5.3-0
44 | RUN conda install -y python=${python_version} && \
45 | pip install observations numpy six tensorflow-gpu keras prettytensor && \
46 | pip install ipdb pytest pytest-cov python-coveralls coverage==3.7.1 pytest-xdist pep8 pytest-pep8 pydot_ng && \
47 | conda install Pillow scikit-learn matplotlib notebook pandas seaborn pyyaml h5py && \
48 | pip install edward && \
49 | conda clean -yt
50 |
51 | ENV PYTHONPATH='/src/:$PYTHONPATH'
52 |
53 | #################################################################################################################
54 | # WORK Jupyter
55 | #################################################################################################################
56 | WORKDIR /src
57 |
58 | EXPOSE 8888
59 |
60 | CMD jupyter notebook --port=8888 --ip=0.0.0.0
61 |
--------------------------------------------------------------------------------
/docker/Makefile:
--------------------------------------------------------------------------------
1 | help:
2 | @cat Makefile
3 |
4 | DATA?="${HOME}/Data"
5 | GPU?=0
6 | DOCKER_FILE=Dockerfile
7 | DOCKER_FILE_GPU=Dockerfile-gpu
8 | DOCKER=docker
9 | DOCKER_GPU=GPU=$(GPU) nvidia-docker
10 | BACKEND=tensorflow
11 | TEST=tests/
12 | SRC=$(shell dirname `pwd`)
13 | CUDA=/usr/local/cuda
14 | LD_LIBRARY=/usr/local/nvidia/lib:/usr/local/nvidia/lib64:/usr/local/cuda/lib64
15 |
16 | # CPU environment
17 |
18 | build:
19 | docker build -t edward --build-arg python_version=3.5 -f $(DOCKER_FILE) .
20 |
21 | bash: build
22 | $(DOCKER) run -it -v $(SRC):/src -v $(DATA):/data edward bash
23 |
24 | ipython: build
25 | $(DOCKER) run -it -v $(SRC):/src -v $(DATA):/data edward ipython
26 |
27 | notebook: build
28 | $(DOCKER) run -it -v $(SRC):/src -v $(DATA):/data --net=host edward
29 |
30 | test: build
31 | $(DOCKER) run -it -v $(SRC):/src -v $(DATA):/data edward pytest $(TEST)
32 |
33 | # GPU environment
34 |
35 | build-gpu:
36 | docker build -t edward-gpu --build-arg python_version=3.5 -f $(DOCKER_FILE_GPU) .
37 |
38 | bash-gpu: build-gpu
39 | $(DOCKER_GPU) run -it -v $(SRC):/src -v $(DATA):/data --env CUDA_HOME=${CUDA} \
40 | --env LD_LIBRARY_PATH=${LD_LIBRARY} \
41 | edward-gpu bash
42 |
43 | ipython-gpu: build-gpu
44 | $(DOCKER_GPU) run -it -v $(SRC):/src -v $(DATA):/data --env CUDA_HOME=${CUDA} \
45 | --env LD_LIBRARY_PATH=${LD_LIBRARY} \
46 | edward-gpu ipython
47 |
48 | notebook-gpu: build-gpu
49 | $(DOCKER_GPU) run -it -v $(SRC):/src -v $(DATA):/data --net=host --env CUDA_HOME=${CUDA} \
50 | --env LD_LIBRARY_PATH=${LD_LIBRARY} \
51 | edward-gpu
52 |
53 | test-gpu: build-gpu
54 | $(DOCKER_GPU) run -it -v $(SRC):/src -v $(DATA):/data --env CUDA_HOME=${CUDA} \
55 | --env LD_LIBRARY_PATH=${LD_LIBRARY} \
56 | edward-gpu pytest $(TEST)
57 |
58 |
--------------------------------------------------------------------------------
/docker/README.md:
--------------------------------------------------------------------------------
1 | # Using Edward via Docker
2 |
3 | This directory contains a `Dockerfile` to make it easy to get up and running with
4 | Edward via [Docker](http://www.docker.com/).
5 |
6 | ## Installing Docker
7 |
8 | General installation instructions are
9 | [on the Docker site](https://docs.docker.com/installation/), but we give some
10 | quick links here:
11 |
12 | * [OSX](https://docs.docker.com/installation/mac/): [Docker Toolbox](https://www.docker.com/toolbox)
13 | * [Ubuntu](https://docs.docker.com/installation/ubuntulinux/)
14 |
15 | ## Installing NVIDIA Docker (GPU Environment)
16 |
17 | General installation instructions are
18 | [on the NVIDIA Docker site](https://github.com/NVIDIA/nvidia-docker).
19 |
20 | ## Running the container
21 |
22 | We use a `Makefile` to wrap the docker commands in simple make targets.
23 |
24 | ### CPU environment
25 |
26 | Build the container and start a jupyter notebook
27 |
28 | $ make notebook
29 |
30 | Build the container and start an IPython shell
31 |
32 | $ make ipython
33 |
34 | Build the container and start a bash shell
35 |
36 | $ make bash
37 |
38 | Build the container and run the tests
39 |
40 | $ make test
41 |
42 | ### GPU environment
43 |
44 | Build the container and start a jupyter notebook
45 |
46 | $ make notebook-gpu
47 |
48 | Build the container and start an IPython shell
49 |
50 | $ make ipython-gpu
51 |
52 | Build the container and start a bash shell
53 |
54 | $ make bash-gpu
55 |
56 | Build the container and run the tests
57 |
58 | $ make test-gpu
59 |
60 | For GPU support, install NVIDIA drivers (ideally the latest) and
61 | [nvidia-docker](https://github.com/NVIDIA/nvidia-docker). Run using
62 |
63 | $ make notebook-gpu GPU=0 # or [ipython, bash]
64 |
65 | Mount a volume for external data sets
66 |
67 | $ make DATA=~/mydata
68 |
69 | Print all make tasks
70 |
71 | $ make help
72 |
73 |
74 | Note: If you have trouble running nvidia-docker, you may try the older method
75 | we used previously, shown below. It is not recommended; if you find a bug in
76 | nvidia-docker, please report it there and use nvidia-docker as described above.
77 |
78 | $ export CUDA_SO=$(\ls /usr/lib/x86_64-linux-gnu/libcuda.* | xargs -I{} echo '-v {}:{}')
79 | $ export DEVICES=$(\ls /dev/nvidia* | xargs -I{} echo '--device {}:{}')
80 | $ docker run -it -p 8888:8888 $CUDA_SO $DEVICES gcr.io/tensorflow/tensorflow:latest-gpu
81 |
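82 | ## Verifying the build
83 |
84 | To sanity-check a freshly built CPU image (this assumes the `edward` image
85 | tag that the Makefile applies; adjust if you tag the image differently):
86 |
87 |     $ docker run -it edward python -c "import edward; print(edward.__version__)"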
--------------------------------------------------------------------------------
/docs/CNAME:
--------------------------------------------------------------------------------
1 | edwardlib.org
2 |
--------------------------------------------------------------------------------
/docs/README.md:
--------------------------------------------------------------------------------
1 | # Edward website
2 |
3 | The back end of our website depends on [pandoc](http://pandoc.org). Pandoc lets us write stand-alone pages for documentation using LaTeX, with functional bibliographies. We also use a custom parser to generate API documentation from the source code's docstrings.
4 |
5 | The front end of our website depends on [skeleton.css](http://getskeleton.com/), [Google Fonts](https://www.google.com/fonts), [highlight.js](https://highlightjs.org/), and [KaTeX](https://khan.github.io/KaTeX/).
6 |
7 | ## Editing the website
8 |
9 | All stand-alone pages are under `docs/tex`. These compile to HTML pages. Our custom pandoc HTML template is `docs/tex/template.pandoc`. Our APA styling for citations is `docs/tex/apa.csl`.
10 |
11 | ## Building the website
12 |
13 | + Install the dependencies
14 | ```{bash}
15 | pip install argparse beautifulsoup4 ghp-import observations pandoc pandoc-attributes pandocfilters PyYAML
16 | ```
17 | + You can build the website locally. Go to the `docs/` directory and run
18 | ```{bash}
19 | ./compile.sh
20 | ```
21 | The output of the compile script is a set of static HTML pages. The
22 | HTML pages use absolute filepaths. In order to view them locally, use
23 | an HTTP server such as Python's built-in
24 | [`SimpleHTTPServer`](https://docs.python.org/2/library/simplehttpserver.html)
25 | or Node.js'
26 | [`http-server`](https://www.npmjs.com/package/http-server).
27 |
28 | ## Deploying the website
29 |
30 | + We deploy the documentation so that it is available on this repo's
31 | GitHub Pages site (the `gh-pages` branch). To do this (assuming you
32 | have push permission), go to the `docs/` directory and run
33 | ```{bash}
34 | ./deploy.sh
35 | ```
36 | We forward the main domain URL to the GitHub Pages URL,
37 | [blei-lab.github.io/edward](http://blei-lab.github.io/edward),
38 | following
39 | [GitHub's guide](https://help.github.com/articles/setting-up-a-custom-domain-with-github-pages):
40 | we create a `CNAME` file and then update the DNS record with
41 | the DNS provider.
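42 |
43 | ## Previewing locally
44 |
45 | As a quick sketch of the local-preview step mentioned above (any static file
46 | server works; the port is arbitrary):
47 | ```{bash}
48 | cd build
49 | python -m SimpleHTTPServer 8000   # Python 2; use `python3 -m http.server 8000` on Python 3
50 | ```
51 | Then open http://localhost:8000 in a browser.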
42 |
--------------------------------------------------------------------------------
/docs/compile.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | printf "Compiling Edward website.\n\n"
3 | docdir=$(pwd)
4 | outdir=$docdir/build
5 | tmpdir=/tmp/docs
6 | tmpdir2=/tmp/docs2
7 |
8 | echo "Clearing any previously built files."
9 | rm -rf "$outdir/"
10 | printf "Done.\n\n"
11 |
12 | echo "Begin docstring generation."
13 | python generate_api_navbar_and_symbols.py \
14 | --src_dir="$docdir/tex/" \
15 | --out_dir="$tmpdir2/"
16 | python parser/generate.py \
17 | --src_dir="$tmpdir2/" \
18 | --output_dir="$tmpdir/"
19 | python generate_api_toc.py \
20 | --src_dir="$docdir/tex/template-api.pandoc" \
21 | --yaml_dir="$tmpdir/api/_toc.yaml" \
22 | --out_dir="$tmpdir/template.pandoc"
23 | printf "Done.\n\n"
24 |
25 | echo "Begin pandoc compilation."
26 | cd "$tmpdir"
27 | for filename in $(find api -name '*.md'); do
28 | echo "$filename"
29 | mkdir -p "$outdir/$(dirname $filename)"
30 | if [[ "$filename" == api/observations* ]]; then
31 | # assume observations/ lives in same parent directory as edward/
32 | bib="$docdir/../../observations/bib.bib"
33 | else
34 | bib="$docdir/tex/bib.bib"
35 | fi
36 | pandoc "$filename" \
37 | --from=markdown+link_attributes+native_spans \
38 | --to=html \
39 | --filter="$docdir/pandoc-code2raw.py" \
40 | --mathjax \
41 | --no-highlight \
42 | --bibliography="$bib" \
43 | --csl="$docdir/tex/apa.csl" \
44 | --title-prefix="Edward" \
45 | --template="$tmpdir/template.pandoc" \
46 | --output="$outdir/${filename%.*}.html"
47 | done
48 | for filename in $(find api -name '*.tex'); do
49 | echo "$filename"
50 | mkdir -p "$outdir/$(dirname $filename)"
51 | pandoc "$filename" \
52 | --from=latex+link_attributes+native_spans \
53 | --to=html \
54 | --filter="$docdir/pandoc-code2raw.py" \
55 | --mathjax \
56 | --no-highlight \
57 | --bibliography="$docdir/tex/bib.bib" \
58 | --csl="$docdir/tex/apa.csl" \
59 | --title-prefix="Edward" \
60 | --template="$tmpdir/template.pandoc" \
61 | --output="$outdir/${filename%.*}.html"
62 | done
63 | for filename in {./,tutorials/}*.tex; do
64 | echo "$filename"
65 | mkdir -p "$outdir/$(dirname $filename)"
66 | pandoc "$filename" \
67 | --from=latex+link_attributes+native_spans \
68 | --to=html \
69 | --filter="$docdir/pandoc-code2raw.py" \
70 | --mathjax \
71 | --no-highlight \
72 | --bibliography="$docdir/tex/bib.bib" \
73 | --csl="$docdir/tex/apa.csl" \
74 | --title-prefix="Edward" \
75 | --template="$docdir/tex/template.pandoc" \
76 | --output="$outdir/${filename%.*}.html"
77 | done
78 | printf "Done.\n\n"
79 |
80 | echo "Begin postprocessing scripts."
81 | cd "$docdir"
82 | echo "./strip_p_in_li.py"
83 | python "strip_p_in_li.py"
84 | printf "Done.\n\n"
85 |
86 | echo "Begin copying index files."
87 | cp -r "css/" "$outdir/"
88 | cp -r "icons/" "$outdir/"
89 | cp -r "images/" "$outdir/"
90 | cp "CNAME" "$outdir/"
91 | printf "Done.\n\n"
92 |
93 | # Clear intermediate docstring-generated files
94 | rm -rf "$tmpdir"
95 | rm -rf "$tmpdir2"
96 |
--------------------------------------------------------------------------------
/docs/deploy.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ghp-import -n -p build
3 |
--------------------------------------------------------------------------------
/docs/generate_api_toc.py:
--------------------------------------------------------------------------------
1 | """Take generated TOC YAML file and format it into template.pandoc."""
2 | from __future__ import absolute_import
3 | from __future__ import division
4 | from __future__ import print_function
5 |
6 | import argparse
7 | import os
8 | import yaml
9 |
10 | parser = argparse.ArgumentParser()
11 | parser.add_argument('--src_dir', type=str)
12 | parser.add_argument('--yaml_dir', type=str)
13 | parser.add_argument('--out_dir', type=str)
14 | args = parser.parse_args()
15 |
16 | src_dir = os.path.expanduser(args.src_dir)
17 | yaml_dir = os.path.expanduser(args.yaml_dir)
18 | out_dir = os.path.expanduser(args.out_dir)
19 |
20 | with open(yaml_dir) as f:
21 | data_map = yaml.safe_load(f)
22 |
23 | toc = ''
24 | for entry in data_map['toc']:
25 | title = entry['title']
26 | if title == 'ed':
27 | continue
28 |
29 | section = entry['section']
30 | assert section[0]['title'] == 'Overview'
31 | path = section[0]['path']
32 | toc += '<a href="{}">{}</a>'.format(path, title)
33 | toc += '\n'
34 |
35 | document = open(src_dir).read()
36 | document = document.replace('{{toc}}', toc)
37 | open(out_dir, 'w').write(document)
38 |
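39 | # For reference, the `_toc.yaml` consumed above is assumed to look roughly
40 | # like the sketch below (inferred from the fields this script accesses;
41 | # titles and paths are illustrative, not the generator's exact output):
42 | #
43 | # toc:
44 | # - title: ed
45 | #   section:
46 | #   - title: Overview
47 | #     path: /api/ed
48 | # - title: criticisms
49 | #   section:
50 | #   - title: Overview
51 | #     path: /api/criticisms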
--------------------------------------------------------------------------------
/docs/icons/android-chrome-144x144.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blei-lab/edward/352596c3bc77e1cb5c01f77ac3283a53c74dbf21/docs/icons/android-chrome-144x144.png
--------------------------------------------------------------------------------
/docs/icons/android-chrome-192x192.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blei-lab/edward/352596c3bc77e1cb5c01f77ac3283a53c74dbf21/docs/icons/android-chrome-192x192.png
--------------------------------------------------------------------------------
/docs/icons/android-chrome-36x36.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blei-lab/edward/352596c3bc77e1cb5c01f77ac3283a53c74dbf21/docs/icons/android-chrome-36x36.png
--------------------------------------------------------------------------------
/docs/icons/android-chrome-48x48.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blei-lab/edward/352596c3bc77e1cb5c01f77ac3283a53c74dbf21/docs/icons/android-chrome-48x48.png
--------------------------------------------------------------------------------
/docs/icons/android-chrome-72x72.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blei-lab/edward/352596c3bc77e1cb5c01f77ac3283a53c74dbf21/docs/icons/android-chrome-72x72.png
--------------------------------------------------------------------------------
/docs/icons/android-chrome-96x96.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blei-lab/edward/352596c3bc77e1cb5c01f77ac3283a53c74dbf21/docs/icons/android-chrome-96x96.png
--------------------------------------------------------------------------------
/docs/icons/apple-touch-icon-114x114.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blei-lab/edward/352596c3bc77e1cb5c01f77ac3283a53c74dbf21/docs/icons/apple-touch-icon-114x114.png
--------------------------------------------------------------------------------
/docs/icons/apple-touch-icon-120x120.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blei-lab/edward/352596c3bc77e1cb5c01f77ac3283a53c74dbf21/docs/icons/apple-touch-icon-120x120.png
--------------------------------------------------------------------------------
/docs/icons/apple-touch-icon-144x144.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blei-lab/edward/352596c3bc77e1cb5c01f77ac3283a53c74dbf21/docs/icons/apple-touch-icon-144x144.png
--------------------------------------------------------------------------------
/docs/icons/apple-touch-icon-152x152.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blei-lab/edward/352596c3bc77e1cb5c01f77ac3283a53c74dbf21/docs/icons/apple-touch-icon-152x152.png
--------------------------------------------------------------------------------
/docs/icons/apple-touch-icon-180x180.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blei-lab/edward/352596c3bc77e1cb5c01f77ac3283a53c74dbf21/docs/icons/apple-touch-icon-180x180.png
--------------------------------------------------------------------------------
/docs/icons/apple-touch-icon-57x57.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blei-lab/edward/352596c3bc77e1cb5c01f77ac3283a53c74dbf21/docs/icons/apple-touch-icon-57x57.png
--------------------------------------------------------------------------------
/docs/icons/apple-touch-icon-60x60.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blei-lab/edward/352596c3bc77e1cb5c01f77ac3283a53c74dbf21/docs/icons/apple-touch-icon-60x60.png
--------------------------------------------------------------------------------
/docs/icons/apple-touch-icon-72x72.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blei-lab/edward/352596c3bc77e1cb5c01f77ac3283a53c74dbf21/docs/icons/apple-touch-icon-72x72.png
--------------------------------------------------------------------------------
/docs/icons/apple-touch-icon-76x76.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blei-lab/edward/352596c3bc77e1cb5c01f77ac3283a53c74dbf21/docs/icons/apple-touch-icon-76x76.png
--------------------------------------------------------------------------------
/docs/icons/apple-touch-icon-precomposed.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blei-lab/edward/352596c3bc77e1cb5c01f77ac3283a53c74dbf21/docs/icons/apple-touch-icon-precomposed.png
--------------------------------------------------------------------------------
/docs/icons/apple-touch-icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blei-lab/edward/352596c3bc77e1cb5c01f77ac3283a53c74dbf21/docs/icons/apple-touch-icon.png
--------------------------------------------------------------------------------
/docs/icons/browserconfig.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="utf-8"?>
2 | <browserconfig>
3 |   <msapplication>
4 |     <tile>
5 |       <square70x70logo src="/icons/mstile-70x70.png"/>
6 |       <square150x150logo src="/icons/mstile-150x150.png"/>
7 |       <square310x310logo src="/icons/mstile-310x310.png"/>
8 |       <wide310x150logo src="/icons/mstile-310x150.png"/>
9 |       <TileColor>#da532c</TileColor>
10 |     </tile>
11 |   </msapplication>
12 | </browserconfig>
13 |
--------------------------------------------------------------------------------
/docs/icons/favicon-16x16.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blei-lab/edward/352596c3bc77e1cb5c01f77ac3283a53c74dbf21/docs/icons/favicon-16x16.png
--------------------------------------------------------------------------------
/docs/icons/favicon-32x32.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blei-lab/edward/352596c3bc77e1cb5c01f77ac3283a53c74dbf21/docs/icons/favicon-32x32.png
--------------------------------------------------------------------------------
/docs/icons/favicon-96x96.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blei-lab/edward/352596c3bc77e1cb5c01f77ac3283a53c74dbf21/docs/icons/favicon-96x96.png
--------------------------------------------------------------------------------
/docs/icons/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blei-lab/edward/352596c3bc77e1cb5c01f77ac3283a53c74dbf21/docs/icons/favicon.ico
--------------------------------------------------------------------------------
/docs/icons/manifest.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "Edward",
3 | "icons": [
4 | {
5 | "src": "icons\/android-chrome-36x36.png",
6 | "sizes": "36x36",
7 | "type": "image\/png",
8 | "density": 0.75
9 | },
10 | {
11 | "src": "icons\/android-chrome-48x48.png",
12 | "sizes": "48x48",
13 | "type": "image\/png",
14 | "density": 1
15 | },
16 | {
17 | "src": "icons\/android-chrome-72x72.png",
18 | "sizes": "72x72",
19 | "type": "image\/png",
20 | "density": 1.5
21 | },
22 | {
23 | "src": "icons\/android-chrome-96x96.png",
24 | "sizes": "96x96",
25 | "type": "image\/png",
26 | "density": 2
27 | },
28 | {
29 | "src": "icons\/android-chrome-144x144.png",
30 | "sizes": "144x144",
31 | "type": "image\/png",
32 | "density": 3
33 | },
34 | {
35 | "src": "icons\/android-chrome-192x192.png",
36 | "sizes": "192x192",
37 | "type": "image\/png",
38 | "density": 4
39 | }
40 | ]
41 | }
42 |
--------------------------------------------------------------------------------
/docs/icons/mstile-144x144.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blei-lab/edward/352596c3bc77e1cb5c01f77ac3283a53c74dbf21/docs/icons/mstile-144x144.png
--------------------------------------------------------------------------------
/docs/icons/mstile-150x150.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blei-lab/edward/352596c3bc77e1cb5c01f77ac3283a53c74dbf21/docs/icons/mstile-150x150.png
--------------------------------------------------------------------------------
/docs/icons/mstile-310x150.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blei-lab/edward/352596c3bc77e1cb5c01f77ac3283a53c74dbf21/docs/icons/mstile-310x150.png
--------------------------------------------------------------------------------
/docs/icons/mstile-310x310.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blei-lab/edward/352596c3bc77e1cb5c01f77ac3283a53c74dbf21/docs/icons/mstile-310x310.png
--------------------------------------------------------------------------------
/docs/icons/mstile-70x70.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blei-lab/edward/352596c3bc77e1cb5c01f77ac3283a53c74dbf21/docs/icons/mstile-70x70.png
--------------------------------------------------------------------------------
/docs/icons/safari-pinned-tab.svg:
--------------------------------------------------------------------------------
1 |
2 |
4 |
29 |
--------------------------------------------------------------------------------
/docs/images/automated-transformations-0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blei-lab/edward/352596c3bc77e1cb5c01f77ac3283a53c74dbf21/docs/images/automated-transformations-0.png
--------------------------------------------------------------------------------
/docs/images/automated-transformations-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blei-lab/edward/352596c3bc77e1cb5c01f77ac3283a53c74dbf21/docs/images/automated-transformations-1.png
--------------------------------------------------------------------------------
/docs/images/beta_bernoulli.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blei-lab/edward/352596c3bc77e1cb5c01f77ac3283a53c74dbf21/docs/images/beta_bernoulli.png
--------------------------------------------------------------------------------
/docs/images/decoder.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blei-lab/edward/352596c3bc77e1cb5c01f77ac3283a53c74dbf21/docs/images/decoder.png
--------------------------------------------------------------------------------
/docs/images/dirichlet-process-fig0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blei-lab/edward/352596c3bc77e1cb5c01f77ac3283a53c74dbf21/docs/images/dirichlet-process-fig0.png
--------------------------------------------------------------------------------
/docs/images/dirichlet-process-fig1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blei-lab/edward/352596c3bc77e1cb5c01f77ac3283a53c74dbf21/docs/images/dirichlet-process-fig1.png
--------------------------------------------------------------------------------
/docs/images/dynamic_graph.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blei-lab/edward/352596c3bc77e1cb5c01f77ac3283a53c74dbf21/docs/images/dynamic_graph.png
--------------------------------------------------------------------------------
/docs/images/edward.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blei-lab/edward/352596c3bc77e1cb5c01f77ac3283a53c74dbf21/docs/images/edward.png
--------------------------------------------------------------------------------
/docs/images/edward_200.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blei-lab/edward/352596c3bc77e1cb5c01f77ac3283a53c74dbf21/docs/images/edward_200.png
--------------------------------------------------------------------------------
/docs/images/edward_logo.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blei-lab/edward/352596c3bc77e1cb5c01f77ac3283a53c74dbf21/docs/images/edward_logo.pdf
--------------------------------------------------------------------------------
/docs/images/gan-fig0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blei-lab/edward/352596c3bc77e1cb5c01f77ac3283a53c74dbf21/docs/images/gan-fig0.png
--------------------------------------------------------------------------------
/docs/images/gan-fig1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blei-lab/edward/352596c3bc77e1cb5c01f77ac3283a53c74dbf21/docs/images/gan-fig1.png
--------------------------------------------------------------------------------
/docs/images/getting-started-fig0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blei-lab/edward/352596c3bc77e1cb5c01f77ac3283a53c74dbf21/docs/images/getting-started-fig0.png
--------------------------------------------------------------------------------
/docs/images/getting-started-fig1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blei-lab/edward/352596c3bc77e1cb5c01f77ac3283a53c74dbf21/docs/images/getting-started-fig1.png
--------------------------------------------------------------------------------
/docs/images/github-mark.svg:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
15 |
--------------------------------------------------------------------------------
/docs/images/hierarchical_model_subgraph.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blei-lab/edward/352596c3bc77e1cb5c01f77ac3283a53c74dbf21/docs/images/hierarchical_model_subgraph.png
--------------------------------------------------------------------------------
/docs/images/inference_structure.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blei-lab/edward/352596c3bc77e1cb5c01f77ac3283a53c74dbf21/docs/images/inference_structure.png
--------------------------------------------------------------------------------
/docs/images/linear-mixed-effects-models-fig0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blei-lab/edward/352596c3bc77e1cb5c01f77ac3283a53c74dbf21/docs/images/linear-mixed-effects-models-fig0.png
--------------------------------------------------------------------------------
/docs/images/linear-mixed-effects-models-fig1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blei-lab/edward/352596c3bc77e1cb5c01f77ac3283a53c74dbf21/docs/images/linear-mixed-effects-models-fig1.png
--------------------------------------------------------------------------------
/docs/images/linear-mixed-effects-models-fig2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blei-lab/edward/352596c3bc77e1cb5c01f77ac3283a53c74dbf21/docs/images/linear-mixed-effects-models-fig2.png
--------------------------------------------------------------------------------
/docs/images/linear-mixed-effects-models-fig3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blei-lab/edward/352596c3bc77e1cb5c01f77ac3283a53c74dbf21/docs/images/linear-mixed-effects-models-fig3.png
--------------------------------------------------------------------------------
/docs/images/mixture-density-network-fig0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blei-lab/edward/352596c3bc77e1cb5c01f77ac3283a53c74dbf21/docs/images/mixture-density-network-fig0.png
--------------------------------------------------------------------------------
/docs/images/mixture-density-network-fig1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blei-lab/edward/352596c3bc77e1cb5c01f77ac3283a53c74dbf21/docs/images/mixture-density-network-fig1.png
--------------------------------------------------------------------------------
/docs/images/mixture-density-network-fig2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blei-lab/edward/352596c3bc77e1cb5c01f77ac3283a53c74dbf21/docs/images/mixture-density-network-fig2.png
--------------------------------------------------------------------------------
/docs/images/mixture-density-network-fig3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blei-lab/edward/352596c3bc77e1cb5c01f77ac3283a53c74dbf21/docs/images/mixture-density-network-fig3.png
--------------------------------------------------------------------------------
/docs/images/model_infer_criticize.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blei-lab/edward/352596c3bc77e1cb5c01f77ac3283a53c74dbf21/docs/images/model_infer_criticize.png
--------------------------------------------------------------------------------
/docs/images/ppc.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blei-lab/edward/352596c3bc77e1cb5c01f77ac3283a53c74dbf21/docs/images/ppc.png
--------------------------------------------------------------------------------
/docs/images/probabilistic-pca-fig0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blei-lab/edward/352596c3bc77e1cb5c01f77ac3283a53c74dbf21/docs/images/probabilistic-pca-fig0.png
--------------------------------------------------------------------------------
/docs/images/probabilistic-pca-fig1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blei-lab/edward/352596c3bc77e1cb5c01f77ac3283a53c74dbf21/docs/images/probabilistic-pca-fig1.png
--------------------------------------------------------------------------------
/docs/images/random_variable_ops.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blei-lab/edward/352596c3bc77e1cb5c01f77ac3283a53c74dbf21/docs/images/random_variable_ops.png
--------------------------------------------------------------------------------
/docs/images/supervised-regression-fig0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blei-lab/edward/352596c3bc77e1cb5c01f77ac3283a53c74dbf21/docs/images/supervised-regression-fig0.png
--------------------------------------------------------------------------------
/docs/images/supervised-regression-fig1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blei-lab/edward/352596c3bc77e1cb5c01f77ac3283a53c74dbf21/docs/images/supervised-regression-fig1.png
--------------------------------------------------------------------------------
/docs/images/tensorboard-distributions.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blei-lab/edward/352596c3bc77e1cb5c01f77ac3283a53c74dbf21/docs/images/tensorboard-distributions.png
--------------------------------------------------------------------------------
/docs/images/tensorboard-graphs-0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blei-lab/edward/352596c3bc77e1cb5c01f77ac3283a53c74dbf21/docs/images/tensorboard-graphs-0.png
--------------------------------------------------------------------------------
/docs/images/tensorboard-graphs-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blei-lab/edward/352596c3bc77e1cb5c01f77ac3283a53c74dbf21/docs/images/tensorboard-graphs-1.png
--------------------------------------------------------------------------------
/docs/images/tensorboard-histograms.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blei-lab/edward/352596c3bc77e1cb5c01f77ac3283a53c74dbf21/docs/images/tensorboard-histograms.png
--------------------------------------------------------------------------------
/docs/images/tensorboard-scalars.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blei-lab/edward/352596c3bc77e1cb5c01f77ac3283a53c74dbf21/docs/images/tensorboard-scalars.png
--------------------------------------------------------------------------------
/docs/images/unsupervised-fig0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blei-lab/edward/352596c3bc77e1cb5c01f77ac3283a53c74dbf21/docs/images/unsupervised-fig0.png
--------------------------------------------------------------------------------
/docs/images/unsupervised-fig1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blei-lab/edward/352596c3bc77e1cb5c01f77ac3283a53c74dbf21/docs/images/unsupervised-fig1.png
--------------------------------------------------------------------------------
/docs/pandoc-code2raw.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | """Pandoc filter to insert arbitrary raw output markup
4 | as Code/CodeBlocks with an attribute raw=.
5 |
6 | Especially useful for inserting LaTeX code which pandoc will
7 | otherwise mangle:
8 |
9 | ````{raw=latex}
10 | \let\Begin\begin
11 | \let\End\end
12 | ````
13 | or for making HTML opaque to pandoc, which will otherwise
14 | show the text between tags in other output formats,
15 |
16 | or for allowing Markdown in the arguments of LaTeX commands
17 | or the contents of LaTeX environments
18 |
19 | `\textsf{`{raw=latex}San Seriffe`}`{raw=latex}
20 |
21 | ````{raw=latex}
22 | \begin{center}
23 | ````
24 | This is *centered*!
25 | ````{raw=latex}
26 | \end{center}
27 | ````
28 |
29 | or for header-includes in metadata:
30 |
31 | ---
32 | header-includes: |
33 | ````{raw=latex}
34 | \usepackage{pgfpages}
35 | \pgfpagesuselayout{2 on 1}[a4paper]
36 | ````
37 | ...
38 |
39 | See
40 | """
41 |
42 | from pandocfilters import RawInline, RawBlock, toJSONFilter
43 | from pandocattributes import PandocAttributes
44 |
45 | raw4code = {'Code': RawInline, 'CodeBlock': RawBlock}
46 |
47 |
48 | def code2raw(key, val, format, meta):
49 | if key not in raw4code:
50 | return None
51 | attrs = PandocAttributes(val[0], format='pandoc')
52 | raw = attrs.kvs.get('raw', None)
53 | if raw:
54 | # if raw != format: # but what if we output markdown?
55 | # return []
56 | return raw4code[key](raw, val[-1])
57 | else:
58 | return None
59 |
60 |
61 | if __name__ == "__main__":
62 | toJSONFilter(code2raw)
63 |
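# Hedged usage sketch (not part of the original script): run this file
# as a pandoc filter, e.g.
#   pandoc input.md --filter ./pandoc-code2raw.py -o output.tex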
--------------------------------------------------------------------------------
/docs/parser/README.md:
--------------------------------------------------------------------------------
1 | # Parser
2 |
3 | These are tools to parse docstrings into Markdown.
4 |
5 | We build on the parser from TensorFlow
6 | (commit `3cb3cc6d426112f432db62db6f493ea00ce31e0f`):
7 |
8 | + https://github.com/tensorflow/tensorflow/tree/master/tensorflow/tools/docs
9 | + https://github.com/tensorflow/tensorflow/tree/master/tensorflow/tools/common
10 |
11 | Use a diff tool to analyze the line-by-line differences.
12 |
13 | ## Usage
14 |
15 | The main command is
16 | ```bash
17 | python parser/generate.py \
18 | --src_dir=/absolute/path/to/docs/tex/ \
19 | --output_dir=/tmp/docs/
20 | ```
21 | It builds the API pages into `/tmp/docs/api/`. It also copies all
22 | non-Markdown and non-TeX files from `tex/` into `/tmp/docs/` and
23 | rewrites code references appropriately.
24 |
--------------------------------------------------------------------------------
/docs/parser/generate.py:
--------------------------------------------------------------------------------
1 | """Generate docs for the Edward API."""
2 |
3 | from __future__ import absolute_import
4 | from __future__ import division
5 | from __future__ import print_function
6 |
7 | import os
8 | import sys
9 |
10 | import edward as ed
11 | import observations
12 |
13 | from tensorflow.python import debug as tf_debug
14 | from tensorflow.python.util import tf_inspect
15 | import generate_lib
16 |
17 | if __name__ == '__main__':
18 | doc_generator = generate_lib.DocGenerator()
19 | doc_generator.add_output_dir_argument()
20 | doc_generator.add_src_dir_argument()
21 |
22 |   # This doc generator works on the Edward codebase. Since this script lives
23 | # at docs/parser, and all code is defined somewhere inside
24 | # edward/, we can compute the base directory (two levels up), which is
25 | # valid unless we're trying to apply this to a different code base, or are
26 | # moving the script around.
27 | script_dir = os.path.dirname(tf_inspect.getfile(tf_inspect.currentframe()))
28 | default_base_dir = os.path.join(script_dir, '..', '..', 'edward')
29 | doc_generator.add_base_dir_argument(default_base_dir)
30 |
31 | flags = doc_generator.parse_known_args()
32 |
33 | doc_generator.set_py_modules([('ed', ed), ('observations', observations)])
34 |
35 | sys.exit(doc_generator.build(flags))
36 |
--------------------------------------------------------------------------------
/docs/strip_p_in_li.py:
--------------------------------------------------------------------------------
1 | """Strip paragraphs in lists in pandoc's html output."""
2 | from __future__ import absolute_import
3 | from __future__ import division
4 | from __future__ import print_function
5 |
6 | import glob
7 |
8 | from bs4 import BeautifulSoup
9 |
10 | paths = ("build/*.html", "build/api/*.html", "build/tutorials/*.html")
11 | filenames = []
12 |
13 | for path in paths:
14 | filenames.extend(glob.glob(path))
15 |
16 | for filename in filenames:
17 | soup = BeautifulSoup(open(filename), 'html.parser')
18 | all_li = soup.find_all('li')
19 | if all_li:
20 | for list_item in all_li:
21 | if list_item.p is not None:
22 | list_item.p.unwrap()
23 | html = str(soup)
24 | html = html.replace('border="1"', '')
25 |   with open(filename, 'w') as file:  # write text, not bytes, under Python 3
26 | file.write(html)
27 |
--------------------------------------------------------------------------------
/docs/tex/api/criticism.tex:
--------------------------------------------------------------------------------
1 | \title{Criticism}
2 |
3 | {{navbar}}
4 |
5 | \subsubsection{Criticism}
6 |
7 | We can never validate whether a model is true. In practice, ``all
8 | models are wrong'' \citep{box1976science}. However, we can try to
9 | uncover where the model goes wrong. Model criticism can help justify the
10 | model as an approximation, or point toward promising directions for
11 | revising the model.
12 | For background, see the criticism \href{/tutorials/criticism}{tutorial}.
13 |
14 | Edward explores model criticism using
15 | \begin{itemize}
16 | \item point evaluations, such as mean squared error or
17 | classification accuracy;
18 | \item posterior predictive checks, for making probabilistic
19 | assessments of the model fit using discrepancy functions.
20 | \end{itemize}
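
As a hedged sketch of both (the names \texttt{x\_post} and
\texttt{x\_data} are assumptions: a posterior predictive random
variable and the observed data, as in
\texttt{examples/beta\_bernoulli.py}):

\begin{lstlisting}[language=Python]
# Point evaluation of predictive accuracy.
ed.evaluate('mean_squared_error', data={x_post: x_data})
# Posterior predictive check, using the mean as discrepancy function.
ed.ppc(lambda xs, zs: tf.reduce_mean(tf.cast(xs[x_post], tf.float32)),
       data={x_post: x_data})
\end{lstlisting}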
21 |
22 | % \subsubsection{Developing new criticism techniques}
23 |
24 | % Criticism is defined simply with utility functions. They take random
25 | % variables as input and output NumPy arrays.
26 | % Criticism techniques are simply functions which take as input data,
27 | % the probability model and variational model (bound through a latent
28 | % variable dictionary), and any additional inputs.
29 |
30 | % \begin{lstlisting}[language=Python]
31 | % def criticize(data, latent_vars, ...)
32 | % ...
33 | % \end{lstlisting}
34 |
35 | % Developing new criticism techniques is easy. They can be derived from
36 | % the current techniques or built as a standalone function.
37 |
38 | \begin{center}\rule{3in}{0.4pt}\end{center}
39 |
40 | \begin{itemize}
41 | \item {{criticisms}}
42 | \end{itemize}
43 |
44 | \subsubsection{References}\label{references}
45 |
--------------------------------------------------------------------------------
/docs/tex/api/data.tex:
--------------------------------------------------------------------------------
1 | \title{Data}
2 |
3 | {{navbar}}
4 |
5 | \subsubsection{Data}
6 |
7 | Data defines a set of observations. There are three ways
8 | to read data in Edward. They follow the
9 | \href{https://www.tensorflow.org/programmers_guide/reading_data}
10 | {three ways to read data in TensorFlow}.
11 |
12 | \textbf{Preloaded data.}
13 | A constant or variable in the TensorFlow graph holds all the data.
14 | This setting is the fastest to work with and is recommended if the
15 | data fits in memory.
16 |
17 | Represent the data as NumPy arrays or TensorFlow tensors.
18 |
19 | \begin{lstlisting}[language=Python]
20 | x_data = np.array([0, 1, 0, 0, 0, 0, 0, 0, 0, 1])
21 | x_data = tf.constant([0, 1, 0, 0, 0, 0, 0, 0, 0, 1])
22 | \end{lstlisting}
23 |
24 | During inference, we store them in TensorFlow variables internally to
25 | prevent copying data more than once in memory. As an example, see the
26 | \href{http://nbviewer.jupyter.org/github/blei-lab/edward/blob/master/notebooks/getting_started.ipynb}{getting started} notebook.
27 |
28 | \textbf{Feeding.}
29 | The user's code manually provides the data at each step of inference.
30 | This setting offers the finest control, which is useful for
31 | experimentation.
32 |
33 | Represent the data as
34 | \href{https://www.tensorflow.org/programmers_guide/reading_data#feeding}{TensorFlow placeholders},
35 | which are nodes in the graph that are fed at runtime.
36 |
37 | \begin{lstlisting}[language=Python]
38 | x_data = tf.placeholder(tf.float32, [100, 25]) # placeholder of shape (100, 25)
39 | \end{lstlisting}
40 |
41 | During inference, the user must manually feed the placeholders. At each
42 | step, call \texttt{inference.update()},
43 | passing in a \texttt{feed\_dict} dictionary
44 | that binds placeholders to realized values. As an example, see the
45 | \href{https://github.com/blei-lab/edward/blob/master/examples/vae.py}
46 | {variational auto-encoder} script.
47 | If the values do not change over inference updates, one can also bind
48 | the placeholder to values within the \texttt{data} argument when
49 | first constructing inference.
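
As a minimal sketch (assuming an \texttt{inference} object has been
constructed with the placeholder \texttt{x\_data} above, and
\texttt{x\_batch} is a NumPy array of realized values):

\begin{lstlisting}[language=Python]
inference.initialize()
for _ in range(inference.n_iter):
  info_dict = inference.update(feed_dict={x_data: x_batch})
  inference.print_progress(info_dict)
\end{lstlisting}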
50 |
51 | \textbf{Reading from files.}
52 | An input pipeline reads the data from files at the beginning of a
53 | TensorFlow graph. This setting is recommended if the data does not
54 | fit in memory.
55 |
56 | \begin{lstlisting}[language=Python]
57 | filename_queue = tf.train.string_input_producer(...)
58 | reader = tf.SomeReader()
59 | ...
60 | \end{lstlisting}
61 |
62 | Represent the data as TensorFlow tensors, where the tensors are the
63 | output of data readers. During inference, each update will be
64 | automatically evaluated over new batch tensors represented through
65 | the data readers. As an example, see the
66 | \href{https://github.com/blei-lab/edward/blob/master/tests/inferences/test_inference_data.py}{data unit test}.
67 |
--------------------------------------------------------------------------------
/docs/tex/api/index.tex:
--------------------------------------------------------------------------------
1 | \title{API}
2 |
3 | {{navbar}}
4 |
5 | Edward's design reflects the building blocks for probabilistic
6 | modeling. It defines interchangeable components, enabling rapid
7 | experimentation and research with probabilistic models.
8 |
9 | Edward is named after the innovative statistician
10 | \href{https://en.wikipedia.org/wiki/George_E._P._Box}{George Edward
11 | Pelham Box}. Edward follows Box's philosophy of statistics and machine
12 | learning \citep{box1976science}.
13 |
14 | First gather data from some real-world phenomena. Then cycle through
15 | \href{http://www.annualreviews.org/eprint/7xbyci3nwAg5kEttvvjk/full/10.1146/annurev-statistics-022513-115657}
16 | {Box's loop} \citep{blei2014build}.
17 |
18 | \begin{enumerate}
19 | \item Build a probabilistic model of the phenomena.
20 | \item Reason about the phenomena given model and data.
21 | \item Criticize the model, revise and repeat.
22 | \end{enumerate}
23 |
24 | \includegraphics{/images/model_infer_criticize.png}
25 |
26 | Here's a toy example. A child flips a coin ten times, with the set of outcomes
27 | being \texttt{{[}0,\ 1,\ 0,\ 0,\ 0,\ 0,\ 0,\ 0,\ 0,\ 1{]}}, where \texttt{0}
28 | denotes tails and \texttt{1} denotes heads. She is interested in the
29 | probability that the coin lands heads. To analyze this, she first
30 | builds a model: suppose she assumes the coin flips are independent and
31 | land heads with the same probability. Second, she reasons about the
32 | phenomenon: she infers the model's hidden structure given data.
33 | Finally, she criticizes the model: she analyzes whether her model
34 | captures the real-world phenomenon of coin flips. If it doesn't, then
35 | she may revise the model and repeat.
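
As a condensed sketch of her analysis in Edward, adapted from
\texttt{examples/beta\_bernoulli.py} in the Github repository:

\begin{lstlisting}[language=Python]
import edward as ed
import numpy as np
import tensorflow as tf
from edward.models import Bernoulli, Beta, Empirical

x_data = np.array([0, 1, 0, 0, 0, 0, 0, 0, 0, 1])

# 1. Model: independent flips sharing one heads probability.
p = Beta(1.0, 1.0)
x = Bernoulli(probs=p, sample_shape=10)

# 2. Inference: infer p given the observed outcomes.
qp = Empirical(params=tf.get_variable(
    "qp/params", [1000], initializer=tf.constant_initializer(0.5)))
inference = ed.MetropolisHastings({p: qp}, {p: Beta(3.0, 9.0)},
                                  data={x: x_data})
inference.run()

# 3. Criticism: compare the posterior predictive to the data.
x_post = ed.copy(x, {p: qp})
\end{lstlisting}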
36 |
37 | Navigate the modules enabling this analysis via the navbar above.
38 | See the
39 | \href{/api/reference}{reference page} for a list of the API.
40 |
41 | \subsubsection{References}\label{references}
42 |
--------------------------------------------------------------------------------
/docs/tex/api/inference-development.tex:
--------------------------------------------------------------------------------
1 | \title{Developing Inference Algorithms}
2 |
3 | {{navbar}}
4 |
5 | \subsubsection{Developing Inference Algorithms}
6 |
7 | Edward uses class inheritance to provide a hierarchy of inference
8 | methods. This enables fast experimentation on top of existing
9 | algorithms, whether it be developing new black box algorithms or
10 | new model-specific algorithms.
11 | For examples of algorithms developed in Edward, see the inference
12 | \href{/tutorials/}{tutorials}.
13 |
14 | \includegraphics[width=700px]{/images/inference_structure.png}
15 | {\small\textit{Dependency graph of several inference methods.
16 | Nodes are classes in Edward and arrows represent class inheritance.}}
17 |
18 | There is a base class \texttt{Inference}, from which all inference
19 | methods are derived. Note that \texttt{Inference} says nothing
20 | about the class of models that an algorithm must work with. One can
21 | build inference algorithms which are tailored to a restricted class of
22 | models available in Edward (such as differentiable models or
23 | conditionally conjugate models), or even tailored to a single model.
24 | The algorithm can raise an error if the model is outside this class.
25 |
26 | We organize inference under two paradigms:
27 | \texttt{VariationalInference} and \texttt{MonteCarlo} (or more plainly,
28 | optimization and sampling). These inherit from \texttt{Inference} and each
29 | have their own default methods.
30 |
31 | For example, developing a new variational inference algorithm is as simple as
32 | inheriting from \texttt{VariationalInference} and writing a
33 | \texttt{build\_loss\_and\_gradients()} method. \texttt{VariationalInference} implements many default methods such
34 | as \texttt{initialize()} with options for an optimizer.
35 | As an example, see the
36 | \href{https://github.com/blei-lab/edward/blob/master/examples/iwvi.py}{importance
37 | weighted variational inference} script.
38 |
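As a hedged sketch (the class name and objective below are
placeholders, not an algorithm shipped with Edward):

\begin{lstlisting}[language=Python]
import tensorflow as tf
from edward.inferences import VariationalInference

class MyVariationalInference(VariationalInference):
  def build_loss_and_gradients(self, var_list):
    # A real algorithm would build its objective from self.latent_vars
    # and self.data; this placeholder just penalizes large variables.
    loss = tf.add_n([tf.reduce_sum(tf.square(var)) for var in var_list])
    grads = tf.gradients(loss, var_list)
    grads_and_vars = list(zip(grads, var_list))
    return loss, grads_and_vars
\end{lstlisting}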
--------------------------------------------------------------------------------
/docs/tex/api/model.tex:
--------------------------------------------------------------------------------
1 | \title{Model}
2 |
3 | {{navbar}}
4 |
5 | \subsubsection{Model}
6 |
7 | A probabilistic model is a joint distribution $p(\mathbf{x},
8 | \mathbf{z})$ of data $\mathbf{x}$ and latent variables $\mathbf{z}$.
9 | For background, see the \href{/tutorials/model}{Probabilistic Models tutorial}.
10 |
11 | In Edward, we specify models using a simple language of random variables.
12 | A random variable $\mathbf{x}$ is an object parameterized by
13 | tensors $\theta^*$, where
14 | the number of random variables in one object is determined by
15 | the dimensions of its parameters.
16 |
17 | \begin{lstlisting}[language=Python]
18 | from edward.models import Normal, Exponential
19 |
20 | # univariate normal
21 | Normal(loc=tf.constant(0.0), scale=tf.constant(1.0))
22 | # vector of 5 univariate normals
23 | Normal(loc=tf.zeros(5), scale=tf.ones(5))
24 | # 2 x 3 matrix of Exponentials
25 | Exponential(rate=tf.ones([2, 3]))
26 | \end{lstlisting}
27 |
28 | For multivariate distributions, the multivariate dimension is the
29 | innermost (right-most) dimension of the parameters.
30 |
31 | \begin{lstlisting}[language=Python]
32 | from edward.models import Dirichlet, MultivariateNormalTriL
33 |
34 | # K-dimensional Dirichlet
35 | Dirichlet(concentration=tf.constant([0.1] * K))
36 | # vector of 5 K-dimensional multivariate normals with lower triangular cov
37 | MultivariateNormalTriL(loc=tf.zeros([5, K]), scale_tril=tf.ones([5, K, K]))
38 | # 2 x 5 matrix of K-dimensional multivariate normals
39 | MultivariateNormalTriL(loc=tf.zeros([2, 5, K]), scale_tril=tf.ones([2, 5, K, K]))
40 | \end{lstlisting}
41 |
42 | Random variables are equipped with methods such as
43 | \texttt{log\_prob()}, $\log p(\mathbf{x}\mid\theta^*)$,
44 | \texttt{mean()}, $\mathbb{E}_{p(\mathbf{x}\mid\theta^*)}[\mathbf{x}]$,
45 | and \texttt{sample()}, $\mathbf{x}^*\sim p(\mathbf{x}\mid\theta^*)$.
46 | Further, each random variable is associated to a tensor $\mathbf{x}^*$ in the
47 | computational graph, which represents a single sample $\mathbf{x}^*\sim
48 | p(\mathbf{x}\mid\theta^*)$.
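
For example (each call returns a tensor, to be evaluated in a session):

\begin{lstlisting}[language=Python]
x = Normal(loc=0.0, scale=1.0)
x.log_prob(0.0)  # log-density evaluated at 0.0
x.mean()         # E[x], here 0.0
x.sample(5)      # 5 draws from Normal(0, 1)
\end{lstlisting}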
49 |
50 | This makes it easy to parameterize random variables with complex
51 | deterministic structure, such as deep neural networks and a diverse
52 | set of math operations, and keeps them compatible with third-party
53 | libraries which also build on TensorFlow.
54 | The design also enables compositions of random variables
55 | to capture complex stochastic structure; such
56 | operations are applied to $\mathbf{x}^*$.
57 |
58 | \includegraphics[width=375px]{/images/random_variable_ops.png}
59 |
60 | \begin{lstlisting}[language=Python]
61 | from edward.models import Normal
62 |
63 | x = Normal(loc=tf.zeros(10), scale=tf.ones(10))
64 | y = tf.constant(5.0)
65 | x + y, x - y, x * y, x / y
66 | tf.tanh(x * y)
67 | x[2] # 3rd normal rv in the vector
68 | \end{lstlisting}
69 |
70 | In the \href{/api/model-compositionality}{compositionality page}, we
71 | describe how to build models by composing random variables.
72 |
73 | \begin{center}\rule{3in}{0.4pt}\end{center}
74 |
75 | \begin{itemize}
76 | \item @{ed.models.RandomVariable}
77 | \item {{models}}
78 | \end{itemize}
79 |
--------------------------------------------------------------------------------
/docs/tex/api/reference.tex:
--------------------------------------------------------------------------------
1 | \title{Reference}
2 |
3 | {{navbar}}
4 |
5 | There are four modules in Edward:
6 | \texttt{ed.criticisms},
7 | \texttt{ed.inferences},
8 | \texttt{ed.models},
9 | and
10 | \texttt{ed.util}.
11 |
12 | \subsubsection{Criticism}
13 |
14 | \texttt{ed.criticisms} consists of functions. They operate on
15 | random variables in a model or they operate on NumPy arrays
16 | representing values drawn from the random variables.
17 |
18 | \begin{itemize}
19 | \item {{criticisms}}
20 | \end{itemize}
21 |
22 | \subsubsection{Inference}
23 |
24 | \texttt{ed.inferences} consists mostly of classes. They are
25 | organized in a class hierarchy, where methods are shared via parent
26 | classes and \texttt{Inference} is the top-most base class.
27 |
28 | \begin{itemize}
29 | \item @{ed.inferences.Inference}
30 | \item @{ed.inferences.VariationalInference}
31 | \begin{itemize}
32 | \item @{ed.inferences.KLqp}
33 | \begin{itemize}
34 | \item @{ed.inferences.ReparameterizationKLqp}
35 | \item @{ed.inferences.ReparameterizationKLKLqp}
36 | \item @{ed.inferences.ReparameterizationEntropyKLqp}
37 | \item @{ed.inferences.ScoreKLqp}
38 | \item @{ed.inferences.ScoreKLKLqp}
39 | \item @{ed.inferences.ScoreEntropyKLqp}
40 | \end{itemize}
41 | \item @{ed.inferences.KLpq}
42 | \item @{ed.inferences.GANInference}
43 | \begin{itemize}
44 | \item @{ed.inferences.BiGANInference}
45 | \item @{ed.inferences.ImplicitKLqp}
46 | \item @{ed.inferences.WGANInference}
47 | \end{itemize}
48 | \item @{ed.inferences.MAP}
49 | \begin{itemize}
50 | \item @{ed.inferences.Laplace}
51 | \end{itemize}
52 | \end{itemize}
53 | \item @{ed.inferences.MonteCarlo}
54 | \begin{itemize}
55 | \item @{ed.inferences.Gibbs}
56 | \item @{ed.inferences.MetropolisHastings}
57 | \item @{ed.inferences.HMC}
58 | \item @{ed.inferences.SGLD}
59 | \item @{ed.inferences.SGHMC}
60 | \end{itemize}
61 | \item @{ed.inferences.complete_conditional}
62 | \end{itemize}
63 |
64 | \subsubsection{Models}
65 |
66 | \texttt{ed.models} consists of random variables.
67 | The list of available random variables depends on the TensorFlow
68 | version installed. For TensorFlow {{tensorflow_version}}, the
69 | following are available:
70 |
71 | \begin{itemize}
72 | \item @{ed.models.RandomVariable}
73 | \item {{models}}
74 | \end{itemize}
75 |
76 | \subsubsection{Utilities}
77 |
78 | \texttt{ed.util} consists of functions for miscellaneous usage.
79 |
80 | \begin{itemize}
81 | \item {{util}}
82 | \item @{ed.VERSION}
83 | \item @{ed.__version__}
84 | \end{itemize}
85 |
--------------------------------------------------------------------------------
/docs/tex/community.tex:
--------------------------------------------------------------------------------
1 | \title{Community}
2 |
3 | \subsection{Community}
4 |
5 | Edward's community is a key asset. We work together to make Edward a
6 | rewarding experience both in its usage and development.
7 |
8 | \subsubsection{General}
9 |
10 | The most important community resources for Edward are:
11 |
12 | \begin{itemize}
13 | \item
14 | The
15 | \href{https://discourse.edwardlib.org}{Forum},
16 | for any discussion of Edward. The Forum includes user and development
17 | discussion, as well as announcements.
18 | \item
19 | The
20 | \href{http://gitter.im/blei-lab/edward}{Gitter channel},
21 |   for real-time discussion.
22 | \end{itemize}
23 |
24 | Both can be used to get help with installation, coding and debugging
25 | in Edward, and any discussion about probabilistic modeling.
26 |
27 | You can also find help on the question and answer site,
28 | \href{http://stackoverflow.com}{Stack Overflow}.
29 |
30 | \subsubsection{Bug reports \& feature requests}
31 |
32 | To submit bug reports or feature requests, use Edward's
33 | \href{https://github.com/blei-lab/edward/issues}{Github issues tracker}.
34 |
35 | Check out our
36 | \href{/contributing}{contributing page}
37 | if you'd like to help improve Edward.
38 |
--------------------------------------------------------------------------------
/docs/tex/license.tex:
--------------------------------------------------------------------------------
1 | \title{License}
2 |
3 | \subsection{License}
4 |
5 | Edward is open-source licensed under the
6 | \href{https://opensource.org/licenses/Apache-2.0}{Apache License, version 2.0}.
7 |
8 | The Edward logo is a black box, inspired by an installation by the Dutch
9 | artist \href{http://www.erikolofsen.com/blackbox.html}{Erik Olofsen}. The Edward
10 | logo is licensed under the Creative Commons
11 | \href
12 | {https://creativecommons.org/licenses/by-nd/4.0/}
13 | {Attribution-NoDerivatives 4.0 International License}.
14 |
--------------------------------------------------------------------------------
/docs/tex/troubleshooting.tex:
--------------------------------------------------------------------------------
1 | \title{Troubleshooting}
2 |
3 | \subsection{Troubleshooting}
4 |
5 | \subsubsection{Basic Installation}
6 |
7 | Edward depends on
8 |
9 | \begin{itemize}
10 | \item NumPy (>=1.7)
11 | \item Six (>=1.1.0)
12 | \item TensorFlow (>=1.2.0rc0)
13 | \end{itemize}
14 |
15 | Installing \texttt{edward} by default also installs \texttt{numpy} and
16 | \texttt{six} if they are unavailable (or out-of-date).
17 |
18 | Installing \texttt{edward} does not automatically install or update
19 | TensorFlow. We recommend installing it via
20 |
21 | \begin{lstlisting}[language=bash]
22 | pip install tensorflow
23 | \end{lstlisting}
24 |
25 | To use Edward with GPUs, install \texttt{tensorflow-gpu} instead of
26 | \texttt{tensorflow} as
27 |
28 | \begin{lstlisting}[language=bash]
29 | pip install tensorflow-gpu
30 | \end{lstlisting}
31 |
32 | See TensorFlow's
33 | \href{https://www.tensorflow.org/install/}{installation instructions}
34 | for details, including how to set up NVIDIA software for TensorFlow with GPUs.
35 |
36 | \subsubsection{Full Installation}
37 |
38 | Edward has optional features that depend on external packages.
39 |
40 | \begin{itemize}
41 | \item Any examples using real data sets typically require
42 | \href{https://github.com/edwardlib/observations/}{Observations} (>=0.1.2)
43 | \begin{lstlisting}[language=bash]
44 | pip install observations
45 | \end{lstlisting}
46 |   Observations lets you load an extensive collection of data sets with
47 |   minimal effort through a one-line interface. Observations was originally
48 |   developed for Edward and has since become a standalone library for
49 |   general machine learning.
50 | \item Neural networks are supported through any library operating
51 | on TensorFlow. For example:
52 | \texttt{tf.layers},
53 | \href{http://keras.io}{Keras} (>=1.0)
54 | \begin{lstlisting}[language=bash]
55 | pip install keras==2.0.4
56 | \end{lstlisting}
57 | and
58 | \href{https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/slim}{TensorFlow Slim}
59 | (native in TensorFlow).
60 |
61 | Note that for Keras 2.0.5 and beyond, neural net layer transformations can no longer be applied directly to random variables. For example, if \texttt{x} is an \texttt{ed.RandomVariable} object, one must call \texttt{tf.convert\_to\_tensor} before passing it to a layer, as in \texttt{Dense(256)(tf.convert\_to\_tensor(x))}. See \href{https://github.com/fchollet/keras/issues/6979}{here} for more details.
62 | \item Notebooks require
63 | \href{http://jupyter.org}{Jupyter} (>=1.0.0)
64 | \begin{lstlisting}[language=bash]
65 | pip install jupyter
66 | \end{lstlisting}
67 | \item Visualization requires
68 | \href{http://matplotlib.org}{Matplotlib} (>=1.3),
69 | \href{https://pillow.readthedocs.io}{Pillow} (>=3.4.2), and
70 | \href{https://seaborn.pydata.org}{Seaborn} (>=0.3.1)
71 | \begin{lstlisting}[language=bash]
72 | pip install matplotlib
73 | pip install pillow
74 | pip install seaborn
75 | \end{lstlisting}
76 | \end{itemize}
77 |
--------------------------------------------------------------------------------
/docs/tex/tutorials/bayesian-neural-network.tex:
--------------------------------------------------------------------------------
1 | \title{Bayesian Neural Network}
2 |
3 | \subsection{Bayesian Neural Network}
4 |
5 | A Bayesian neural network is a neural network with a prior
6 | distribution on its weights \citep{neal2012bayesian}.
7 |
8 | Consider a data set $\{(\mathbf{x}_n, y_n)\}$, where each data point
9 | comprises features $\mathbf{x}_n\in\mathbb{R}^D$ and an output
10 | $y_n\in\mathbb{R}$. Define the likelihood for each data point as
11 | \begin{align*}
12 | p(y_n \mid \mathbf{w}, \mathbf{x}_n, \sigma^2)
13 | &=
14 | \text{Normal}(y_n \mid \mathrm{NN}(\mathbf{x}_n\;;\;\mathbf{w}), \sigma^2),
15 | \end{align*}
16 | where $\mathrm{NN}$ is a neural network whose weights and biases form
17 | the latent variables $\mathbf{w}$. Assume $\sigma^2$ is a
18 | known variance.
19 |
20 | Define the prior on the weights and biases $\mathbf{w}$ to be the standard normal
21 | \begin{align*}
22 | p(\mathbf{w})
23 | &=
24 | \text{Normal}(\mathbf{w} \mid \mathbf{0}, \mathbf{I}).
25 | \end{align*}
26 |
27 | Let's build the model in Edward. We define a 3-layer Bayesian neural
28 | network with $\tanh$ nonlinearities.
29 | \begin{lstlisting}[language=Python]
30 | from edward.models import Normal
31 |
32 | def neural_network(x):
33 | h = tf.tanh(tf.matmul(x, W_0) + b_0)
34 | h = tf.tanh(tf.matmul(h, W_1) + b_1)
35 | h = tf.matmul(h, W_2) + b_2
36 | return tf.reshape(h, [-1])
37 |
38 | N = 40 # number of data points
39 | D = 1 # number of features
40 |
41 | W_0 = Normal(loc=tf.zeros([D, 10]), scale=tf.ones([D, 10]))
42 | W_1 = Normal(loc=tf.zeros([10, 10]), scale=tf.ones([10, 10]))
43 | W_2 = Normal(loc=tf.zeros([10, 1]), scale=tf.ones([10, 1]))
44 | b_0 = Normal(loc=tf.zeros(10), scale=tf.ones(10))
45 | b_1 = Normal(loc=tf.zeros(10), scale=tf.ones(10))
46 | b_2 = Normal(loc=tf.zeros(1), scale=tf.ones(1))
47 |
48 | x = tf.cast(x_train, dtype=tf.float32)
49 | y = Normal(loc=neural_network(x), scale=0.1 * tf.ones(N))
50 | \end{lstlisting}
51 | This program builds the model assuming the features \texttt{x\_train}
52 | already exist in the Python environment. Alternatively, one can
53 | define a TensorFlow placeholder,
54 | \begin{lstlisting}[language=Python]
55 | x = tf.placeholder(tf.float32, [N, D])
56 | \end{lstlisting}
57 | The placeholder must be fed with data later during inference.
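
As a hedged sketch of fitting this model with variational inference,
pair each weight and bias with a mean-field normal approximation (one
possible setup; see the script referenced below for a complete
treatment):

\begin{lstlisting}[language=Python]
qW_0 = Normal(loc=tf.Variable(tf.random_normal([D, 10])),
              scale=tf.nn.softplus(tf.Variable(tf.random_normal([D, 10]))))
qW_1 = Normal(loc=tf.Variable(tf.random_normal([10, 10])),
              scale=tf.nn.softplus(tf.Variable(tf.random_normal([10, 10]))))
qW_2 = Normal(loc=tf.Variable(tf.random_normal([10, 1])),
              scale=tf.nn.softplus(tf.Variable(tf.random_normal([10, 1]))))
qb_0 = Normal(loc=tf.Variable(tf.random_normal([10])),
              scale=tf.nn.softplus(tf.Variable(tf.random_normal([10]))))
qb_1 = Normal(loc=tf.Variable(tf.random_normal([10])),
              scale=tf.nn.softplus(tf.Variable(tf.random_normal([10]))))
qb_2 = Normal(loc=tf.Variable(tf.random_normal([1])),
              scale=tf.nn.softplus(tf.Variable(tf.random_normal([1]))))

inference = ed.KLqp({W_0: qW_0, b_0: qb_0,
                     W_1: qW_1, b_1: qb_1,
                     W_2: qW_2, b_2: qb_2},
                    data={x: x_train, y: y_train})
inference.run(n_iter=1000)
\end{lstlisting}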
58 |
59 | A toy demonstration is available in the \href{/getting-started}{Getting Started} section.
60 | Source code is available at
61 | \href{https://github.com/blei-lab/edward/blob/master/examples/bayesian_nn.py}
62 | {\texttt{examples/bayesian\_nn.py}} in the Github repository.
63 |
64 | \subsubsection{References}\label{references}
65 |
--------------------------------------------------------------------------------
/docs/tex/tutorials/inference.tex:
--------------------------------------------------------------------------------
1 | \title{Inference of Probabilistic Models}
2 |
3 | \subsection{Inference of Probabilistic Models}
4 |
5 | This tutorial asks the question: what does it mean to do inference of
6 | probabilistic models? This sets the stage for understanding how to
7 | design inference algorithms in Edward.
8 |
9 | \subsubsection{The posterior}
10 |
11 | How can we use a model $p(\mathbf{x}, \mathbf{z})$ to analyze some
12 | data $\mathbf{x}$? In other words, what hidden structure $\mathbf{z}$
13 | explains the data? We seek to infer this hidden structure using the
14 | model.
15 |
16 | One method of inference leverages Bayes' rule to define the
17 | \emph{posterior}
18 | \begin{align*}
19 | p(\mathbf{z} \mid \mathbf{x})
20 | &=
21 | \frac{p(\mathbf{x}, \mathbf{z})}{\int p(\mathbf{x}, \mathbf{z}) \text{d}\mathbf{z}}.
22 | \end{align*}
23 | The posterior is the distribution of the latent variables
24 | $\mathbf{z}$, conditioned on some (observed) data $\mathbf{x}$.
25 | Drawing an analogy to representation learning, it is a probabilistic
26 | description of the data's hidden representation.
27 |
28 | From the perspective of inductivism, as practiced by classical
29 | Bayesians (and implicitly by frequentists),
30 | the posterior is our updated hypothesis about the latent variables.
31 | From the perspective of hypothetico-deductivism, as practiced by
32 | statisticians such as Box, Rubin, and Gelman, the posterior is simply
33 | a fitted model to data, to be criticized and thus revised
34 | \citep{box1982apology,gelman2013philosophy}.
35 |
36 | \subsubsection{Inferring the posterior}
37 |
38 | Now we know what the posterior represents. How do we calculate it? This is the
39 | central computational challenge in inference.
40 |
41 | The posterior is difficult to compute because of its normalizing
42 | constant, which is the integral in the denominator.
43 | This is often a high-dimensional integral that lacks an analytic (closed-form)
44 | solution. Thus, calculating the posterior means \emph{approximating} the
45 | posterior.
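
As a rare tractable aside: for a $\text{Beta}(1, 1)$ prior on a coin's
heads probability $z$ and $N$ conditionally independent
$\text{Bernoulli}(z)$ flips $x_1, \ldots, x_N$, conjugacy gives the
closed form
\begin{align*}
p(z \mid \mathbf{x})
&=
\text{Beta}\Big(z \;\Big|\; 1 + \textstyle\sum_n x_n,\;
1 + N - \textstyle\sum_n x_n\Big).
\end{align*}
Most models of interest admit no such closed form.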
46 |
47 | For details on how to specify inference in Edward, see the
48 | \href{/api/inference}{inference API}. We describe several examples in
49 | detail in the \href{/tutorials/}{tutorials}.
50 |
51 |
52 | \subsubsection{References}\label{references}
53 |
54 |
--------------------------------------------------------------------------------
/docs/tex/tutorials/map-laplace.tex:
--------------------------------------------------------------------------------
1 | \title{Laplace Approximation}
2 |
3 | \subsection{Laplace Approximation}
4 |
5 | (This tutorial follows the
6 | \href{/tutorials/map}{Maximum a posteriori estimation} tutorial.)
7 |
8 | Maximum a posteriori (MAP) estimation approximates the posterior $p(\mathbf{z} \mid \mathbf{x})$
9 | with a point mass (delta function) by simply capturing its mode. MAP is
10 | attractive because it is fast and efficient. How can we use MAP to construct a
11 | better approximation to the posterior?
12 |
13 | The Laplace approximation
14 | \citep{laplace1986memoir}
15 | is one way of improving a MAP estimate. The idea
16 | is to approximate the posterior with a normal distribution centered at the MAP
17 | estimate,
18 | \begin{align*}
19 | p(\mathbf{z} \mid \mathbf{x})
20 | &\approx
21 | \text{Normal}(\mathbf{z}\;;\; \mathbf{z}_\text{MAP}, \Lambda^{-1}).
22 | \end{align*}
23 | This requires computing a precision matrix $\Lambda$. Derived from a
24 | Taylor expansion, the Laplace approximation uses the Hessian of the
25 | negative log joint density at the MAP estimate.
26 | It is defined component-wise as
27 | \begin{align*}
28 | \Lambda_{ij}
29 | &=
30 | -\frac{\partial^2}{\partial z_i \partial z_j} \log p(\mathbf{x}, \mathbf{z}) \Big|_{\mathbf{z} = \mathbf{z}_\text{MAP}}.
31 | \end{align*}
32 | For flat priors (which reduce MAP to maximum likelihood), the
33 | precision matrix is known as the observed Fisher information
34 | \citep{fisher1925theory}.
35 | Edward uses TensorFlow's automatic differentiation, making this
36 | second-order gradient computation both simple and efficient to
37 | distribute.
38 |
39 | For more details, see the \href{/api/}{API} as well as its
40 | implementation in Edward's code base.
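
As a hedged usage sketch (the latent variable \texttt{w}, the
placeholder \texttt{X}, the data \texttt{X\_train} and
\texttt{y\_train}, and the dimension \texttt{D} are assumptions, not
code from this page):

\begin{lstlisting}[language=Python]
from edward.models import MultivariateNormalTriL

# After running, qw is centered at the MAP estimate, with covariance
# given by the inverse Hessian described above.
qw = MultivariateNormalTriL(
    loc=tf.Variable(tf.random_normal([D])),
    scale_tril=tf.Variable(tf.random_normal([D, D])))

inference = ed.Laplace({w: qw}, data={X: X_train, y: y_train})
inference.run()
\end{lstlisting}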
41 |
42 | \subsubsection{References}\label{references}
43 |
--------------------------------------------------------------------------------
/docs/tex/tutorials/model.tex:
--------------------------------------------------------------------------------
1 | \title{Probabilistic Models}
2 |
3 | \subsection{Probabilistic Models}
4 |
5 | A probabilistic model asserts how observations from a natural phenomenon arise.
6 | The model is a \emph{joint distribution}
7 | \begin{align*}
8 | p(\mathbf{x}, \mathbf{z})
9 | \end{align*}
10 | of observed variables $\mathbf{x}$ corresponding to data, and latent
11 | variables $\mathbf{z}$ that provide the hidden structure used to
12 | generate $\mathbf{x}$. The joint distribution factorizes into two
13 | components.
14 |
15 | The \emph{likelihood}
16 | \begin{align*}
17 | p(\mathbf{x} \mid \mathbf{z})
18 | \end{align*}
19 | is a probability distribution that describes how any data $\mathbf{x}$
20 | depend on the latent variables $\mathbf{z}$. The likelihood posits a
21 | data generating process, where the data $\mathbf{x}$ are assumed drawn
22 | from the likelihood conditioned on a particular hidden pattern
23 | described by $\mathbf{z}$.
24 |
25 | The \emph{prior}
26 | \begin{align*}
27 | p(\mathbf{z})
28 | \end{align*}
29 | is a probability distribution that describes the latent variables
30 | present in the data. It posits a generating process of the hidden structure.
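
As a toy sketch in Edward's language: a Beta prior and a Bernoulli
likelihood together define the joint $p(\mathbf{x}, \mathbf{z})$ of a
coin flipping model.

\begin{lstlisting}[language=Python]
from edward.models import Bernoulli, Beta

z = Beta(1.0, 1.0)                       # prior p(z)
x = Bernoulli(probs=z, sample_shape=10)  # likelihood p(x | z)
\end{lstlisting}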
31 |
32 | For details on how to specify a model in Edward, see the
33 | \href{/api/model}{model API}. We describe several examples in detail
34 | in the \href{/tutorials/}{tutorials}.
35 |
--------------------------------------------------------------------------------
/docs/tex/tutorials/variational-inference.tex:
--------------------------------------------------------------------------------
1 | \title{Variational Inference}
2 |
3 | \subsection{Variational Inference}
4 |
5 | Variational inference is an umbrella term for algorithms which cast
6 | posterior inference as optimization
7 | \citep{hinton1993keeping,waterhouse1996bayesian,jordan1999introduction}.
8 |
9 | The core idea involves two steps:
10 | \begin{enumerate}
11 | \item posit a family of distributions $q(\mathbf{z}\;;\;\lambda)$
12 | over the latent variables;
13 | \item match $q(\mathbf{z}\;;\;\lambda)$ to the posterior by
14 | optimizing over its parameters $\lambda$.
15 | \end{enumerate}
16 | This strategy converts the problem of computing the posterior
17 | $p(\mathbf{z} \mid \mathbf{x})$ into an optimization problem:
18 | minimize a divergence measure
19 | \begin{align*}
20 | \lambda^*
21 | &=
22 | \arg\min_\lambda \text{divergence}(
23 | p(\mathbf{z} \mid \mathbf{x})
24 | ,
25 | q(\mathbf{z}\;;\;\lambda)
26 | ).
27 | \end{align*}
28 | The optimized distribution $q(\mathbf{z}\;;\;\lambda^*)$ is used as
29 | a proxy to the posterior $p(\mathbf{z}\mid \mathbf{x})$.
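
As a minimal sketch in Edward (assuming \texttt{x\_data} holds ten
binary observations): the family $q(z;\lambda)$ is a Beta distribution
whose two parameters $\lambda$ are free variables, fit by
\texttt{ed.KLqp}.

\begin{lstlisting}[language=Python]
import edward as ed
import tensorflow as tf
from edward.models import Bernoulli, Beta

p = Beta(1.0, 1.0)
x = Bernoulli(probs=p, sample_shape=10)

qp = Beta(tf.nn.softplus(tf.Variable(tf.random_normal([]))),
          tf.nn.softplus(tf.Variable(tf.random_normal([]))))

inference = ed.KLqp({p: qp}, data={x: x_data})
inference.run(n_iter=500)
\end{lstlisting}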
30 |
31 | Edward takes the perspective that the posterior is (typically)
32 | intractable, and thus we must build a model of latent variables that
33 | best approximates the posterior.
34 | This is analogous to the perspective
35 | that the true data generating process is unknown, and thus we build
36 | models of data to best approximate the true process.
37 |
38 | For details on variational inference classes defined in Edward,
39 | see the \href{/api/inference}{inference API}.
40 | For background on specific variational inference algorithms in
41 | Edward, see the other inference \href{/tutorials/}{tutorials}.
42 |
43 | \subsubsection{References}\label{references}
44 |
45 |
--------------------------------------------------------------------------------
/edward/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | from edward import criticisms
6 | from edward import inferences
7 | from edward import models
8 | from edward import util
9 |
10 | # Direct imports for convenience
11 | from edward.criticisms import (
12 | evaluate, ppc, ppc_density_plot, ppc_stat_hist_plot)
13 | from edward.inferences import (
14 | Inference, MonteCarlo, VariationalInference,
15 | HMC, MetropolisHastings, SGLD, SGHMC,
16 | KLpq, KLqp, ReparameterizationKLqp, ReparameterizationKLKLqp,
17 | ReparameterizationEntropyKLqp, ReplicaExchangeMC, ScoreKLqp, ScoreKLKLqp,
18 | ScoreEntropyKLqp, ScoreRBKLqp, WakeSleep, GANInference, BiGANInference,
19 | WGANInference, ImplicitKLqp, MAP, Laplace, complete_conditional, Gibbs)
20 | from edward.models import RandomVariable
21 | from edward.util import (
22 | check_data, check_latent_vars, copy, dot,
23 | get_ancestors, get_blanket, get_children, get_control_variate_coef,
24 | get_descendants, get_parents, get_session, get_siblings, get_variables,
25 | is_independent, Progbar, random_variables, rbf, set_seed,
26 | to_simplex, transform)
27 | from edward.version import __version__, VERSION
28 |
29 | from tensorflow.python.util.all_util import remove_undocumented
30 |
31 | # Export modules and constants.
32 | _allowed_symbols = [
33 | 'criticisms',
34 | 'inferences',
35 | 'models',
36 | 'util',
37 | 'evaluate',
38 | 'ppc',
39 | 'ppc_density_plot',
40 | 'ppc_stat_hist_plot',
41 | 'Inference',
42 | 'MonteCarlo',
43 | 'VariationalInference',
44 | 'HMC',
45 | 'MetropolisHastings',
46 | 'SGLD',
47 | 'SGHMC',
48 | 'KLpq',
49 | 'KLqp',
50 | 'ReparameterizationKLqp',
51 | 'ReparameterizationKLKLqp',
52 | 'ReparameterizationEntropyKLqp',
53 | 'ScoreKLqp',
54 | 'ScoreKLKLqp',
55 | 'ScoreEntropyKLqp',
56 | 'ScoreRBKLqp',
57 | 'WakeSleep',
58 | 'GANInference',
59 | 'BiGANInference',
60 | 'WGANInference',
61 | 'ImplicitKLqp',
62 | 'MAP',
63 | 'Laplace',
64 | 'complete_conditional',
65 | 'Gibbs',
66 | 'RandomVariable',
67 | 'check_data',
68 | 'check_latent_vars',
69 | 'copy',
70 | 'dot',
71 | 'get_ancestors',
72 | 'get_blanket',
73 | 'get_children',
74 | 'get_control_variate_coef',
75 | 'get_descendants',
76 | 'get_parents',
77 | 'get_session',
78 | 'get_siblings',
79 | 'get_variables',
80 | 'is_independent',
81 | 'Progbar',
82 | 'random_variables',
83 | 'ReplicaExchangeMC',
84 | 'rbf',
85 | 'set_seed',
86 | 'to_simplex',
87 | 'transform',
88 | '__version__',
89 | 'VERSION',
90 | ]
91 |
92 | # Remove all extra symbols that don't have a docstring or are not explicitly
93 | # referenced in the whitelist.
94 | remove_undocumented(__name__, _allowed_symbols, [
95 | criticisms, inferences, models, util
96 | ])
97 |
--------------------------------------------------------------------------------
/edward/criticisms/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | """
3 | from __future__ import absolute_import
4 | from __future__ import division
5 | from __future__ import print_function
6 |
7 | from edward.criticisms.evaluate import *
8 | from edward.criticisms.ppc import *
9 | from edward.criticisms.ppc_plots import *
10 |
11 | from tensorflow.python.util.all_util import remove_undocumented
12 |
13 | _allowed_symbols = [
14 | 'evaluate',
15 | 'ppc',
16 | 'ppc_density_plot',
17 | 'ppc_stat_hist_plot',
18 | ]
19 |
20 | remove_undocumented(__name__, allowed_exception_list=_allowed_symbols)
21 |
--------------------------------------------------------------------------------
/edward/criticisms/ppc_plots.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 |
6 | def ppc_density_plot(y, y_rep):
7 | """Create 1D kernel density plot comparing data to samples from posterior.
8 |
9 | Args:
10 | y: np.ndarray.
11 | A 1-D NumPy array.
12 | y_rep: np.ndarray.
13 | A 2-D NumPy array where rows represent different samples from posterior.
14 |
15 | Returns:
16 | matplotlib axes
17 |
18 | #### Examples
19 |
20 | ```python
21 | import matplotlib.pyplot as plt
22 |
23 | y = np.random.randn(20)
24 | y_rep = np.random.randn(20, 20)
25 |
26 | ed.ppc_density_plot(y, y_rep)
27 | plt.show()
28 | ```
29 | """
30 | import matplotlib.pyplot as plt
31 | import seaborn as sns
32 | ax = sns.kdeplot(y, color="maroon")
33 |
34 | n = y_rep.shape[0]
35 |
36 | for i in range(n):
37 | ax = sns.kdeplot(y_rep[i, :], color="maroon", alpha=0.2, linewidth=0.8)
38 |
39 | y_line = plt.Line2D([], [], color='maroon', label='y')
40 | y_rep_line = plt.Line2D([], [], color='maroon', alpha=0.2, label='y_rep')
41 |
42 | handles = [y_line, y_rep_line]
43 | labels = ['y', r'$y_{rep}$']
44 |
45 | ax.legend(handles, labels)
46 |
47 | return ax
48 |
49 |
50 | def ppc_stat_hist_plot(y_stats, yrep_stats, stat_name=None, **kwargs):
51 | """Create histogram plot comparing data to samples from posterior.
52 |
53 | Args:
54 | y_stats: float.
55 | Float representing statistic value of observed data.
56 | yrep_stats: np.ndarray.
57 | A 1-D NumPy array.
58 | stat_name: string.
59 | Optional string value for including statistic name in legend.
60 | **kwargs:
61 | Keyword arguments used by seaborn.distplot can be given to customize plot.
62 |
63 | Returns:
64 | matplotlib axes.
65 |
66 | #### Examples
67 |
68 | ```python
69 | import matplotlib.pyplot as plt
70 |
71 | # DATA
72 | x_data = np.array([0, 1, 0, 0, 0, 0, 0, 0, 0, 1])
73 |
74 | # MODEL
75 | p = Beta(1.0, 1.0)
76 | x = Bernoulli(probs=p, sample_shape=10)
77 |
78 | # INFERENCE
79 | qp = Beta(tf.nn.softplus(tf.Variable(tf.random_normal([]))),
80 | tf.nn.softplus(tf.Variable(tf.random_normal([]))))
81 |
82 | inference = ed.KLqp({p: qp}, data={x: x_data})
83 | inference.run(n_iter=500)
84 |
85 | # CRITICISM
86 | x_post = ed.copy(x, {p: qp})
87 | y_rep, y = ed.ppc(
88 | lambda xs, zs: tf.reduce_mean(tf.cast(xs[x_post], tf.float32)),
89 | data={x_post: x_data})
90 |
91 | ed.ppc_stat_hist_plot(
92 | y[0], y_rep, stat_name=r'$T \equiv$mean', bins=10)
93 | plt.show()
94 | ```
95 | """
96 | import matplotlib.pyplot as plt
97 | import seaborn as sns
98 | ax = sns.distplot(yrep_stats, kde=False, label=r'$T(y_{rep})$', **kwargs)
99 |
100 | max_value = ax.get_ylim()[1]
101 |
102 | plt.vlines(y_stats, ymin=0.0, ymax=max_value, label='T(y)')
103 |
104 | if stat_name is not None:
105 | plt.legend(title=stat_name)
106 | else:
107 | plt.legend()
108 |
109 | return ax
110 |
--------------------------------------------------------------------------------
/edward/inferences/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | """
3 | from __future__ import absolute_import
4 | from __future__ import division
5 | from __future__ import print_function
6 |
7 | from edward.inferences.bigan_inference import *
8 | from edward.inferences.conjugacy import *
9 | from edward.inferences.gan_inference import *
10 | from edward.inferences.gibbs import *
11 | from edward.inferences.hmc import *
12 | from edward.inferences.implicit_klqp import *
13 | from edward.inferences.inference import *
14 | from edward.inferences.klpq import *
15 | from edward.inferences.klqp import *
16 | from edward.inferences.laplace import *
17 | from edward.inferences.map import *
18 | from edward.inferences.metropolis_hastings import *
19 | from edward.inferences.monte_carlo import *
20 | from edward.inferences.replica_exchange_mc import *
21 | from edward.inferences.sgld import *
22 | from edward.inferences.sghmc import *
23 | from edward.inferences.variational_inference import *
24 | from edward.inferences.wake_sleep import *
25 | from edward.inferences.wgan_inference import *
26 |
27 | from tensorflow.python.util.all_util import remove_undocumented
28 |
29 | _allowed_symbols = [
30 | 'BiGANInference',
31 | 'complete_conditional',
32 | 'GANInference',
33 | 'Gibbs',
34 | 'HMC',
35 | 'ImplicitKLqp',
36 | 'Inference',
37 | 'KLpq',
38 | 'KLqp',
39 | 'ReparameterizationKLqp',
40 | 'ReparameterizationKLKLqp',
41 | 'ReparameterizationEntropyKLqp',
42 | 'ReplicaExchangeMC',
43 | 'ScoreKLqp',
44 | 'ScoreKLKLqp',
45 | 'ScoreEntropyKLqp',
46 | 'ScoreRBKLqp',
47 | 'Laplace',
48 | 'MAP',
49 | 'MetropolisHastings',
50 | 'MonteCarlo',
51 | 'SGLD',
52 | 'SGHMC',
53 | 'VariationalInference',
54 | 'WakeSleep',
55 | 'WGANInference',
56 | ]
57 |
58 | remove_undocumented(__name__, allowed_exception_list=_allowed_symbols)
59 |
--------------------------------------------------------------------------------
/edward/inferences/conjugacy/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | """
3 | from __future__ import absolute_import
4 | from __future__ import division
5 | from __future__ import print_function
6 |
7 | from edward.inferences.conjugacy.conjugacy import *
8 |
9 | from tensorflow.python.util.all_util import remove_undocumented
10 |
11 | _allowed_symbols = [
12 | 'complete_conditional',
13 | ]
14 |
15 | remove_undocumented(__name__, allowed_exception_list=_allowed_symbols)
16 |
--------------------------------------------------------------------------------
/edward/models/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | """
3 | from __future__ import absolute_import
4 | from __future__ import division
5 | from __future__ import print_function
6 |
7 | from edward.models.dirichlet_process import *
8 | from edward.models.empirical import *
9 | from edward.models.param_mixture import *
10 | from edward.models.point_mass import *
11 | from edward.models.random_variable import RandomVariable
12 | from edward.models.random_variables import *
13 |
14 | from tensorflow.python.util.all_util import remove_undocumented
15 | from edward.models import random_variables as _module
16 |
17 | _allowed_symbols = [
18 | 'DirichletProcess',
19 | 'Empirical',
20 | 'ParamMixture',
21 | 'PointMass',
22 | 'RandomVariable',
23 | ]
24 | for name in dir(_module):
25 | obj = getattr(_module, name)
26 | if (isinstance(obj, type) and
27 | issubclass(obj, RandomVariable) and
28 | obj != RandomVariable):
29 | _allowed_symbols.append(name)
30 |
31 | remove_undocumented(__name__, allowed_exception_list=_allowed_symbols)
32 |
--------------------------------------------------------------------------------
/edward/models/random_variables.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | import inspect as _inspect
6 |
7 | from edward.models.random_variable import RandomVariable as _RandomVariable
8 | from tensorflow.contrib import distributions as _distributions
9 |
10 | # Automatically generate random variable classes from classes in
11 | # tf.contrib.distributions.
12 | _globals = globals()
13 | for _name in sorted(dir(_distributions)):
14 | _candidate = getattr(_distributions, _name)
15 | if (_inspect.isclass(_candidate) and
16 | _candidate != _distributions.Distribution and
17 | issubclass(_candidate, _distributions.Distribution)):
18 |
19 | # to use _candidate's docstring, must write a new __init__ method
20 | def __init__(self, *args, **kwargs):
21 | _RandomVariable.__init__(self, *args, **kwargs)
22 | __init__.__doc__ = _candidate.__init__.__doc__
23 | _params = {'__doc__': _candidate.__doc__,
24 | '__init__': __init__}
25 | _globals[_name] = type(_name, (_RandomVariable, _candidate), _params)
26 |
27 | del _candidate
28 |
29 | # Add supports; these are used, e.g., in conjugacy.
30 | Bernoulli.support = 'binary'
31 | Beta.support = '01'
32 | Binomial.support = 'onehot'
33 | Categorical.support = 'categorical'
34 | Chi2.support = 'nonnegative'
35 | Dirichlet.support = 'simplex'
36 | Exponential.support = 'nonnegative'
37 | Gamma.support = 'nonnegative'
38 | InverseGamma.support = 'nonnegative'
39 | Laplace.support = 'real'
40 | Multinomial.support = 'onehot'
41 | MultivariateNormalDiag.support = 'multivariate_real'
42 | Normal.support = 'real'
43 | Poisson.support = 'countable'
44 |
45 | del absolute_import
46 | del division
47 | del print_function
48 |
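# Hedged illustration (comments only): each generated class, e.g.
# Normal, subclasses both RandomVariable and its
# tf.contrib.distributions counterpart, so
#   x = Normal(loc=0.0, scale=1.0)
# is a node in the Edward graph that also exposes the distribution's
# methods, such as log_prob(), mean(), and sample().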
--------------------------------------------------------------------------------
/edward/util/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | """
3 | from __future__ import absolute_import
4 | from __future__ import division
5 | from __future__ import print_function
6 |
7 | from edward.util.graphs import *
8 | from edward.util.metrics import *
9 | from edward.util.progbar import *
10 | from edward.util.random_variables import *
11 | from edward.util.tensorflow import *
12 |
13 | from tensorflow.python.util.all_util import remove_undocumented
14 |
15 | _allowed_symbols = [
16 | 'check_data',
17 | 'check_latent_vars',
18 | 'compute_multinomial_mode',
19 | 'copy',
20 | 'dot',
21 | 'get_ancestors',
22 | 'get_blanket',
23 | 'get_children',
24 | 'get_control_variate_coef',
25 | 'get_descendants',
26 | 'get_parents',
27 | 'get_session',
28 | 'get_siblings',
29 | 'get_variables',
30 | 'is_independent',
31 | 'Progbar',
32 | 'random_variables',
33 | 'rbf',
34 | 'set_seed',
35 | 'to_simplex',
36 | 'transform',
37 | 'with_binary_averaging'
38 | ]
39 |
40 | remove_undocumented(__name__, allowed_exception_list=_allowed_symbols)
41 |
--------------------------------------------------------------------------------
/edward/util/graphs.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | import numpy as np
6 | import six
7 | import sys
8 | import tensorflow as tf
9 |
10 | from edward.models.random_variable import _RANDOM_VARIABLE_COLLECTION
11 |
12 |
13 | def get_session():
14 | """Get the globally defined TensorFlow session.
15 |
16 | If the session is not already defined, then the function will create
17 | a global session.
18 |
19 | Returns:
20 | _ED_SESSION: tf.InteractiveSession.
21 | """
22 | global _ED_SESSION
23 | if tf.get_default_session() is None:
24 | _ED_SESSION = tf.InteractiveSession()
25 | else:
26 | _ED_SESSION = tf.get_default_session()
27 |
28 | save_stderr = sys.stderr
29 | try:
30 | import os
31 | sys.stderr = open(os.devnull, 'w') # suppress keras import
32 | from keras import backend as K
33 | sys.stderr = save_stderr
34 | have_keras = True
35 | except ImportError:
36 | sys.stderr = save_stderr
37 | have_keras = False
38 | if have_keras:
39 | K.set_session(_ED_SESSION)
40 |
41 | return _ED_SESSION
42 |
43 |
44 | def random_variables(graph=None):
45 | """Return all random variables in the TensorFlow graph.
46 |
47 | Args:
48 | graph: TensorFlow graph.
49 |
50 | Returns:
51 | list of RandomVariable.
52 | """
53 | if graph is None:
54 | graph = tf.get_default_graph()
55 |
56 | return _RANDOM_VARIABLE_COLLECTION[graph]
57 |
58 |
59 | def set_seed(x):
60 | """Set seed for both NumPy and TensorFlow.
61 |
62 | Args:
63 | x: int, float.
64 | seed
65 | """
66 | node_names = list(six.iterkeys(tf.get_default_graph()._nodes_by_name))
67 | if len(node_names) > 0 and node_names != ['keras_learning_phase']:
68 | raise RuntimeError("Seeding is not supported after initializing "
69 | "part of the graph. "
70 | "Please move set_seed to the beginning of your code.")
71 |
72 | np.random.seed(x)
73 | tf.set_random_seed(x)
74 |
--------------------------------------------------------------------------------
/edward/util/metrics.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | from functools import wraps
6 |
7 | import tensorflow as tf
8 |
9 |
10 | def with_binary_averaging(metric):
11 | """
12 | Inspired by scikit-learn's _average_binary_score function:
13 | https://github.com/scikit-learn/scikit-learn/blob/d9fdd8b0d1053cb47af8e3823b7a05279dd72054/sklearn/metrics/base.py#L23.
14 |
15 | `None`: computes the specified metric along the second-to-last
16 | dimension of `y_true` and `y_pred`. Returns a vector of "class-wise"
17 | metrics.
18 | `'macro'`: same as `None`, except compute the (unweighted) global
19 | average of the resulting vector.
20 | `'micro'`: flatten `y_true` and `y_pred` into vectors, then compute
21 | `'macro'`
22 | """
23 | AVERAGE_OPTIONS = (None, 'micro', 'macro')
24 |
25 | @wraps(metric)
26 | def with_binary_averaging(*args, **kwargs):
27 | y_true, y_pred = args
28 | y_true = tf.cast(y_true, tf.float32)
29 | y_pred = tf.cast(y_pred, tf.float32)
30 | if len(y_true.shape) < 2 and len(y_pred.shape) < 2:
31 | y_true = tf.expand_dims(y_true, 0)
32 | y_pred = tf.expand_dims(y_pred, 0)
33 |
34 | average = kwargs.get('average', 'macro')
35 | if average not in AVERAGE_OPTIONS:
36 | raise ValueError('average has to be one of {0}'
37 |                        ''.format(AVERAGE_OPTIONS))
38 | if average is None:
39 | return metric(y_true, y_pred)
40 | if average == 'macro':
41 | return tf.reduce_mean(metric(y_true, y_pred))
42 | if average == 'micro':
43 | y_true = tf.reshape(y_true, [1, -1])
44 | y_pred = tf.reshape(y_pred, [1, -1])
45 | return tf.reduce_mean(metric(y_true, y_pred))
46 | return with_binary_averaging
47 |
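# Hedged usage sketch (hypothetical metric, shown as comments only):
#
#   @with_binary_averaging
#   def mean_absolute_error(y_true, y_pred):
#     return tf.reduce_mean(tf.abs(y_true - y_pred), axis=-1)
#
#   mean_absolute_error(y_true, y_pred)                # macro average
#   mean_absolute_error(y_true, y_pred, average=None)  # class-wise vector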
--------------------------------------------------------------------------------
/edward/version.py:
--------------------------------------------------------------------------------
1 | __version__ = '1.3.5'
2 | VERSION = __version__
3 |
--------------------------------------------------------------------------------
/examples/bayesian_logistic_regression.py:
--------------------------------------------------------------------------------
1 | """Bayesian logistic regression using Hamiltonian Monte Carlo.
2 |
3 | We visualize the fit.
4 | """
5 | from __future__ import absolute_import
6 | from __future__ import division
7 | from __future__ import print_function
8 |
9 | import edward as ed
10 | import matplotlib.pyplot as plt
11 | import numpy as np
12 | import tensorflow as tf
13 |
14 | from edward.models import Bernoulli, Normal, Empirical
15 |
16 | tf.flags.DEFINE_integer("N", default=40, help="Number of data points.")
17 | tf.flags.DEFINE_integer("D", default=1, help="Number of features.")
18 | tf.flags.DEFINE_integer("T", default=5000, help="Number of posterior samples.")
19 |
20 | FLAGS = tf.flags.FLAGS
21 |
22 |
23 | def build_toy_dataset(N, noise_std=0.1):
24 | D = 1
25 | X = np.linspace(-6, 6, num=N)
26 | y = np.tanh(X) + np.random.normal(0, noise_std, size=N)
27 | y[y < 0.5] = 0
28 | y[y >= 0.5] = 1
29 | X = (X - 4.0) / 4.0
30 | X = X.reshape((N, D))
31 | return X, y
32 |
33 |
34 | def main(_):
35 | ed.set_seed(42)
36 |
37 | # DATA
38 | X_train, y_train = build_toy_dataset(FLAGS.N)
39 |
40 | # MODEL
41 | X = tf.placeholder(tf.float32, [FLAGS.N, FLAGS.D])
42 | w = Normal(loc=tf.zeros(FLAGS.D), scale=3.0 * tf.ones(FLAGS.D))
43 | b = Normal(loc=tf.zeros([]), scale=3.0 * tf.ones([]))
44 | y = Bernoulli(logits=ed.dot(X, w) + b)
45 |
46 | # INFERENCE
47 | qw = Empirical(params=tf.get_variable("qw/params", [FLAGS.T, FLAGS.D]))
48 | qb = Empirical(params=tf.get_variable("qb/params", [FLAGS.T]))
49 |
50 | inference = ed.HMC({w: qw, b: qb}, data={X: X_train, y: y_train})
51 | inference.initialize(n_print=10, step_size=0.6)
52 |
53 | # Alternatively, use variational inference.
54 | # qw_loc = tf.get_variable("qw_loc", [FLAGS.D])
55 | # qw_scale = tf.nn.softplus(tf.get_variable("qw_scale", [FLAGS.D]))
56 | # qb_loc = tf.get_variable("qb_loc", []) + 10.0
57 | # qb_scale = tf.nn.softplus(tf.get_variable("qb_scale", []))
58 |
59 | # qw = Normal(loc=qw_loc, scale=qw_scale)
60 | # qb = Normal(loc=qb_loc, scale=qb_scale)
61 |
62 | # inference = ed.KLqp({w: qw, b: qb}, data={X: X_train, y: y_train})
63 | # inference.initialize(n_print=10, n_iter=600)
64 |
65 | tf.global_variables_initializer().run()
66 |
67 | # Set up figure.
68 | fig = plt.figure(figsize=(8, 8), facecolor='white')
69 | ax = fig.add_subplot(111, frameon=False)
70 | plt.ion()
71 | plt.show(block=False)
72 |
73 | # Build samples from inferred posterior.
74 | n_samples = 50
75 | inputs = np.linspace(-5, 3, num=400, dtype=np.float32).reshape((400, 1))
76 | probs = tf.stack([tf.sigmoid(ed.dot(inputs, qw.sample()) + qb.sample())
77 | for _ in range(n_samples)])
78 |
79 | for t in range(inference.n_iter):
80 | info_dict = inference.update()
81 | inference.print_progress(info_dict)
82 |
83 | if t % inference.n_print == 0:
84 | outputs = probs.eval()
85 |
86 | # Plot data and functions
87 | plt.cla()
88 | ax.plot(X_train[:], y_train, 'bx')
89 | for s in range(n_samples):
90 | ax.plot(inputs[:], outputs[s], alpha=0.2)
91 |
92 | ax.set_xlim([-5, 3])
93 | ax.set_ylim([-0.5, 1.5])
94 | plt.draw()
95 | plt.pause(1.0 / 60.0)
96 |
97 | if __name__ == "__main__":
98 | tf.app.run()
99 |
--------------------------------------------------------------------------------
/examples/beta_bernoulli.py:
--------------------------------------------------------------------------------
1 | """A simple coin flipping example. Inspired by Stan's toy example.
2 | """
3 | from __future__ import absolute_import
4 | from __future__ import division
5 | from __future__ import print_function
6 |
7 | import edward as ed
8 | import matplotlib.pyplot as plt
9 | import numpy as np
10 | import tensorflow as tf
11 |
12 | from edward.models import Bernoulli, Beta, Empirical
13 |
14 |
15 | def main(_):
16 | ed.set_seed(42)
17 |
18 | # DATA
19 | x_data = np.array([0, 1, 0, 0, 0, 0, 0, 0, 0, 1])
20 |
21 | # MODEL
22 | p = Beta(1.0, 1.0)
23 | x = Bernoulli(probs=p, sample_shape=10)
24 |
25 | # INFERENCE
26 | qp = Empirical(params=tf.get_variable(
27 | "qp/params", [1000], initializer=tf.constant_initializer(0.5)))
28 |
29 | proposal_p = Beta(3.0, 9.0)
30 |
31 | inference = ed.MetropolisHastings({p: qp}, {p: proposal_p}, data={x: x_data})
32 | inference.run()
33 |
34 | # CRITICISM
35 | # exact posterior has mean 0.25 and std 0.12
36 | sess = ed.get_session()
37 | mean, stddev = sess.run([qp.mean(), qp.stddev()])
38 | print("Inferred posterior mean:")
39 | print(mean)
40 | print("Inferred posterior stddev:")
41 | print(stddev)
42 |
43 | x_post = ed.copy(x, {p: qp})
44 | tx_rep, tx = ed.ppc(
45 | lambda xs, zs: tf.reduce_mean(tf.cast(xs[x_post], tf.float32)),
46 | data={x_post: x_data})
47 | ed.ppc_stat_hist_plot(
48 | tx[0], tx_rep, stat_name=r'$T \equiv$mean', bins=10)
49 | plt.show()
50 |
51 | if __name__ == "__main__":
52 | tf.app.run()
53 |
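54 | # Sanity check: the model is conjugate, so the posterior is available in
55 | # closed form. With a Beta(1, 1) prior and 2 heads in 10 flips,
56 | # p | x ~ Beta(1 + 2, 1 + 8) = Beta(3, 9), whose mean is 3 / 12 = 0.25
57 | # and stddev is sqrt(3 * 9 / (12 ** 2 * 13)) ~= 0.12, as quoted above.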
--------------------------------------------------------------------------------
/examples/beta_bernoulli_conjugate.py:
--------------------------------------------------------------------------------
1 | """A simple coin flipping example that exploits conjugacy.
2 |
3 | Inspired by Stan's toy example.
4 | """
5 | from __future__ import absolute_import
6 | from __future__ import division
7 | from __future__ import print_function
8 |
9 | import edward as ed
10 | import numpy as np
11 | import six
12 | import tensorflow as tf
13 |
14 | from edward.models import Bernoulli, Beta
15 |
16 |
17 | def main(_):
18 | ed.set_seed(42)
19 |
20 | # DATA
21 | x_data = np.array([0, 1, 0, 0, 0, 0, 0, 0, 0, 1])
22 |
23 | # MODEL
24 | p = Beta(1.0, 1.0)
25 | x = Bernoulli(probs=p, sample_shape=10)
26 |
27 | # COMPLETE CONDITIONAL
28 | p_cond = ed.complete_conditional(p)
29 |
30 | sess = ed.get_session()
31 |
32 | print('p(probs | x) type:', p_cond.parameters['name'])
33 | param_vals = sess.run({key: val for
34 | key, val in six.iteritems(p_cond.parameters)
35 | if isinstance(val, tf.Tensor)}, {x: x_data})
36 | print('parameters:')
37 | for key, val in six.iteritems(param_vals):
38 | print('%s:\t%.3f' % (key, val))
39 |
40 | if __name__ == "__main__":
41 | tf.app.run()
42 |
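43 | # For this data (2 successes in 10 draws), conjugacy gives
44 | # p | x ~ Beta(1 + 2, 1 + 8) = Beta(3, 9), so the parameters printed
45 | # above should correspond to concentrations of 3.0 and 9.0.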
--------------------------------------------------------------------------------
/examples/cox_process.py:
--------------------------------------------------------------------------------
1 | """A Cox process model for spatial analysis
2 | (Cox, 1955; Miller et al., 2014).
3 |
4 | The data set is an N x V matrix. There are N NBA players, X =
5 | {x_1, ..., x_N}, where each x_n has a set of V counts. x_{n, v} is
6 | the number of attempted basketball shots for the nth NBA player at
7 | location v.
8 |
9 | We model a latent intensity function for each data point. Let K be the
10 | N x V x V stack of covariance matrices applied to the data set X with fixed
11 | kernel hyperparameters, where a slice K_n is the V x V covariance
12 | matrix over counts for a data point x_n.
13 |
14 | For n = 1, ..., N,
15 | p(f_n) = N(f_n | 0, K_n),
16 | p(x_n | f_n) = \prod_{v=1}^V p(x_{n,v} | f_{n,v}),
17 | where p(x_{n,v} | f_{n, v}) = Poisson(x_{n,v} | exp(f_{n,v})).
18 | """
19 | from __future__ import absolute_import
20 | from __future__ import division
21 | from __future__ import print_function
22 |
23 | import edward as ed
24 | import numpy as np
25 | import tensorflow as tf
26 |
27 | from edward.models import MultivariateNormalTriL, Normal, Poisson
28 | from edward.util import rbf
29 | from scipy.stats import multivariate_normal, poisson
30 |
31 | tf.flags.DEFINE_integer("N", default=308, help="Number of NBA players.")
32 | tf.flags.DEFINE_integer("V", default=2, help="Number of shot locations.")
33 |
34 | FLAGS = tf.flags.FLAGS
35 |
36 |
37 | def build_toy_dataset(N, V):
38 | """A simulator mimicking the data set from 2015-2016 NBA season with
39 | 308 NBA players and ~150,000 shots."""
40 | L = np.tril(np.random.normal(2.5, 0.1, size=[V, V]))
41 | K = np.matmul(L, L.T)
42 | x = np.zeros([N, V])
43 | for n in range(N):
44 | f_n = multivariate_normal.rvs(cov=K, size=1)
45 | for v in range(V):
46 | x[n, v] = poisson.rvs(mu=np.exp(f_n[v]), size=1)
47 |
48 | return x
49 |
50 |
51 | def main(_):
52 | ed.set_seed(42)
53 |
54 | # DATA
55 | x_data = build_toy_dataset(FLAGS.N, FLAGS.V)
56 |
57 | # MODEL
58 | x_ph = tf.placeholder(tf.float32, [FLAGS.N, FLAGS.V])
59 |
60 | # Form (N, V, V) covariance, one matrix per data point.
61 | K = tf.stack([rbf(tf.reshape(xn, [FLAGS.V, 1])) + tf.diag([1e-6, 1e-6])
62 | for xn in tf.unstack(x_ph)])
63 | f = MultivariateNormalTriL(loc=tf.zeros([FLAGS.N, FLAGS.V]),
64 | scale_tril=tf.cholesky(K))
65 | x = Poisson(rate=tf.exp(f))
66 |
67 | # INFERENCE
68 | qf = Normal(
69 | loc=tf.get_variable("qf/loc", [FLAGS.N, FLAGS.V]),
70 | scale=tf.nn.softplus(tf.get_variable("qf/scale", [FLAGS.N, FLAGS.V])))
71 |
72 | inference = ed.KLqp({f: qf}, data={x: x_data, x_ph: x_data})
73 | inference.run(n_iter=5000)
74 |
75 | if __name__ == "__main__":
76 | tf.app.run()
77 |
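78 | # Note: this is a log-Gaussian Cox process. The exp link in
79 | # Poisson(rate=tf.exp(f)) keeps the rates positive while f stays
80 | # Gaussian; qf is a fully factorized normal approximation to p(f | x),
81 | # and the RBF kernel hyperparameters are held fixed rather than inferred.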
--------------------------------------------------------------------------------
/examples/dirichlet_categorical.py:
--------------------------------------------------------------------------------
1 | """Dirichlet-Categorical with variational inference."""
2 | from __future__ import absolute_import
3 | from __future__ import division
4 | from __future__ import print_function
5 |
6 | import edward as ed
7 | import numpy as np
8 | import tensorflow as tf
9 |
10 | from edward.models import Categorical, Dirichlet
11 |
12 | tf.flags.DEFINE_integer("N", default=1000, help="")
13 | tf.flags.DEFINE_integer("K", default=4, help="")
14 |
15 | FLAGS = tf.flags.FLAGS
16 |
17 |
18 | def main(_):
19 | # DATA
20 | pi_true = np.random.dirichlet(np.array([20.0, 30.0, 10.0, 10.0]))
21 | z_data = np.array([np.random.choice(FLAGS.K, 1, p=pi_true)[0]
22 | for n in range(FLAGS.N)])
23 | print("pi: {}".format(pi_true))
24 |
25 | # MODEL
26 |   pi = Dirichlet(tf.ones(FLAGS.K))
27 | z = Categorical(probs=pi, sample_shape=FLAGS.N)
28 |
29 | # INFERENCE
30 | qpi = Dirichlet(tf.nn.softplus(
31 | tf.get_variable("qpi/concentration", [FLAGS.K])))
32 |
33 | inference = ed.KLqp({pi: qpi}, data={z: z_data})
34 | inference.run(n_iter=1500, n_samples=30)
35 |
36 | sess = ed.get_session()
37 | print("Inferred pi: {}".format(sess.run(qpi.mean())))
38 |
39 | if __name__ == "__main__":
40 | tf.app.run()
41 |
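42 | # Sanity check: the model is conjugate, so the exact posterior is
43 | # Dirichlet(1 + counts), where counts are the category counts in z_data;
44 | # the inferred mean of qpi should be close to (1 + counts) / (K + N).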
--------------------------------------------------------------------------------
/examples/eight_schools/eight_schools.py:
--------------------------------------------------------------------------------
1 | """Implement the stan 8 schools example using the recommended non-centred
2 | parameterization.
3 |
4 | The Stan example is slightly modified to avoid improper priors and
5 | half-Cauchy priors. Inference is with Edward using both HMC
6 | and KLqp.
7 |
8 | This model has a hierarchy and an inferred variance - yet the example is
9 | very simple - only the Normal distribution is used.
10 |
11 | #### References
12 | https://github.com/stan-dev/rstan/wiki/RStan-Getting-Started
13 | http://mc-stan.org/users/documentation/case-studies/divergences_and_bias.html
14 | """
15 |
16 | from __future__ import absolute_import
17 | from __future__ import division
18 | from __future__ import print_function
19 |
20 | import edward as ed
21 | import tensorflow as tf
22 | import numpy as np
23 | from edward.models import Normal, Empirical
24 |
25 |
26 | def main(_):
27 | # data
28 | J = 8
29 | data_y = np.array([28, 8, -3, 7, -1, 1, 18, 12])
30 | data_sigma = np.array([15, 10, 16, 11, 9, 11, 10, 18])
31 |
32 | # model definition
33 | mu = Normal(0., 10.)
34 | logtau = Normal(5., 1.)
35 | theta_prime = Normal(tf.zeros(J), tf.ones(J))
36 | sigma = tf.placeholder(tf.float32, J)
37 | y = Normal(mu + tf.exp(logtau) * theta_prime, sigma * tf.ones([J]))
38 |
39 | data = {y: data_y, sigma: data_sigma}
40 |
41 | # ed.KLqp inference
42 | with tf.variable_scope('q_logtau'):
43 | q_logtau = Normal(tf.get_variable('loc', []),
44 | tf.nn.softplus(tf.get_variable('scale', [])))
45 |
46 | with tf.variable_scope('q_mu'):
47 | q_mu = Normal(tf.get_variable('loc', []),
48 | tf.nn.softplus(tf.get_variable('scale', [])))
49 |
50 | with tf.variable_scope('q_theta_prime'):
51 | q_theta_prime = Normal(tf.get_variable('loc', [J]),
52 | tf.nn.softplus(tf.get_variable('scale', [J])))
53 |
54 | inference = ed.KLqp({logtau: q_logtau, mu: q_mu,
55 | theta_prime: q_theta_prime}, data=data)
56 | inference.run(n_samples=15, n_iter=60000)
57 | print("==== ed.KLqp inference ====")
58 | print("E[mu] = %f" % (q_mu.mean().eval()))
59 | print("E[logtau] = %f" % (q_logtau.mean().eval()))
60 | print("E[theta_prime]=")
61 | print((q_theta_prime.mean().eval()))
62 | print("==== end ed.KLqp inference ====")
63 | print("")
64 | print("")
65 |
66 | # HMC inference
67 | S = 400000
68 | burn = S // 2
69 |
70 | hq_logtau = Empirical(tf.get_variable('hq_logtau', [S]))
71 | hq_mu = Empirical(tf.get_variable('hq_mu', [S]))
72 | hq_theta_prime = Empirical(tf.get_variable('hq_thetaprime', [S, J]))
73 |
74 | inference = ed.HMC({logtau: hq_logtau, mu: hq_mu,
75 | theta_prime: hq_theta_prime}, data=data)
76 | inference.run()
77 |
78 | print("==== ed.HMC inference ====")
79 | print("E[mu] = %f" % (hq_mu.params.eval()[burn:].mean()))
80 | print("E[logtau] = %f" % (hq_logtau.params.eval()[burn:].mean()))
81 | print("E[theta_prime]=")
82 | print(hq_theta_prime.params.eval()[burn:, ].mean(0))
83 | print("==== end ed.HMC inference ====")
84 | print("")
85 | print("")
86 |
87 |
88 | if __name__ == "__main__":
89 | tf.app.run()
90 |
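91 | # Note on the parameterization: the school effects are written as
92 | # theta = mu + exp(logtau) * theta_prime with theta_prime ~ N(0, 1).
93 | # This non-centred form removes the strong coupling between the scale
94 | # and the effects that produces the "funnel" geometry discussed in the
95 | # referenced case study, which samplers handle poorly in centred form.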
--------------------------------------------------------------------------------
/examples/eight_schools/eight_schools.stan:
--------------------------------------------------------------------------------
1 | data {
2 | int J;
3 | real y[J];
4 | real sigma[J];
5 | }
6 |
7 | parameters {
8 | real mu;
9 | real logtau;
10 | real theta_prime[J];
11 | }
12 |
13 | model {
14 | mu ~ normal(0, 10);
15 | logtau ~ normal(5, 1);
16 | theta_prime ~ normal(0, 1);
17 | for (j in 1:J) {
18 | y[j] ~ normal(mu + exp(logtau) * theta_prime[j], sigma[j]);
19 | }
20 | }
21 |
--------------------------------------------------------------------------------
/examples/eight_schools/eight_schools_pystan.py:
--------------------------------------------------------------------------------
1 | """Implement the stan 8 schools example using the recommended non-centred
2 | parameterization.
3 |
4 | The Stan example is slightly modified to avoid improper priors and
5 | half-Cauchy priors. Inference is with Stan using NUTS; pystan is required.
6 |
7 | This model has a hierarchy and an inferred variance - yet the example is
8 | very simple - only the Normal distribution is used.
9 |
10 | #### References
11 | https://github.com/stan-dev/rstan/wiki/RStan-Getting-Started
12 | http://mc-stan.org/users/documentation/case-studies/divergences_and_bias.html
13 | """
14 |
15 | from __future__ import absolute_import
16 | from __future__ import division
17 | from __future__ import print_function
18 |
19 | import numpy as np
20 | import pystan
21 |
22 |
23 | def main():
24 | # data
25 | J = 8
26 | data_y = np.array([28, 8, -3, 7, -1, 1, 18, 12])
27 | data_sigma = np.array([15, 10, 16, 11, 9, 11, 10, 18])
28 |
29 | standata = dict(J=J, y=data_y, sigma=data_sigma)
30 | fit = pystan.stan('eight_schools.stan', data=standata, iter=100000)
31 | print(fit)
32 |
33 | if __name__ == "__main__":
34 | main()
35 |
--------------------------------------------------------------------------------
/examples/factor_analysis.py:
--------------------------------------------------------------------------------
1 | """Logistic factor analysis on MNIST. Using Monte Carlo EM, with HMC
2 | for the E-step and MAP for the M-step. We fit to just one data
3 | point in MNIST.
4 | """
5 | from __future__ import absolute_import
6 | from __future__ import division
7 | from __future__ import print_function
8 |
9 | import edward as ed
10 | import os
11 | import tensorflow as tf
12 |
13 | from edward.models import Bernoulli, Empirical, Normal
14 | from observations import mnist
15 | from scipy.misc import imsave
16 |
17 | tf.flags.DEFINE_string("data_dir", default="/tmp/data", help="")
18 | tf.flags.DEFINE_string("out_dir", default="/tmp/out", help="")
19 | tf.flags.DEFINE_integer("N", default=1, help="Number of data points.")
20 | tf.flags.DEFINE_integer("d", default=10, help="Number of latent dimensions.")
21 | tf.flags.DEFINE_integer("n_iter_per_epoch", default=5000, help="")
22 | tf.flags.DEFINE_integer("n_epoch", default=20, help="")
23 |
24 | FLAGS = tf.flags.FLAGS
25 | if not os.path.exists(FLAGS.out_dir):
26 | os.makedirs(FLAGS.out_dir)
27 |
28 |
29 |
30 |
31 | def generative_network(z):
32 | """Generative network to parameterize generative model. It takes
33 | latent variables as input and outputs the likelihood parameters.
34 |
35 | logits = neural_network(z)
36 | """
37 | net = tf.layers.dense(z, 28 * 28, activation=None)
38 | net = tf.reshape(net, [FLAGS.N, -1])
39 | return net
40 |
41 |
42 | def main(_):
43 | ed.set_seed(42)
44 |
45 | # DATA
46 | (x_train, _), (x_test, _) = mnist(FLAGS.data_dir)
47 | x_train = x_train[:FLAGS.N]
48 |
49 | # MODEL
50 | z = Normal(loc=tf.zeros([FLAGS.N, FLAGS.d]),
51 | scale=tf.ones([FLAGS.N, FLAGS.d]))
52 | logits = generative_network(z)
53 | x = Bernoulli(logits=logits)
54 |
55 | # INFERENCE
56 | T = FLAGS.n_iter_per_epoch * FLAGS.n_epoch
57 | qz = Empirical(params=tf.get_variable("qz/params", [T, FLAGS.N, FLAGS.d]))
58 |
59 | inference_e = ed.HMC({z: qz}, data={x: x_train})
60 | inference_e.initialize()
61 |
62 | inference_m = ed.MAP(data={x: x_train, z: qz.params[inference_e.t]})
63 | optimizer = tf.train.AdamOptimizer(0.01, epsilon=1.0)
64 | inference_m.initialize(optimizer=optimizer)
65 |
66 | tf.global_variables_initializer().run()
67 |
68 | for _ in range(FLAGS.n_epoch - 1):
69 | avg_loss = 0.0
70 | for _ in range(FLAGS.n_iter_per_epoch):
71 | info_dict_e = inference_e.update()
72 | info_dict_m = inference_m.update()
73 | avg_loss += info_dict_m['loss']
74 | inference_e.print_progress(info_dict_e)
75 |
76 | # Print a lower bound to the average marginal likelihood for an
77 | # image.
78 | avg_loss = avg_loss / FLAGS.n_iter_per_epoch
79 | avg_loss = avg_loss / FLAGS.N
80 | print("\nlog p(x) >= {:0.3f}".format(avg_loss))
81 |
82 | # Prior predictive check.
83 | images = x.eval()
84 | for m in range(FLAGS.N):
85 | imsave(os.path.join(FLAGS.out_dir, '%d.png') % m,
86 | images[m].reshape(28, 28))
87 |
88 | if __name__ == "__main__":
89 | tf.app.run()
90 |
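91 | # Monte Carlo EM, as wired above: each E-step draws a posterior sample
92 | # of z via HMC, and the M-step runs MAP over the network weights with z
93 | # fixed at the current sample, qz.params[inference_e.t]; alternating the
94 | # two updates inside the epoch loop interleaves the E- and M-steps.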
--------------------------------------------------------------------------------
/examples/invgamma_normal_mh.py:
--------------------------------------------------------------------------------
1 | """InverseGamma-Normal with Metropolis-Hastings."""
2 | from __future__ import absolute_import
3 | from __future__ import division
4 | from __future__ import print_function
5 |
6 | import edward as ed
7 | import numpy as np
8 | import tensorflow as tf
9 |
10 | from edward.models import InverseGamma, Normal, Empirical
11 |
12 | tf.flags.DEFINE_integer("N", default=1000, help="Number of data points.")
13 | tf.flags.DEFINE_float("loc", default=7.0, help="")
14 | tf.flags.DEFINE_float("scale", default=0.7, help="")
15 |
16 | FLAGS = tf.flags.FLAGS
17 |
18 |
19 | def main(_):
20 | # Data generation (known mean)
21 | xn_data = np.random.normal(FLAGS.loc, FLAGS.scale, FLAGS.N)
22 | print("scale: {}".format(FLAGS.scale))
23 |
24 | # Prior definition
25 | alpha = 0.5
26 | beta = 0.7
27 |
28 | # Posterior inference
29 | # Probabilistic model
30 | ig = InverseGamma(alpha, beta)
31 | xn = Normal(FLAGS.loc, tf.sqrt(ig), sample_shape=FLAGS.N)
32 |
33 | # Inference
34 | qig = Empirical(params=tf.get_variable(
35 | "qig/params", [1000], initializer=tf.constant_initializer(0.5)))
36 | proposal_ig = InverseGamma(2.0, 2.0)
37 | inference = ed.MetropolisHastings({ig: qig},
38 | {ig: proposal_ig}, data={xn: xn_data})
39 | inference.run()
40 |
41 | sess = ed.get_session()
42 | print("Inferred scale: {}".format(sess.run(tf.sqrt(qig.mean()))))
43 |
44 | if __name__ == "__main__":
45 | tf.app.run()
46 |
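47 | # Sanity check: InverseGamma is conjugate to the variance of a normal
48 | # with known mean, so the exact posterior is
49 | # IG(alpha + N / 2, beta + 0.5 * sum((x - loc) ** 2)), whose mean should
50 | # be close to scale ** 2 = 0.49 for this data.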
--------------------------------------------------------------------------------
/examples/normal.py:
--------------------------------------------------------------------------------
1 | """Correlated normal posterior. Inference with Hamiltonian Monte Carlo.
2 | """
3 | from __future__ import absolute_import
4 | from __future__ import division
5 | from __future__ import print_function
6 |
7 | import edward as ed
8 | import numpy as np
9 | import tensorflow as tf
10 |
11 | from matplotlib import pyplot as plt
12 | from edward.models import Empirical, MultivariateNormalTriL
13 |
14 |
15 | def mvn_plot_contours(z, label=False, ax=None):
16 | """Plot the contours of 2-d Normal or MultivariateNormal object.
17 | Scale the axes to show 3 standard deviations.
18 | """
19 | sess = ed.get_session()
20 | mu = sess.run(z.parameters['loc'])
21 | mu_x, mu_y = mu
22 |   L = sess.run(z.parameters['scale_tril'])  # Cholesky factor: Sigma = L L^T
23 |   sigma_x, sigma_y = np.sqrt((L ** 2).sum(axis=1))  # marginal stddevs
24 | xmin, xmax = mu_x - 3 * sigma_x, mu_x + 3 * sigma_x
25 | ymin, ymax = mu_y - 3 * sigma_y, mu_y + 3 * sigma_y
26 | xs = np.linspace(xmin, xmax, num=100)
27 | ys = np.linspace(ymin, ymax, num=100)
28 | X, Y = np.meshgrid(xs, ys)
29 | T = tf.cast(np.c_[X.flatten(), Y.flatten()], dtype=tf.float32)
30 | Z = sess.run(tf.exp(z.log_prob(T))).reshape((len(xs), len(ys)))
31 | if ax is None:
32 | fig, ax = plt.subplots()
33 | cs = ax.contour(X, Y, Z)
34 | if label:
35 | plt.clabel(cs, inline=1, fontsize=10)
36 |
37 |
38 | def main(_):
39 | ed.set_seed(42)
40 |
41 | # MODEL
42 | z = MultivariateNormalTriL(
43 | loc=tf.ones(2),
44 | scale_tril=tf.cholesky(tf.constant([[1.0, 0.8], [0.8, 1.0]])))
45 |
46 | # INFERENCE
47 | qz = Empirical(params=tf.get_variable("qz/params", [1000, 2]))
48 |
49 | inference = ed.HMC({z: qz})
50 | inference.run()
51 |
52 | # CRITICISM
53 | sess = ed.get_session()
54 | mean, stddev = sess.run([qz.mean(), qz.stddev()])
55 | print("Inferred posterior mean:")
56 | print(mean)
57 | print("Inferred posterior stddev:")
58 | print(stddev)
59 |
60 | fig, ax = plt.subplots()
61 | trace = sess.run(qz.params)
62 | ax.scatter(trace[:, 0], trace[:, 1], marker=".")
63 | mvn_plot_contours(z, ax=ax)
64 | plt.show()
65 |
66 | if __name__ == "__main__":
67 | plt.style.use("ggplot")
68 | tf.app.run()
69 |
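70 | # Because no data is conditioned on, the target density is the model
71 | # itself, so the HMC trace stored in qz should fill out the plotted
72 | # contours of z; the scatter-over-contours figure is the check.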
--------------------------------------------------------------------------------
/examples/normal_normal.py:
--------------------------------------------------------------------------------
1 | """Normal-normal model using Hamiltonian Monte Carlo."""
2 | from __future__ import absolute_import
3 | from __future__ import division
4 | from __future__ import print_function
5 |
6 | import edward as ed
7 | import matplotlib.pyplot as plt
8 | import numpy as np
9 | import tensorflow as tf
10 |
11 | from edward.models import Empirical, Normal
12 |
13 |
14 | def main(_):
15 | ed.set_seed(42)
16 |
17 | # DATA
18 | x_data = np.array([0.0] * 50)
19 |
20 | # MODEL: Normal-Normal with known variance
21 | mu = Normal(loc=0.0, scale=1.0)
22 | x = Normal(loc=mu, scale=1.0, sample_shape=50)
23 |
24 | # INFERENCE
25 | qmu = Empirical(params=tf.get_variable("qmu/params", [1000],
26 | initializer=tf.zeros_initializer()))
27 |
28 | # analytic solution: N(loc=0.0, scale=\sqrt{1/51}=0.140)
29 | inference = ed.HMC({mu: qmu}, data={x: x_data})
30 | inference.run()
31 |
32 | # CRITICISM
33 | sess = ed.get_session()
34 | mean, stddev = sess.run([qmu.mean(), qmu.stddev()])
35 | print("Inferred posterior mean:")
36 | print(mean)
37 | print("Inferred posterior stddev:")
38 | print(stddev)
39 |
40 | # Check convergence with visual diagnostics.
41 | samples = sess.run(qmu.params)
42 |
43 | # Plot histogram.
44 | plt.hist(samples, bins='auto')
45 | plt.show()
46 |
47 | # Trace plot.
48 | plt.plot(samples)
49 | plt.show()
50 |
51 | if __name__ == "__main__":
52 | tf.app.run()
53 |
--------------------------------------------------------------------------------
/examples/normal_sgld.py:
--------------------------------------------------------------------------------
1 | """Correlated normal posterior. Inference with stochastic gradient
2 | Langevin dynamics.
3 | """
4 | from __future__ import absolute_import
5 | from __future__ import division
6 | from __future__ import print_function
7 |
8 | import edward as ed
9 | import tensorflow as tf
10 |
11 | from edward.models import Empirical, MultivariateNormalTriL
12 |
13 |
14 | def main(_):
15 | ed.set_seed(42)
16 |
17 | # MODEL
18 | z = MultivariateNormalTriL(
19 | loc=tf.ones(2),
20 | scale_tril=tf.cholesky(tf.constant([[1.0, 0.8], [0.8, 1.0]])))
21 |
22 | # INFERENCE
23 | qz = Empirical(params=tf.get_variable("qz/params", [2000, 2]))
24 |
25 | inference = ed.SGLD({z: qz})
26 | inference.run(step_size=5.0)
27 |
28 | # CRITICISM
29 | sess = ed.get_session()
30 | mean, stddev = sess.run([qz.mean(), qz.stddev()])
31 | print("Inferred posterior mean:")
32 | print(mean)
33 | print("Inferred posterior stddev:")
34 | print(stddev)
35 |
36 | if __name__ == "__main__":
37 | tf.app.run()
38 |
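39 | # SGLD (Welling & Teh, 2011) follows the gradient of the log joint while
40 | # injecting Gaussian noise scaled to the step size, so the iterates
41 | # stored in qz.params are (correlated) approximate posterior samples
42 | # rather than a single point estimate.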
--------------------------------------------------------------------------------
/examples/pp_dirichlet_process.py:
--------------------------------------------------------------------------------
1 | """Dirichlet process.
2 |
3 | We implement sample generation from a Dirichlet process (with no base
4 | distribution) via its stick breaking construction. It is a streamlined
5 | implementation of the `DirichletProcess` random variable in Edward.
6 |
7 | References
8 | ----------
9 | https://probmods.org/chapters/12-non-parametric-models.html#infinite-discrete-distributions-the-dirichlet-processes
10 | """
11 | from __future__ import absolute_import
12 | from __future__ import division
13 | from __future__ import print_function
14 |
15 | import matplotlib.pyplot as plt
16 | import tensorflow as tf
17 |
18 | from edward.models import Bernoulli, Beta, DirichletProcess, Exponential, Normal
19 |
20 |
21 | def dirichlet_process(alpha):
22 | """Demo of stochastic while loop for stick breaking construction."""
23 | def cond(k, beta_k):
24 | # End while loop (return False) when flip is heads.
25 |     flip = Bernoulli(probs=beta_k)
26 | return tf.cast(1 - flip, tf.bool)
27 |
28 | def body(k, beta_k):
29 | beta_k = Beta(1.0, alpha)
30 | return k + 1, beta_k
31 |
32 | k = tf.constant(0)
33 | beta_k = Beta(1.0, alpha)
34 | stick_num, stick_beta = tf.while_loop(cond, body, loop_vars=[k, beta_k])
35 | return stick_num
36 |
37 |
38 | def main(_):
39 | dp = dirichlet_process(10.0)
40 |
41 | # The number of sticks broken is dynamic, changing across evaluations.
42 | sess = tf.Session()
43 | print(sess.run(dp))
44 | print(sess.run(dp))
45 |
46 | # Demo of the DirichletProcess random variable in Edward.
47 | base = Normal(0.0, 1.0)
48 |
49 | # Highly concentrated DP.
50 | alpha = 1.0
51 | dp = DirichletProcess(alpha, base)
52 | x = dp.sample(1000)
53 | samples = sess.run(x)
54 | plt.hist(samples, bins=100, range=(-3.0, 3.0))
55 | plt.title("DP({0}, N(0, 1))".format(alpha))
56 | plt.show()
57 |
58 | # More spread out DP.
59 | alpha = 50.0
60 | dp = DirichletProcess(alpha, base)
61 | x = dp.sample(1000)
62 | samples = sess.run(x)
63 | plt.hist(samples, bins=100, range=(-3.0, 3.0))
64 | plt.title("DP({0}, N(0, 1))".format(alpha))
65 | plt.show()
66 |
67 | # States persist across calls to sample() in a DP.
68 | alpha = 1.0
69 | dp = DirichletProcess(alpha, base)
70 | x = dp.sample(50)
71 | y = dp.sample(75)
72 | samples_x, samples_y = sess.run([x, y])
73 | plt.subplot(211)
74 | plt.hist(samples_x, bins=100, range=(-3.0, 3.0))
75 | plt.title("DP({0}, N(0, 1)) across two calls to sample()".format(alpha))
76 | plt.subplot(212)
77 | plt.hist(samples_y, bins=100, range=(-3.0, 3.0))
78 | plt.show()
79 |
80 | # `theta` is the distribution indirectly returned by the DP.
81 | # Fetching theta is the same as fetching the Dirichlet process.
82 | dp = DirichletProcess(alpha, base)
83 | theta = Normal(0.0, 1.0, value=tf.cast(dp, tf.float32))
84 | print(sess.run([dp, theta]))
85 | print(sess.run([dp, theta]))
86 |
87 | # DirichletProcess can also take in non-scalar concentrations and bases.
88 | alpha = tf.constant([0.1, 0.6, 0.4])
89 | base = Exponential(rate=tf.ones([5, 2]))
90 | dp = DirichletProcess(alpha, base)
91 | print(dp)
92 |
93 | if __name__ == "__main__":
94 | plt.style.use('ggplot')
95 | tf.app.run()
96 |
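97 | # In the stick-breaking loop above, sticks are broken until a
98 | # Bernoulli(probs=beta_k) flip lands heads, with beta_k ~ Beta(1, alpha).
99 | # Larger alpha makes each beta_k smaller on average, so more sticks are
100 | # broken and samples spread over more atoms of the base distribution,
101 | # as the two histograms (alpha = 1 vs. alpha = 50) illustrate.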
--------------------------------------------------------------------------------
/examples/pp_dynamic_shape.py:
--------------------------------------------------------------------------------
1 | """Dynamic shapes.
2 |
3 | We build a random variable whose size depends on a sample from another
4 | random variable.
5 | """
6 | from __future__ import absolute_import
7 | from __future__ import division
8 | from __future__ import print_function
9 |
10 | import edward as ed
11 | import tensorflow as tf
12 |
13 | from edward.models import Exponential, Dirichlet, Gamma
14 |
15 |
16 | def main(_):
17 | ed.set_seed(42)
18 |
19 | # Prior on scalar hyperparameter to Dirichlet.
20 | alpha = Gamma(1.0, 1.0)
21 |
22 | # Prior on size of Dirichlet.
23 | n = 1 + tf.cast(Exponential(0.5), tf.int32)
24 |
25 | # Build a vector of ones whose size is n; multiply it by alpha.
26 | p = Dirichlet(tf.ones([n]) * alpha)
27 |
28 | sess = ed.get_session()
29 | print(sess.run(p))
30 | # [ 0.01012419 0.02939712 0.05036638 0.51287931 0.31020424 0.0485355
31 | # 0.0384932 ]
32 | print(sess.run(p))
33 | # [ 0.12836078 0.23335715 0.63828212]
34 |
35 | if __name__ == "__main__":
36 | tf.app.run()
37 |
--------------------------------------------------------------------------------
/examples/pp_persistent_randomness.py:
--------------------------------------------------------------------------------
1 | """Persistent randomness.
2 |
3 | Our language defines random variables. They enable memoization in the
4 | sense that any values depending on the same random variable are
5 | generated conditioned on the same samples. Simulating the world
6 | multiple times (i.e., fetching values out of the session) produces new
7 | memoized values. To avoid persistent randomness, simply define another
8 | random variable to work with.
9 |
10 | References
11 | ----------
12 | https://probmods.org/chapters/02-generative-models.html#persistent-randomness-mem
13 | """
14 | import edward as ed
15 | import tensorflow as tf
16 |
17 | from edward.models import Categorical
18 |
19 |
20 | def eye_color(person):
21 | random_variables = {x.name: x for x in ed.random_variables()}
22 | if person + '/' in random_variables:
23 | return random_variables[person + '/']
24 | else:
25 | return Categorical(probs=tf.ones(3) / 3, name=person)
26 |
27 |
28 | def main(_):
29 | # Only two categorical random variables are created.
30 | eye_color('bob')
31 | eye_color('alice')
32 | eye_color('bob')
33 |
34 | if __name__ == "__main__":
35 | tf.app.run()
36 |
--------------------------------------------------------------------------------
/examples/pp_stochastic_control_flow.py:
--------------------------------------------------------------------------------
1 | """Stochastic control flow.
2 |
3 | We sample from a geometric random variable by using samples from
4 | Bernoulli random variables. It requires a while loop whose condition
5 | is stochastic.
6 |
7 | References
8 | ----------
9 | https://probmods.org/chapters/02-generative-models.html#stochastic-recursion
10 | """
11 | from __future__ import absolute_import
12 | from __future__ import division
13 | from __future__ import print_function
14 |
15 | import matplotlib.pyplot as plt
16 | import tensorflow as tf
17 |
18 | from edward.models import Bernoulli
19 |
20 |
21 | def geometric(p):
22 | i = tf.constant(0)
23 | sample = tf.while_loop(
24 | cond=lambda i: tf.cast(1 - Bernoulli(probs=p), tf.bool),
25 | body=lambda i: i + 1,
26 | loop_vars=[i])
27 | return sample
28 |
29 |
30 | def main(_):
31 | p = 0.1
32 | geom = geometric(p)
33 |
34 | sess = tf.Session()
35 | samples = [sess.run(geom) for _ in range(1000)]
36 | plt.hist(samples, bins='auto')
37 | plt.title("Geometric({0})".format(p))
38 | plt.show()
39 |
40 | if __name__ == "__main__":
41 | tf.app.run()
42 |
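43 | # The loop counts failures before the first success, so geom is
44 | # Geometric(p) on {0, 1, 2, ...} with mean (1 - p) / p = 9 for p = 0.1;
45 | # the histogram should peak at 0 and decay geometrically.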
--------------------------------------------------------------------------------
/examples/pp_stochastic_recursion.py:
--------------------------------------------------------------------------------
1 | """Stochastic recursion.
2 |
3 | We sample from a geometric random variable by using samples from a
4 | Bernoulli random variable. It uses a recursive function and requires
5 | lazy evaluation of the condition.
6 |
7 | Recursion is not available in TensorFlow, so stochastic recursion is
8 | not available in Edward's modeling language either. There are several
9 | alternatives: (stochastic) while loops, wrapping around a Python
10 | implementation (`tf.py_func`), and a CPS-style formulation.
11 |
12 | References
13 | ----------
14 | https://probmods.org/chapters/02-generative-models.html#stochastic-recursion
15 | """
16 | from __future__ import absolute_import
17 | from __future__ import division
18 | from __future__ import print_function
19 |
20 | import matplotlib.pyplot as plt
21 | import tensorflow as tf
22 |
23 | from edward.models import Bernoulli
24 |
25 |
26 | def geometric(p):
27 | cond = tf.cast(Bernoulli(probs=p), tf.bool)
28 |
29 | def fn1():
30 | return tf.constant(0)
31 |
32 | def fn2():
33 | return geometric(p) + 1
34 |
35 |   # TensorFlow builds the op non-lazily, constructing both branches
36 |   # before the condition is checked, so this function recurses forever.
37 | return tf.cond(cond, fn1, fn2)
38 |
39 |
40 | def main(_):
41 | p = tf.constant(0.9)
42 | geom = geometric(p)
43 |
44 | sess = tf.Session()
45 | samples = [sess.run(geom) for _ in range(1000)]
46 | plt.hist(samples, bins='auto')
47 |
48 | if __name__ == "__main__":
49 | tf.app.run()
50 |
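51 | # A working alternative replaces the recursion with tf.while_loop, whose
52 | # body is built once and whose stochastic condition is re-evaluated on
53 | # each iteration; see pp_stochastic_control_flow.py in this directory.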
--------------------------------------------------------------------------------
/examples/probabilistic_matrix_factorization.py:
--------------------------------------------------------------------------------
1 | """Probabilistic matrix factorization using variational inference.
2 |
3 | Visualizes the actual and the estimated rating matrices as heatmaps.
4 | """
5 | from __future__ import absolute_import
6 | from __future__ import division
7 | from __future__ import print_function
8 |
9 | import edward as ed
10 | import matplotlib.pyplot as plt
11 | import numpy as np
12 | import tensorflow as tf
13 |
14 | from edward.models import Normal
15 |
16 | tf.flags.DEFINE_integer("N", default=50, help="Number of users.")
17 | tf.flags.DEFINE_integer("M", default=60, help="Number of movies.")
18 | tf.flags.DEFINE_integer("D", default=3, help="Number of latent factors.")
19 |
20 | FLAGS = tf.flags.FLAGS
21 |
22 |
23 | def build_toy_dataset(U, V, N, M, noise_std=0.1):
24 | R = np.dot(np.transpose(U), V) + np.random.normal(0, noise_std, size=(N, M))
25 | return R
26 |
27 |
28 | def get_indicators(N, M, prob=0.5):
29 |   ind = np.random.binomial(1, prob, (N, M))
30 | return ind
31 |
32 |
33 | def main(_):
34 | # true latent factors
35 | U_true = np.random.randn(FLAGS.D, FLAGS.N)
36 | V_true = np.random.randn(FLAGS.D, FLAGS.M)
37 |
38 | # DATA
39 | R_true = build_toy_dataset(U_true, V_true, FLAGS.N, FLAGS.M)
40 | I_train = get_indicators(FLAGS.N, FLAGS.M)
41 | I_test = 1 - I_train
42 |
43 | # MODEL
44 | I = tf.placeholder(tf.float32, [FLAGS.N, FLAGS.M])
45 | U = Normal(loc=0.0, scale=1.0, sample_shape=[FLAGS.D, FLAGS.N])
46 | V = Normal(loc=0.0, scale=1.0, sample_shape=[FLAGS.D, FLAGS.M])
47 | R = Normal(loc=tf.matmul(tf.transpose(U), V) * I,
48 | scale=tf.ones([FLAGS.N, FLAGS.M]))
49 |
50 | # INFERENCE
51 | qU = Normal(loc=tf.get_variable("qU/loc", [FLAGS.D, FLAGS.N]),
52 | scale=tf.nn.softplus(
53 | tf.get_variable("qU/scale", [FLAGS.D, FLAGS.N])))
54 | qV = Normal(loc=tf.get_variable("qV/loc", [FLAGS.D, FLAGS.M]),
55 | scale=tf.nn.softplus(
56 | tf.get_variable("qV/scale", [FLAGS.D, FLAGS.M])))
57 |
58 | inference = ed.KLqp({U: qU, V: qV}, data={R: R_true, I: I_train})
59 | inference.run()
60 |
61 | # CRITICISM
62 | qR = Normal(loc=tf.matmul(tf.transpose(qU), qV),
63 | scale=tf.ones([FLAGS.N, FLAGS.M]))
64 |
65 | print("Mean squared error on test data:")
66 | print(ed.evaluate('mean_squared_error', data={qR: R_true, I: I_test}))
67 |
68 | plt.imshow(R_true, cmap='hot')
69 | plt.show()
70 |
71 | R_est = tf.matmul(tf.transpose(qU), qV).eval()
72 | plt.imshow(R_est, cmap='hot')
73 | plt.show()
74 |
75 | if __name__ == "__main__":
76 | tf.app.run()
77 |
--------------------------------------------------------------------------------
/examples/rasch_model.py:
--------------------------------------------------------------------------------
1 | """Rasch model (Rasch, 1960)."""
2 | from __future__ import absolute_import
3 | from __future__ import division
4 | from __future__ import print_function
5 |
6 | import edward as ed
7 | import matplotlib.pyplot as plt
8 | import numpy as np
9 | import tensorflow as tf
10 |
11 | from edward.models import Bernoulli, Normal, Empirical
12 | from scipy.special import expit
13 |
14 | tf.flags.DEFINE_integer("nsubj", default=200, help="")
15 | tf.flags.DEFINE_integer("nitem", default=25, help="")
16 | tf.flags.DEFINE_integer("T", default=5000, help="Number of posterior samples.")
17 |
18 | FLAGS = tf.flags.FLAGS
19 |
20 |
21 | def main(_):
22 | # DATA
23 | trait_true = np.random.normal(size=[FLAGS.nsubj, 1])
24 | thresh_true = np.random.normal(size=[1, FLAGS.nitem])
25 | X_data = np.random.binomial(1, expit(trait_true - thresh_true))
26 |
27 | # MODEL
28 | trait = Normal(loc=0.0, scale=1.0, sample_shape=[FLAGS.nsubj, 1])
29 | thresh = Normal(loc=0.0, scale=1.0, sample_shape=[1, FLAGS.nitem])
30 | X = Bernoulli(logits=trait - thresh)
31 |
32 | # INFERENCE
33 | q_trait = Empirical(params=tf.get_variable("q_trait/params",
34 | [FLAGS.T, FLAGS.nsubj, 1]))
35 | q_thresh = Empirical(params=tf.get_variable("q_thresh/params",
36 | [FLAGS.T, 1, FLAGS.nitem]))
37 |
38 | inference = ed.HMC({trait: q_trait, thresh: q_thresh}, data={X: X_data})
39 | inference.run(step_size=0.1)
40 |
41 | # Alternatively, use variational inference.
42 | # q_trait = Normal(
43 | # loc=tf.get_variable("q_trait/loc", [FLAGS.nsubj, 1]),
44 | # scale=tf.nn.softplus(
45 | # tf.get_variable("q_trait/scale", [FLAGS.nsubj, 1])))
46 | # q_thresh = Normal(
47 | # loc=tf.get_variable("q_thresh/loc", [1, FLAGS.nitem]),
48 | # scale=tf.nn.softplus(
49 | # tf.get_variable("q_thresh/scale", [1, FLAGS.nitem])))
50 |
51 | # inference = ed.KLqp({trait: q_trait, thresh: q_thresh}, data={X: X_data})
52 | # inference.run(n_iter=2500, n_samples=10)
53 |
54 | # CRITICISM
55 | # Check that the inferred posterior mean captures the true traits.
56 | plt.scatter(trait_true, q_trait.mean().eval())
57 | plt.show()
58 |
59 | print("MSE between true traits and inferred posterior mean:")
60 | print(np.mean(np.square(trait_true - q_trait.mean().eval())))
61 |
62 | if __name__ == "__main__":
63 | tf.app.run()
64 |
--------------------------------------------------------------------------------
/examples/stochastic_block_model.py:
--------------------------------------------------------------------------------
1 | """Stochastic block model."""
2 | from __future__ import absolute_import
3 | from __future__ import division
4 | from __future__ import print_function
5 |
6 | import edward as ed
7 | import numpy as np
8 | import tensorflow as tf
9 |
10 | from edward.models import Bernoulli, Multinomial, Beta, Dirichlet, PointMass
11 | from observations import karate
12 | from sklearn.metrics.cluster import adjusted_rand_score
13 |
14 |
15 | def main(_):
16 | ed.set_seed(42)
17 |
18 | # DATA
19 | X_data, Z_true = karate("~/data")
20 | N = X_data.shape[0] # number of vertices
21 | K = 2 # number of clusters
22 |
23 | # MODEL
24 | gamma = Dirichlet(concentration=tf.ones([K]))
25 | Pi = Beta(concentration0=tf.ones([K, K]), concentration1=tf.ones([K, K]))
26 | Z = Multinomial(total_count=1.0, probs=gamma, sample_shape=N)
27 | X = Bernoulli(probs=tf.matmul(Z, tf.matmul(Pi, tf.transpose(Z))))
28 |
29 | # INFERENCE (EM algorithm)
30 | qgamma = PointMass(tf.nn.softmax(tf.get_variable("qgamma/params", [K])))
31 | qPi = PointMass(tf.nn.sigmoid(tf.get_variable("qPi/params", [K, K])))
32 | qZ = PointMass(tf.nn.softmax(tf.get_variable("qZ/params", [N, K])))
33 |
34 | inference = ed.MAP({gamma: qgamma, Pi: qPi, Z: qZ}, data={X: X_data})
35 | inference.initialize(n_iter=250)
36 |
37 | tf.global_variables_initializer().run()
38 |
39 | for _ in range(inference.n_iter):
40 | info_dict = inference.update()
41 | inference.print_progress(info_dict)
42 |
43 | # CRITICISM
44 | Z_pred = qZ.mean().eval().argmax(axis=1)
45 | print("Result (label flip can happen):")
46 | print("Predicted")
47 | print(Z_pred)
48 | print("True")
49 | print(Z_true)
50 | print("Adjusted Rand Index =", adjusted_rand_score(Z_pred, Z_true))
51 |
52 | if __name__ == "__main__":
53 | tf.app.run()
54 |
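55 | # The edge probabilities are Z Pi Z^T, so any permutation of the K
56 | # cluster labels yields the same likelihood. This label switching is why
57 | # the comparison uses the adjusted Rand index, which is invariant to
58 | # relabeling, rather than raw accuracy.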
--------------------------------------------------------------------------------
/notebooks/data/insteval_dept_ranefs_r.csv:
--------------------------------------------------------------------------------
1 | "","(Intercept)","dept_id"
2 | "15",0.0172105596050927,"15"
3 | "5",0.0504985814356492,"5"
4 | "10",-0.124354155843624,"10"
5 | "12",0.0180897575241262,"12"
6 | "6",-0.0665685935570926,"6"
7 | "7",0.0376324771800788,"7"
8 | "4",0.0818520204766247,"4"
9 | "8",0.113196959365669,"8"
10 | "9",-0.0369171351929634,"9"
11 | "14",-0.0320131093679916,"14"
12 | "1",0.0251619667636418,"1"
13 | "3",0.0281748539677167,"3"
14 | "11",-0.0752573476292343,"11"
15 | "2",-0.0367068346772158,"2"
16 |
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [metadata]
2 | description-file = README.md
3 |
4 | [pep8]
5 | # Skip errors and warnings
6 | # E111 indentation is not a multiple of four
7 | # E114 indentation is not a multiple of four (comment)
8 | ignore=E111,E114
9 |
10 | # Set maximum allowed line length (default: 79)
11 | max-line-length=80
12 |
13 | [tool:pytest]
14 | pep8ignore = E111 E114
15 | pep8maxlinelength = 80
16 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup
2 |
3 | # import ``__version__`` from code base
4 | exec(open('edward/version.py').read())
5 |
6 | setup(
7 | name='edward',
8 | version=__version__,
9 | description='A library for probabilistic modeling, inference, and '
10 | 'criticism',
11 | author='Dustin Tran',
12 | author_email="dustin@cs.columbia.edu",
13 | packages=['edward', 'edward.criticisms', 'edward.inferences',
14 | 'edward.models', 'edward.util', 'edward.inferences.conjugacy'],
15 | install_requires=['numpy>=1.7',
16 | 'six>=1.10.0'],
17 | extras_require={
18 | 'tensorflow': ['tensorflow>=1.2.0rc0'],
19 | 'tensorflow with gpu': ['tensorflow-gpu>=1.2.0rc0'],
20 | 'neural networks': ['keras>=2.0.0', 'prettytensor>=0.7.4'],
21 | 'datasets': ['observations>=0.1.2'],
22 | 'notebooks': ['jupyter>=1.0.0'],
23 | 'visualization': ['matplotlib>=1.3',
24 | 'pillow>=3.4.2',
25 | 'seaborn>=0.3.1']},
26 | tests_require=['pytest', 'pytest-pep8'],
27 | url='http://edwardlib.org',
28 | keywords='machine learning statistics probabilistic programming tensorflow',
29 | license='Apache License 2.0',
30 | classifiers=['Intended Audience :: Developers',
31 | 'Intended Audience :: Education',
32 | 'Intended Audience :: Science/Research',
33 | 'License :: OSI Approved :: Apache Software License',
34 | 'Operating System :: POSIX :: Linux',
35 | 'Operating System :: MacOS :: MacOS X',
36 | 'Operating System :: Microsoft :: Windows',
37 | 'Programming Language :: Python :: 2.7',
38 | 'Programming Language :: Python :: 3.4'],
39 | )
40 |
--------------------------------------------------------------------------------
/tests/criticisms/ppc_plots_test.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | import edward as ed
6 | import numpy as np
7 | import tensorflow as tf
8 |
9 |
10 | class test_ppc_plots_class(tf.test.TestCase):
11 |
12 | def test_ppc_density_plot(self):
13 | y = np.random.randn(20)
14 | y_rep = np.random.randn(20, 20)
15 |
16 | ed.ppc_density_plot(y, y_rep)
17 |
18 | def test_ppc_stat_hist_plot(self):
19 | y = np.random.randn(20)
20 | t = 0.0
21 |
22 | ed.ppc_stat_hist_plot(t, y, stat_name="mean", bins=10)
23 |
24 | if __name__ == '__main__':
25 | tf.test.main()
26 |
--------------------------------------------------------------------------------
/tests/criticisms/ppc_test.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | import edward as ed
6 | import tensorflow as tf
7 |
8 | from edward.models import Normal
9 |
10 |
11 | class test_ppc_class(tf.test.TestCase):
12 |
13 | def test_data(self):
14 | with self.test_session():
15 | x = Normal(loc=0.0, scale=1.0)
16 | y = 2.0 * x
17 | x_data = tf.constant(0.0)
18 | y_data = tf.constant(0.0)
19 | ed.ppc(lambda xs, zs: tf.reduce_mean(xs[x]), {x: x_data}, n_samples=1)
20 | ed.ppc(lambda xs, zs: tf.reduce_mean(xs[y]), {y: y_data}, n_samples=1)
21 | self.assertRaises(TypeError, ed.ppc, lambda xs, zs: tf.reduce_mean(xs[y]),
22 | {'y': y_data}, n_samples=1)
23 |
24 | def test_latent_vars(self):
25 | with self.test_session():
26 | x = Normal(loc=0.0, scale=1.0)
27 | y = 2.0 * x
28 | z = Normal(loc=0.0, scale=1.0)
29 | x_data = tf.constant(0.0)
30 | y_data = tf.constant(0.0)
31 | ed.ppc(lambda xs, zs: tf.reduce_mean(xs[x]) + tf.reduce_mean(zs[z]),
32 | {x: x_data}, {z: z}, n_samples=1)
33 | ed.ppc(lambda xs, zs: tf.reduce_mean(xs[x]) + tf.reduce_mean(zs[z]),
34 | {x: x_data}, {z: y}, n_samples=1)
35 | ed.ppc(lambda xs, zs: tf.reduce_mean(xs[x]) + tf.reduce_mean(zs[y]),
36 | {x: x_data}, {y: y}, n_samples=1)
37 | ed.ppc(lambda xs, zs: tf.reduce_mean(xs[x]) + tf.reduce_mean(zs[y]),
38 | {x: x_data}, {y: z}, n_samples=1)
39 | self.assertRaises(TypeError, ed.ppc, lambda xs, zs: tf.reduce_mean(xs[x]),
40 | {x: x_data}, {'y': z}, n_samples=1)
41 |
42 | def test_n_samples(self):
43 | with self.test_session():
44 | x = Normal(loc=0.0, scale=1.0)
45 | x_data = tf.constant(0.0)
46 | ed.ppc(lambda xs, zs: tf.reduce_mean(xs[x]), {x: x_data}, n_samples=1)
47 | ed.ppc(lambda xs, zs: tf.reduce_mean(xs[x]), {x: x_data}, n_samples=5)
48 | self.assertRaises(TypeError, ed.ppc, lambda xs, zs: tf.reduce_mean(xs[x]),
49 | {x: x_data}, n_samples='1')
50 |
51 | if __name__ == '__main__':
52 | tf.test.main()
53 |
--------------------------------------------------------------------------------
/tests/data/generate_test_saver.py:
--------------------------------------------------------------------------------
1 | """Generate `test_saver`."""
2 | from __future__ import absolute_import
3 | from __future__ import division
4 | from __future__ import print_function
5 |
6 | import edward as ed
7 | import numpy as np
8 | import tensorflow as tf
9 |
10 | from edward.models import Normal, PointMass
11 |
12 |
13 | def main(_):
14 | x_data = np.array([0.0] * 50, dtype=np.float32)
15 |
16 | mu = Normal(loc=0.0, scale=1.0)
17 | x = Normal(loc=mu, scale=1.0, sample_shape=50)
18 |
19 | with tf.variable_scope("posterior"):
20 | qmu = PointMass(params=tf.Variable(1.0))
21 |
22 | inference = ed.MAP({mu: qmu}, data={x: x_data})
23 | inference.run(n_iter=10)
24 |
25 | sess = ed.get_session()
26 | saver = tf.train.Saver()
27 | saver.save(sess, "test_saver")
28 |
29 | if __name__ == "__main__":
30 | tf.app.run()
31 |
--------------------------------------------------------------------------------
/tests/data/generate_toy_data_tfrecords.py:
--------------------------------------------------------------------------------
1 | """Generate `toy_data.tfrecords`."""
2 | from __future__ import absolute_import
3 | from __future__ import division
4 | from __future__ import print_function
5 |
6 | import numpy as np
7 | import tensorflow as tf
8 |
9 |
10 | def main(_):
11 | xs = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0])
12 | writer = tf.python_io.TFRecordWriter("toy_data.tfrecords")
13 | for x in xs:
14 | example = tf.train.Example(features=tf.train.Features(
15 | feature={'outcome':
16 | tf.train.Feature(float_list=tf.train.FloatList(value=[x]))}))
17 | serialized = example.SerializeToString()
18 | writer.write(serialized)
19 |
20 | writer.close()
21 |
22 | if __name__ == "__main__":
23 | tf.app.run()
24 |
--------------------------------------------------------------------------------
/tests/data/strip_markdown.tpl:
--------------------------------------------------------------------------------
1 | {% extends 'python.tpl'%}
2 | # Remove markdown cells.
3 | {% block markdowncell -%}
4 | {% endblock markdowncell %}
5 |
6 | # Change the appearance of execution count.
7 | {% block in_prompt %}
8 | {%- endblock in_prompt %}
9 |
--------------------------------------------------------------------------------
/tests/data/test_saver.data-00000-of-00001:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blei-lab/edward/352596c3bc77e1cb5c01f77ac3283a53c74dbf21/tests/data/test_saver.data-00000-of-00001
--------------------------------------------------------------------------------
/tests/data/test_saver.index:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blei-lab/edward/352596c3bc77e1cb5c01f77ac3283a53c74dbf21/tests/data/test_saver.index
--------------------------------------------------------------------------------
/tests/data/test_saver.meta:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blei-lab/edward/352596c3bc77e1cb5c01f77ac3283a53c74dbf21/tests/data/test_saver.meta
--------------------------------------------------------------------------------
/tests/data/toy_data.tfrecords:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/blei-lab/edward/352596c3bc77e1cb5c01f77ac3283a53c74dbf21/tests/data/toy_data.tfrecords
--------------------------------------------------------------------------------
/tests/inferences/ar_process_test.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | import edward as ed
6 | import numpy as np
7 | import tensorflow as tf
8 |
9 | from edward.models import Normal, PointMass
10 | from scipy.optimize import minimize
11 |
12 | from edward.models import RandomVariable
13 | from tensorflow.contrib.distributions import Distribution
14 | from tensorflow.contrib.distributions import FULLY_REPARAMETERIZED
15 |
16 |
17 | class AutoRegressive(RandomVariable, Distribution):
18 | # a 1-D AR(1) process
19 |   # z[t + 1] = a * z[t] + eps with eps ~ N(0, sig**2)
20 | def __init__(self, T, a, sig, *args, **kwargs):
21 | self.a = a
22 | self.sig = sig
23 | self.T = T
24 | self.shocks = Normal(tf.zeros(T), scale=sig)
25 | self.z = tf.scan(lambda acc, x: self.a * acc + x, self.shocks)
26 |
27 | if 'dtype' not in kwargs:
28 | kwargs['dtype'] = tf.float32
29 | if 'allow_nan_stats' not in kwargs:
30 | kwargs['allow_nan_stats'] = False
31 | if 'reparameterization_type' not in kwargs:
32 | kwargs['reparameterization_type'] = FULLY_REPARAMETERIZED
33 | if 'validate_args' not in kwargs:
34 | kwargs['validate_args'] = False
35 | if 'name' not in kwargs:
36 | kwargs['name'] = 'AutoRegressive'
37 |
38 | super(AutoRegressive, self).__init__(*args, **kwargs)
39 |
40 | self._args = (T, a, sig)
41 |
42 | def _log_prob(self, value):
43 | err = value - self.a * tf.pad(value[:-1], [[1, 0]], 'CONSTANT')
44 | lpdf = self.shocks._log_prob(err)
45 | return tf.reduce_sum(lpdf)
46 |
47 | def _sample_n(self, n, seed=None):
48 | return tf.scan(lambda acc, x: self.a * acc + x,
49 | self.shocks._sample_n(n, seed))
50 |
51 |
52 | class test_ar_process(tf.test.TestCase):
53 |
54 | def test_ar_mle(self):
55 | # set up test data: a random walk
56 | T = 100
57 | z_true = np.zeros(T)
58 | r = 0.95
59 | sig = 0.01
60 | eta = 0.01
61 | for t in range(1, 100):
62 | z_true[t] = r * z_true[t - 1] + sig * np.random.randn()
63 |
64 | x_data = (z_true + eta * np.random.randn(T)).astype(np.float32)
65 |
66 | # use scipy to find max likelihood
67 | def cost(z):
68 | initial = z[0]**2 / sig**2
69 | ar = np.sum((z[1:] - r * z[:-1])**2) / sig**2
70 | data = np.sum((x_data - z)**2) / eta**2
71 | return initial + ar + data
72 |
73 | mle = minimize(cost, np.zeros(T)).x
74 |
75 | with self.test_session() as sess:
76 | z = AutoRegressive(T, r, sig)
77 | x = Normal(loc=z, scale=eta)
78 |
79 | qz = PointMass(params=tf.Variable(tf.zeros(T)))
80 | inference = ed.MAP({z: qz}, data={x: x_data})
81 | inference.run(n_iter=500)
82 |
83 | self.assertAllClose(qz.eval(), mle, rtol=1e-3, atol=1e-3)
84 |
85 | if __name__ == '__main__':
86 | ed.set_seed(42)
87 | tf.test.main()
88 |
--------------------------------------------------------------------------------
/tests/inferences/gan_inference_test.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | import edward as ed
6 | import numpy as np
7 | import tensorflow as tf
8 |
9 | from edward.models import Normal
10 | from tensorflow.contrib import slim
11 |
12 |
13 | def next_batch(M):
14 | samples = np.random.normal(4.0, 0.1, M)
15 | samples.sort()
16 | return samples
17 |
18 |
19 | def discriminative_network(x):
20 | """Outputs probability in logits."""
21 | h0 = slim.fully_connected(x, 10, activation_fn=tf.nn.relu)
22 | return slim.fully_connected(h0, 1, activation_fn=None)
23 |
24 |
25 | class test_gan_class(tf.test.TestCase):
26 |
27 | def test_normal(self):
28 | with self.test_session() as sess:
29 | # DATA
30 | M = 12 # batch size during training
31 | x_ph = tf.placeholder(tf.float32, [M, 1])
32 |
33 | # MODEL
34 | with tf.variable_scope("Gen"):
35 | theta = tf.Variable(0.0)
36 | x = Normal(theta, 0.1, sample_shape=[M, 1])
37 |
38 | # INFERENCE
39 | inference = ed.GANInference(
40 | data={x: x_ph}, discriminator=discriminative_network)
41 | inference.initialize(n_iter=1000)
42 | tf.global_variables_initializer().run()
43 |
44 | for _ in range(inference.n_iter):
45 | x_data = next_batch(M).reshape([M, 1])
46 | inference.update(feed_dict={x_ph: x_data})
47 |
48 | # CRITICISM
49 | self.assertAllClose(theta.eval(), 4.0, rtol=1.0, atol=1.0)
50 |
51 | if __name__ == '__main__':
52 | ed.set_seed(54432132)
53 | tf.test.main()
54 |
--------------------------------------------------------------------------------
/tests/inferences/implicitklqp_test.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | import edward as ed
6 | import tensorflow as tf
7 |
8 | from edward.models import Normal
9 |
10 |
11 | class test_implicit_klqp_class(tf.test.TestCase):
12 |
13 | def test_normal_run(self):
14 | def ratio_estimator(data, local_vars, global_vars):
15 | """Use the optimal ratio estimator, r(z) = log p(z). We add a
16 | TensorFlow variable as the algorithm assumes that the function
17 | has parameters to optimize."""
18 | w = tf.get_variable("w", [])
19 | return z.log_prob(local_vars[z]) + w
20 |
21 | with self.test_session() as sess:
22 | z = Normal(loc=5.0, scale=1.0)
23 |
24 | qz = Normal(loc=tf.Variable(tf.random_normal([])),
25 | scale=tf.nn.softplus(tf.Variable(tf.random_normal([]))))
26 |
27 | inference = ed.ImplicitKLqp({z: qz}, discriminator=ratio_estimator)
28 | inference.run(n_iter=200)
29 |
30 | self.assertAllClose(qz.mean().eval(), 5.0, atol=1.0)
31 |
32 | if __name__ == '__main__':
33 | ed.set_seed(47324)
34 | tf.test.main()
35 |
--------------------------------------------------------------------------------
/tests/inferences/inference_data_test.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | import edward as ed
6 | import numpy as np
7 | import six
8 | import tensorflow as tf
9 |
10 | from edward.models import Normal
11 |
12 |
13 | class test_inference_data_class(tf.test.TestCase):
14 |
15 | def test_preloaded_full(self):
16 | with self.test_session() as sess:
17 | x_data = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0])
18 |
19 | mu = Normal(loc=0.0, scale=1.0)
20 | x = Normal(loc=tf.ones(10) * mu, scale=tf.ones(1))
21 |
22 | qmu = Normal(loc=tf.Variable(0.0), scale=tf.constant(1.0))
23 |
24 | inference = ed.KLqp({mu: qmu}, data={x: x_data})
25 | inference.initialize()
26 | tf.global_variables_initializer().run()
27 |
28 | val = sess.run(inference.data[x])
29 | self.assertAllEqual(val, x_data)
30 |
31 | def test_feeding(self):
32 | with self.test_session() as sess:
33 | x_data = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0])
34 | x_ph = tf.placeholder(tf.float32, [10])
35 |
36 | mu = Normal(loc=0.0, scale=1.0)
37 | x = Normal(loc=tf.ones(10) * mu, scale=tf.ones(10))
38 |
39 | qmu = Normal(loc=tf.Variable(0.0), scale=tf.constant(1.0))
40 |
41 | inference = ed.KLqp({mu: qmu}, data={x: x_ph})
42 | inference.initialize()
43 | tf.global_variables_initializer().run()
44 |
45 | val = sess.run( # avoid directly fetching placeholder
46 | tf.identity(list(six.itervalues(inference.data))[0]),
47 | feed_dict={inference.data[x]: x_data})
48 | self.assertAllEqual(val, x_data)
49 |
50 | def test_read_file(self):
51 | with self.test_session() as sess:
52 | # Construct a queue containing a list of filenames.
53 | filename_queue = tf.train.string_input_producer(
54 | ["tests/data/toy_data.tfrecords"])
55 | # Read a single serialized example from a filename.
56 | # `serialized_example` is a Tensor of type str.
57 | reader = tf.TFRecordReader()
58 | _, serialized_example = reader.read(filename_queue)
59 | # Convert serialized example back to actual values,
60 | # describing format of the objects to be returned.
61 | features = tf.parse_single_example(
62 | serialized_example,
63 | features={'outcome': tf.FixedLenFeature([], tf.float32)})
64 | x_batch = features['outcome']
65 |
66 | mu = Normal(loc=0.0, scale=1.0)
67 | x = Normal(loc=tf.ones([]) * mu, scale=tf.ones([]))
68 |
69 | qmu = Normal(loc=tf.Variable(0.0), scale=tf.constant(1.0))
70 |
71 | inference = ed.KLqp({mu: qmu}, data={x: x_batch})
72 | inference.initialize(scale={x: 10.0})
73 |
74 | tf.global_variables_initializer().run()
75 |
76 | coord = tf.train.Coordinator()
77 | threads = tf.train.start_queue_runners(coord=coord)
78 |
79 | # Check data varies by session run.
80 | val = sess.run(inference.data[x])
81 | val_1 = sess.run(inference.data[x])
82 | self.assertNotEqual(val, val_1)
83 |
84 | coord.request_stop()
85 | coord.join(threads)
86 |
87 | if __name__ == '__main__':
88 | ed.set_seed(1512351)
89 | tf.test.main()
90 |
--------------------------------------------------------------------------------
/tests/inferences/inference_debug_test.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | import edward as ed
6 | import numpy as np
7 | import tensorflow as tf
8 |
9 | from edward.models import Normal
10 |
11 |
12 | class test_inference_debug_class(tf.test.TestCase):
13 |
14 | def test_placeholder(self):
15 | with self.test_session():
16 | N = 5
17 | mu = Normal(loc=0.0, scale=1.0)
18 | x = Normal(loc=tf.ones(N) * mu, scale=tf.ones(N))
19 |
20 | qmu = Normal(loc=tf.Variable(0.0), scale=tf.constant(1.0))
21 |
22 | x_ph = tf.placeholder(tf.float32, [N])
23 | inference = ed.KLqp({mu: qmu}, data={x: x_ph})
24 | inference.initialize(debug=True)
25 | tf.global_variables_initializer().run()
26 | inference.update(feed_dict={x_ph: np.zeros(N, np.float32)})
27 |
28 | def test_tensor(self):
29 | with self.test_session():
30 | N = 5
31 | mu = Normal(loc=0.0, scale=1.0)
32 | x = Normal(loc=tf.ones(N) * mu, scale=tf.ones(N))
33 |
34 | qmu = Normal(loc=tf.Variable(0.0), scale=tf.constant(1.0))
35 |
36 | x_data = tf.zeros(N)
37 | inference = ed.KLqp({mu: qmu}, data={x: x_data})
38 | inference.run(n_iter=1, debug=True)
39 |
40 | if __name__ == '__main__':
41 | tf.test.main()
42 |
--------------------------------------------------------------------------------
/tests/inferences/inference_integer_test.py:
--------------------------------------------------------------------------------
1 | """Test that integer variables are handled properly during initialization."""
2 | from __future__ import absolute_import
3 | from __future__ import division
4 | from __future__ import print_function
5 |
6 | import edward as ed
7 | import numpy as np
8 | import tensorflow as tf
9 |
10 | from edward.models import Normal, Categorical
11 |
12 |
13 | def neural_network(x, W_0, W_1, b_0, b_1):
14 | h = tf.nn.relu(tf.matmul(x, W_0) + b_0)
15 |   h = tf.matmul(h, W_1) + b_1  # output layer stays linear; these are logits
16 | return h
17 |
18 |
19 | class test_integer_init(tf.test.TestCase):
20 |
21 | def test(self):
22 | with self.test_session():
23 | X_train = np.zeros([100, 10], dtype=np.float32)
24 | y_train = np.zeros(100, dtype=np.int32)
25 |
26 | N, D = X_train.shape
27 | H = 10 # number of hidden units
28 | K = 10 # number of classes
29 |
30 | W_0 = Normal(loc=tf.zeros([D, H]), scale=tf.ones([D, H]))
31 | W_1 = Normal(loc=tf.zeros([H, K]), scale=tf.ones([H, K]))
32 | b_0 = Normal(loc=tf.zeros(H), scale=tf.ones(H))
33 | b_1 = Normal(loc=tf.zeros(K), scale=tf.ones(K))
34 |
35 | y = Categorical(logits=neural_network(X_train, W_0, W_1, b_0, b_1))
36 |
37 | qW_0 = Normal(
38 | loc=tf.Variable(tf.random_normal([D, H])),
39 | scale=tf.nn.softplus(tf.Variable(tf.random_normal([D, H]))))
40 | qW_1 = Normal(
41 | loc=tf.Variable(tf.random_normal([H, K])),
42 | scale=tf.nn.softplus(tf.Variable(tf.random_normal([H, K]))))
43 | qb_0 = Normal(
44 | loc=tf.Variable(tf.random_normal([H])),
45 | scale=tf.nn.softplus(tf.Variable(tf.random_normal([H]))))
46 | qb_1 = Normal(
47 | loc=tf.Variable(tf.random_normal([K])),
48 | scale=tf.nn.softplus(tf.Variable(tf.random_normal([K]))))
49 |
50 | inference = ed.KLqp({W_0: qW_0, b_0: qb_0, W_1: qW_1, b_1: qb_1},
51 | data={y: y_train})
52 | inference.run(n_iter=1)
53 |
54 | if __name__ == '__main__':
55 | tf.test.main()
56 |
--------------------------------------------------------------------------------
/tests/inferences/inference_reset_test.py:
--------------------------------------------------------------------------------
1 | """Test that reset op works."""
2 | from __future__ import absolute_import
3 | from __future__ import division
4 | from __future__ import print_function
5 |
6 | import edward as ed
7 | import tensorflow as tf
8 |
9 | from edward.models import Normal
10 |
11 |
12 | class test_inference_reset_class(tf.test.TestCase):
13 |
14 | def test(self):
15 | with self.test_session() as sess:
16 | mu = Normal(loc=0.0, scale=1.0)
17 | x = Normal(loc=mu, scale=1.0, sample_shape=5)
18 |
19 | qmu = Normal(loc=tf.Variable(0.0), scale=tf.constant(1.0))
20 |
21 | inference = ed.KLqp({mu: qmu}, data={x: tf.zeros(5)})
22 | inference.initialize()
23 | tf.global_variables_initializer().run()
24 |
25 | first = sess.run(inference.t)
26 | inference.update()
27 | second = sess.run(inference.t)
28 | self.assertEqual(first, second - 1)
29 | sess.run(inference.reset)
30 | third = sess.run(inference.t)
31 | self.assertEqual(first, third)
32 |
33 | if __name__ == '__main__':
34 | tf.test.main()
35 |
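
`inference.reset` re-initializes the iteration counter `t` (and, as klpq_test below also checks, the variables the optimizer creates), so an inference can be restarted from scratch without rebuilding the graph; the assertions above verify exactly that round trip: `t` advances by one per `update()` and returns to its initial value after running the reset op.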
--------------------------------------------------------------------------------
/tests/inferences/inference_scale_test.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | import edward as ed
6 | import numpy as np
7 | import tensorflow as tf
8 |
9 | from edward.models import Normal
10 |
11 |
12 | class test_inference_scale_class(tf.test.TestCase):
13 |
14 | def test_scale_0d(self):
15 | with self.test_session():
16 | N = 10
17 | M = 5
18 | mu = Normal(loc=0.0, scale=1.0)
19 | x = Normal(loc=tf.ones(M) * mu, scale=tf.ones(M))
20 |
21 | qmu = Normal(loc=tf.Variable(0.0), scale=tf.constant(1.0))
22 |
23 | x_ph = tf.placeholder(tf.float32, [M])
24 | inference = ed.KLqp({mu: qmu}, data={x: x_ph})
25 | inference.initialize(scale={x: float(N) / M})
26 | self.assertAllEqual(inference.scale[x], float(N) / M)
27 |
28 | def test_scale_1d(self):
29 | with self.test_session():
30 | N = 10
31 | M = 5
32 | mu = Normal(loc=0.0, scale=1.0)
33 | x = Normal(loc=tf.ones(M) * mu, scale=tf.ones(M))
34 |
35 | qmu = Normal(loc=tf.Variable(0.0), scale=tf.constant(1.0))
36 |
37 | x_ph = tf.placeholder(tf.float32, [M])
38 | inference = ed.KLqp({mu: qmu}, data={x: x_ph})
39 | inference.initialize(scale={x: tf.range(M, dtype=tf.float32)})
40 | self.assertAllEqual(inference.scale[x].eval(), np.arange(M))
41 |
42 | if __name__ == '__main__':
43 | tf.test.main()
44 |
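
The `scale` argument compensates for data subsampling: with `scale={x: float(N) / M}`, every evaluation of `x`'s log-likelihood on an M-point minibatch is multiplied by N/M, making it an unbiased stand-in for the full N-point dataset. A sketch of the minibatch loop this enables, under the same convention (the dataset and batch here are illustrative):

    import edward as ed
    import numpy as np
    import tensorflow as tf

    from edward.models import Normal

    N, M = 10000, 100  # dataset size, minibatch size
    x_train = np.random.randn(N).astype(np.float32)

    mu = Normal(loc=0.0, scale=1.0)
    x = Normal(loc=tf.ones(M) * mu, scale=tf.ones(M))
    qmu = Normal(loc=tf.Variable(0.0),
                 scale=tf.nn.softplus(tf.Variable(0.0)))

    x_ph = tf.placeholder(tf.float32, [M])
    inference = ed.KLqp({mu: qmu}, data={x: x_ph})
    inference.initialize(scale={x: float(N) / M}, n_iter=1000)

    sess = ed.get_session()
    tf.global_variables_initializer().run()
    for _ in range(inference.n_iter):
      batch = np.random.choice(x_train, M)  # sample a minibatch
      inference.update(feed_dict={x_ph: batch})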
--------------------------------------------------------------------------------
/tests/inferences/klpq_test.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | import edward as ed
6 | import numpy as np
7 | import tensorflow as tf
8 |
9 | from edward.models import Bernoulli, Normal
10 |
11 |
12 | class test_klpq_class(tf.test.TestCase):
13 |
14 | def _test_normal_normal(self, Inference, default, *args, **kwargs):
15 | with self.test_session() as sess:
16 | x_data = np.array([0.0] * 50, dtype=np.float32)
17 |
18 | mu = Normal(loc=0.0, scale=1.0)
19 | x = Normal(loc=mu, scale=1.0, sample_shape=50)
20 |
21 |       if not default:
22 |         qmu_loc = tf.Variable(tf.random_normal([]))
23 |         qmu_scale = tf.nn.softplus(tf.Variable(tf.random_normal([])))
24 |         qmu = Normal(loc=qmu_loc, scale=qmu_scale)
25 |
26 |         # analytic solution: N(loc=0.0, scale=\sqrt{1/51}=0.140)
27 |         inference = Inference({mu: qmu}, data={x: x_data})
28 |       else:
29 |         inference = Inference([mu], data={x: x_data})
30 |         qmu = inference.latent_vars[mu]
31 |       inference.run(*args, **kwargs)
32 |
33 |       self.assertAllClose(qmu.mean().eval(), 0, rtol=1e-1, atol=1e-1)
34 |       self.assertAllClose(qmu.stddev().eval(), np.sqrt(1 / 51),
35 |                           rtol=1e-1, atol=1e-1)
36 |
37 |       variables = tf.get_collection(
38 |           tf.GraphKeys.GLOBAL_VARIABLES, scope='optimizer')
39 |       old_t, old_variables = sess.run([inference.t, variables])
40 |       self.assertEqual(old_t, inference.n_iter)
41 |       sess.run(inference.reset)
42 |       new_t, new_variables = sess.run([inference.t, variables])
43 |       self.assertEqual(new_t, 0)
44 |       self.assertNotEqual(old_variables, new_variables)
45 |
46 |   def _test_model_parameter(self, Inference, *args, **kwargs):
47 |     with self.test_session() as sess:
48 |       x_data = np.array([0, 1, 0, 0, 0, 0, 0, 0, 0, 1])
49 |
50 |       p = tf.sigmoid(tf.Variable(0.5))
51 |       x = Bernoulli(probs=p, sample_shape=10)
52 |
53 |       inference = Inference({}, data={x: x_data})
54 |       inference.run(*args, **kwargs)
55 |
56 |       self.assertAllClose(p.eval(), 0.2, rtol=5e-2, atol=5e-2)
57 |
58 |   def test_klpq(self):
59 |     self._test_normal_normal(ed.KLpq, default=False, n_samples=25, n_iter=100)
60 |     self._test_normal_normal(ed.KLpq, default=True, n_samples=25, n_iter=100)
61 |     self._test_model_parameter(ed.KLpq, n_iter=50)
62 |
63 |   def test_klpq_nsamples_check(self):
64 |     with self.assertRaisesRegexp(ValueError,
65 |                                  "n_samples should be greater than zero: 0"):
66 |       self._test_normal_normal(ed.KLpq, default=True, n_samples=0, n_iter=10)
67 |
68 | if __name__ == '__main__':
69 |   ed.set_seed(42)
70 |   tf.test.main()
71 |
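
The "analytic solution" in the comment is standard Normal-Normal conjugacy: with prior \( \mu \sim \mathcal{N}(0, 1) \) and 50 unit-variance observations that are all zero,

\[
p(\mu \mid x) = \mathcal{N}\!\left(\frac{\sum_{i=1}^{50} x_i}{1 + 50},\ \frac{1}{1 + 50}\right)
             = \mathcal{N}\!\left(0,\ \tfrac{1}{51}\right),
\qquad \sqrt{1/51} \approx 0.140,
\]

which is what the loose rtol/atol checks on `qmu`'s mean and stddev assert. Likewise, `_test_model_parameter` recovers a maximum-likelihood estimate: two successes in ten Bernoulli draws give \( \hat{p} = 2/10 = 0.2 \).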
--------------------------------------------------------------------------------
/tests/inferences/map_test.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | import edward as ed
6 | import numpy as np
7 | import tensorflow as tf
8 |
9 | from edward.models import Normal, PointMass
10 |
11 |
12 | class test_map_class(tf.test.TestCase):
13 |
14 | def test_normalnormal_run(self):
15 | with self.test_session() as sess:
16 | x_data = np.array([0.0] * 50, dtype=np.float32)
17 |
18 | mu = Normal(loc=0.0, scale=1.0)
19 | x = Normal(loc=mu, scale=1.0, sample_shape=50)
20 |
21 | qmu = PointMass(params=tf.Variable(1.0))
22 |
23 | # analytic solution: N(loc=0.0, scale=\sqrt{1/51}=0.140)
24 | inference = ed.MAP({mu: qmu}, data={x: x_data})
25 | inference.run(n_iter=1000)
26 |
27 | self.assertAllClose(qmu.mean().eval(), 0)
28 |
29 | def test_normalnormal_regularization(self):
30 | with self.test_session() as sess:
31 | x_data = np.array([5.0] * 50, dtype=np.float32)
32 |
33 | mu = Normal(loc=0.0, scale=1.0)
34 | x = Normal(loc=mu, scale=1.0, sample_shape=50)
35 |
36 | qmu = PointMass(params=tf.Variable(1.0))
37 |
38 | inference = ed.MAP({mu: qmu}, data={x: x_data})
39 | inference.run(n_iter=1000)
40 | mu_val = qmu.mean().eval()
41 |
42 | # regularized solution
43 | regularizer = tf.contrib.layers.l2_regularizer(scale=1.0)
44 | mu_reg = tf.get_variable("mu_reg", shape=[],
45 | regularizer=regularizer)
46 | x_reg = Normal(loc=mu_reg, scale=1.0, sample_shape=50)
47 |
48 | inference_reg = ed.MAP(None, data={x_reg: x_data})
49 | inference_reg.run(n_iter=1000)
50 |
51 | mu_reg_val = mu_reg.eval()
52 | self.assertAllClose(mu_val, mu_reg_val)
53 |
54 | if __name__ == '__main__':
55 | ed.set_seed(42)
56 | tf.test.main()
57 |
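
`test_normalnormal_regularization` leans on the identity between MAP estimation under a standard normal prior and maximum likelihood with an L2 penalty: the prior contributes

\[
-\log \mathcal{N}(\mu \mid 0, 1) = \tfrac{1}{2}\mu^{2} + \text{const}
\]

to the MAP objective, while `tf.contrib.layers.l2_regularizer(scale=1.0)` adds \( \text{scale} \cdot \lVert w \rVert^{2} / 2 \) to the loss (it wraps `tf.nn.l2_loss`). With `scale=1.0` the two objectives differ only by a constant, so both runs should land on the same point, here \( 50 \cdot 5 / 51 \approx 4.90 \).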
--------------------------------------------------------------------------------
/tests/inferences/saver_test.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | import edward as ed
6 | import numpy as np
7 | import tensorflow as tf
8 |
9 | from edward.models import Normal, PointMass
10 |
11 |
12 | class test_saver_class(tf.test.TestCase):
13 |
14 | def test_export_meta_graph(self):
15 | with self.test_session() as sess:
16 | x_data = np.array([0.0] * 50, dtype=np.float32)
17 |
18 | mu = Normal(loc=0.0, scale=1.0)
19 | x = Normal(loc=mu, scale=1.0, sample_shape=50)
20 |
21 | qmu = PointMass(params=tf.Variable(1.0))
22 |
23 | inference = ed.MAP({mu: qmu}, data={x: x_data})
24 | inference.run(n_iter=10)
25 |
26 | saver = tf.train.Saver()
27 | saver.export_meta_graph("/tmp/test_saver.meta")
28 |
29 | def test_import_meta_graph(self):
30 | with self.test_session() as sess:
31 | new_saver = tf.train.import_meta_graph("tests/data/test_saver.meta")
32 | new_saver.restore(sess, "tests/data/test_saver")
33 | qmu_variable = tf.get_collection(
34 | tf.GraphKeys.TRAINABLE_VARIABLES, scope="posterior")[0]
35 | self.assertNotEqual(qmu_variable.eval(), 1.0)
36 |
37 | def test_restore(self):
38 | with self.test_session() as sess:
39 | x_data = np.array([0.0] * 50, dtype=np.float32)
40 |
41 | mu = Normal(loc=0.0, scale=1.0)
42 | x = Normal(loc=mu, scale=1.0, sample_shape=50)
43 |
44 | with tf.variable_scope("posterior"):
45 | qmu = PointMass(params=tf.Variable(1.0))
46 |
47 | inference = ed.MAP({mu: qmu}, data={x: x_data})
48 |
49 | saver = tf.train.Saver()
50 | saver.restore(sess, "tests/data/test_saver")
51 | qmu_variable = tf.get_collection(
52 | tf.GraphKeys.TRAINABLE_VARIABLES, scope="posterior")[0]
53 | self.assertNotEqual(qmu_variable.eval(), 1.0)
54 |
55 | def test_save(self):
56 | with self.test_session() as sess:
57 | x_data = np.array([0.0] * 50, dtype=np.float32)
58 |
59 | mu = Normal(loc=0.0, scale=1.0)
60 | x = Normal(loc=mu, scale=1.0, sample_shape=50)
61 |
62 | with tf.variable_scope("posterior"):
63 | qmu = PointMass(params=tf.Variable(1.0))
64 |
65 | inference = ed.MAP({mu: qmu}, data={x: x_data})
66 | inference.run(n_iter=10)
67 |
68 | saver = tf.train.Saver()
69 | saver.save(sess, "/tmp/test_saver")
70 |
71 | if __name__ == '__main__':
72 | ed.set_seed(23451)
73 | tf.test.main()
74 |
--------------------------------------------------------------------------------
/tests/inferences/wakesleep_test.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | import edward as ed
6 | import numpy as np
7 | import tensorflow as tf
8 |
9 | from edward.models import Bernoulli, Normal
10 |
11 |
12 | class test_wakesleep_class(tf.test.TestCase):
13 |
14 | def _test_model_parameter(self, Inference, *args, **kwargs):
15 | with self.test_session() as sess:
16 | x_data = np.array([0, 1, 0, 0, 0, 0, 0, 0, 0, 1])
17 |
18 | p = tf.sigmoid(tf.Variable(0.5))
19 | x = Bernoulli(probs=p, sample_shape=10)
20 |
21 | inference = Inference({}, data={x: x_data})
22 | inference.run(*args, **kwargs)
23 |
24 | self.assertAllClose(p.eval(), 0.2, rtol=5e-2, atol=5e-2)
25 |
26 | def test_wakesleep(self):
27 | self._test_model_parameter(ed.WakeSleep, n_iter=50)
28 |
29 | if __name__ == '__main__':
30 | ed.set_seed(42)
31 | tf.test.main()
32 |
--------------------------------------------------------------------------------
/tests/inferences/wgan_inference_test.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | import edward as ed
6 | import numpy as np
7 | import tensorflow as tf
8 |
9 | from edward.models import Normal
10 | from tensorflow.contrib import slim
11 |
12 |
13 | def next_batch(M):
14 | samples = np.random.normal(4.0, 0.1, M)
15 | samples.sort()
16 | return samples
17 |
18 |
19 | def discriminative_network(x):
20 | """Outputs probability in logits."""
21 | h0 = slim.fully_connected(x, 10, activation_fn=tf.nn.relu)
22 | return slim.fully_connected(h0, 1, activation_fn=None)
23 |
24 |
25 | class test_wgan_class(tf.test.TestCase):
26 |
27 | def test_normal_clip(self):
28 | with self.test_session() as sess:
29 | # DATA
30 | M = 12 # batch size during training
31 | x_ph = tf.placeholder(tf.float32, [M, 1])
32 |
33 | # MODEL
34 | with tf.variable_scope("Gen"):
35 | theta = tf.Variable(0.0)
36 | x = Normal(theta, 0.1, sample_shape=[M, 1])
37 |
38 | # INFERENCE
39 | inference = ed.WGANInference(
40 | data={x: x_ph}, discriminator=discriminative_network)
41 | inference.initialize(penalty=None, clip=0.01, n_iter=500)
42 | tf.global_variables_initializer().run()
43 |
44 | for _ in range(inference.n_iter):
45 | x_data = next_batch(M).reshape([M, 1])
46 | for _ in range(5):
47 | info_dict_d = inference.update(feed_dict={x_ph: x_data},
48 | variables="Disc")
49 |
50 | inference.update(feed_dict={x_ph: x_data}, variables="Gen")
51 |
52 | self.assertAllClose(theta.eval(), 4.0, rtol=1.0, atol=1.0)
53 |
54 | def test_normal_penalty(self):
55 | with self.test_session() as sess:
56 | # DATA
57 | M = 12 # batch size during training
58 | x_ph = tf.placeholder(tf.float32, [M, 1])
59 |
60 | # MODEL
61 | with tf.variable_scope("Gen"):
62 | theta = tf.Variable(0.0)
63 | x = Normal(theta, 0.1, sample_shape=[M, 1])
64 |
65 | # INFERENCE
66 | inference = ed.WGANInference(
67 | data={x: x_ph}, discriminator=discriminative_network)
68 | inference.initialize(penalty=0.1, n_iter=500)
69 | tf.global_variables_initializer().run()
70 |
71 | for _ in range(inference.n_iter):
72 | x_data = next_batch(M).reshape([M, 1])
73 | for _ in range(5):
74 | info_dict_d = inference.update(feed_dict={x_ph: x_data},
75 | variables="Disc")
76 |
77 | inference.update(feed_dict={x_ph: x_data}, variables="Gen")
78 |
79 | # CRITICISM
80 | self.assertAllClose(theta.eval(), 4.0, rtol=1.0, atol=1.0)
81 |
82 | if __name__ == '__main__':
83 | ed.set_seed(12451)
84 | tf.test.main()
85 |
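
The two tests differ only in how the critic is kept approximately 1-Lipschitz: `initialize(penalty=None, clip=0.01)` follows the original WGAN recipe of clipping critic weights to \([-0.01, 0.01]\), while `initialize(penalty=0.1)` uses a gradient penalty in the style of WGAN-GP, which augments the critic objective with a term of the form

\[
\lambda\, \mathbb{E}_{\hat{x}}\!\left[\big(\lVert \nabla_{\hat{x}} D(\hat{x}) \rVert_2 - 1\big)^{2}\right],
\]

where \( \hat{x} \) interpolates between real and generated samples. The inner loop of five "Disc" updates per "Gen" update is the usual WGAN training schedule.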
--------------------------------------------------------------------------------
/tests/models/bernoulli_doc_test.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | import tensorflow as tf
6 |
7 | from edward.models import Bernoulli
8 | from tensorflow.contrib import distributions as ds
9 |
10 |
11 | class test_bernoulli_doc_class(tf.test.TestCase):
12 |
13 | def test(self):
14 | self.assertGreater(len(Bernoulli.__doc__), 0)
15 | self.assertEqual(Bernoulli.__doc__, ds.Bernoulli.__doc__)
16 | self.assertEqual(Bernoulli.__name__, "Bernoulli")
17 |
18 | if __name__ == '__main__':
19 | tf.test.main()
20 |
--------------------------------------------------------------------------------
/tests/models/bernoulli_log_prob_test.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | import tensorflow as tf
6 |
7 | from edward.models import Bernoulli
8 | from tensorflow.contrib import distributions as ds
9 |
10 |
11 | class test_bernoulli_log_prob_class(tf.test.TestCase):
12 |
13 | def _test(self, probs, n):
14 | rv = Bernoulli(probs)
15 | dist = ds.Bernoulli(probs)
16 | x = rv.sample(n).eval()
17 | self.assertAllEqual(rv.log_prob(x).eval(), dist.log_prob(x).eval())
18 |
19 | def test_1d(self):
20 | with self.test_session():
21 | self._test(tf.zeros([1]) + 0.5, [1])
22 | self._test(tf.zeros([1]) + 0.5, [5])
23 | self._test(tf.zeros([5]) + 0.5, [1])
24 | self._test(tf.zeros([5]) + 0.5, [5])
25 |
26 | if __name__ == '__main__':
27 | tf.test.main()
28 |
--------------------------------------------------------------------------------
/tests/models/bernoulli_sample_test.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | import numpy as np
6 | import tensorflow as tf
7 |
8 | from edward.models import Bernoulli
9 | from tensorflow.contrib import distributions as ds
10 |
11 |
12 | class test_bernoulli_sample_class(tf.test.TestCase):
13 |
14 | def _test(self, probs, n):
15 | rv = Bernoulli(probs)
16 | dist = ds.Bernoulli(probs)
17 | self.assertEqual(rv.sample(n).shape, dist.sample(n).shape)
18 |
19 | def test_0d(self):
20 | with self.test_session():
21 | self._test(0.5, [1])
22 | self._test(np.array(0.5), [1])
23 | self._test(tf.constant(0.5), [1])
24 |
25 | def test_1d(self):
26 | with self.test_session():
27 | self._test(np.array([0.5]), [1])
28 | self._test(np.array([0.5]), [5])
29 | self._test(np.array([0.2, 0.8]), [1])
30 | self._test(np.array([0.2, 0.8]), [10])
31 | self._test(tf.constant([0.5]), [1])
32 | self._test(tf.constant([0.5]), [5])
33 | self._test(tf.constant([0.2, 0.8]), [1])
34 | self._test(tf.constant([0.2, 0.8]), [10])
35 |
36 | if __name__ == '__main__':
37 | tf.test.main()
38 |
--------------------------------------------------------------------------------
/tests/models/dirichlet_process_sample_test.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | import numpy as np
6 | import tensorflow as tf
7 |
8 | from edward.models import DirichletProcess, Normal
9 |
10 |
11 | class test_dirichletprocess_sample_class(tf.test.TestCase):
12 |
13 | def _test(self, n, concentration, base):
14 | x = DirichletProcess(concentration=concentration, base=base)
15 | val_est = x.sample(n).shape.as_list()
16 | val_true = n + tf.convert_to_tensor(concentration).shape.as_list() + \
17 | tf.convert_to_tensor(base).shape.as_list()
18 | self.assertEqual(val_est, val_true)
19 |
20 | def test_concentration_0d_base_0d(self):
21 | with self.test_session():
22 | self._test([1], 0.5, Normal(loc=0.0, scale=0.5))
23 | self._test([5], tf.constant(0.5), Normal(loc=0.0, scale=0.5))
24 |
25 | def test_concentration_1d_base_0d(self):
26 | with self.test_session():
27 | self._test([1], np.array([0.5]), Normal(loc=0.0, scale=0.5))
28 | self._test([5], tf.constant([0.5]), Normal(loc=0.0, scale=0.5))
29 | self._test([1], tf.constant([0.2, 1.5]), Normal(loc=0.0, scale=0.5))
30 | self._test([5], tf.constant([0.2, 1.5]), Normal(loc=0.0, scale=0.5))
31 |
32 | def test_concentration_0d_base_1d(self):
33 | with self.test_session():
34 | self._test([1], 0.5, Normal(loc=tf.zeros(3), scale=tf.ones(3)))
35 | self._test([5], tf.constant(0.5),
36 | Normal(loc=tf.zeros(3), scale=tf.ones(3)))
37 |
38 | def test_concentration_1d_base_2d(self):
39 | with self.test_session():
40 | self._test([1], np.array([0.5]),
41 | Normal(loc=tf.zeros([3, 4]), scale=tf.ones([3, 4])))
42 | self._test([5], tf.constant([0.5]),
43 | Normal(loc=tf.zeros([3, 4]), scale=tf.ones([3, 4])))
44 | self._test([1], tf.constant([0.2, 1.5]),
45 | Normal(loc=tf.zeros([3, 4]), scale=tf.ones([3, 4])))
46 | self._test([5], tf.constant([0.2, 1.5]),
47 | Normal(loc=tf.zeros([3, 4]), scale=tf.ones([3, 4])))
48 |
49 | def test_persistent_state(self):
50 | with self.test_session() as sess:
51 | dp = DirichletProcess(0.1, Normal(loc=0.0, scale=1.0))
52 | x = dp.sample(5)
53 | y = dp.sample(5)
54 | x_data, y_data, locs = sess.run([x, y, dp.locs])
55 | for sample in x_data:
56 | self.assertTrue(sample in locs)
57 | for sample in y_data:
58 | self.assertTrue(sample in locs)
59 |
60 | if __name__ == '__main__':
61 | tf.test.main()
62 |
--------------------------------------------------------------------------------
/tests/models/empirical_sample_test.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | import numpy as np
6 | import tensorflow as tf
7 |
8 | from edward.models import Empirical
9 |
10 |
11 | class test_empirical_sample_class(tf.test.TestCase):
12 |
13 | def _test(self, params, n):
14 | x = Empirical(params=params)
15 | val_est = x.sample(n).shape.as_list()
16 | val_true = n + tf.convert_to_tensor(params).shape.as_list()[1:]
17 | self.assertEqual(val_est, val_true)
18 |
19 | def test_0d(self):
20 | with self.test_session():
21 | self._test(0.5, [1])
22 | self._test(np.array(0.5), [1])
23 | self._test(tf.constant(0.5), [1])
24 | self._test(np.array([0.5]), [1])
25 | self._test(np.array([0.5]), [5])
26 | self._test(np.array([0.2, 0.8]), [1])
27 | self._test(np.array([0.2, 0.8]), [10])
28 | self._test(tf.constant([0.5]), [1])
29 | self._test(tf.constant([0.5]), [5])
30 | self._test(tf.constant([0.2, 0.8]), [1])
31 | self._test(tf.constant([0.2, 0.8]), [10])
32 |
33 | if __name__ == '__main__':
34 | tf.test.main()
35 |
--------------------------------------------------------------------------------
/tests/models/keras_core_layers_test.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | import edward as ed
6 | import keras.layers as layers
7 | import tensorflow as tf
8 |
9 | from edward.models import Normal
10 |
11 |
12 | class test_keras_core_layers_class(tf.test.TestCase):
13 |
14 | def test_dense(self):
15 | x = Normal(loc=tf.zeros([100, 10, 5]), scale=tf.ones([100, 10, 5]))
16 | y = layers.Dense(32)(x.value())
17 |
18 | def test_activation(self):
19 | x = Normal(loc=tf.zeros([100, 10, 5]), scale=tf.ones([100, 10, 5]))
20 | y = layers.Activation('tanh')(x.value())
21 |
22 | def test_dropout(self):
23 | x = Normal(loc=tf.zeros([100, 10, 5]), scale=tf.ones([100, 10, 5]))
24 | y = layers.Dropout(0.5)(x.value())
25 |
26 | def test_flatten(self):
27 | x = Normal(loc=tf.zeros([100, 10, 5]), scale=tf.ones([100, 10, 5]))
28 | y = layers.Flatten()(x.value())
29 | with self.test_session():
30 | self.assertEqual(y.eval().shape, (100, 50))
31 |
32 | def test_reshape(self):
33 | x = Normal(loc=tf.zeros([100, 10, 5]), scale=tf.ones([100, 10, 5]))
34 | y = layers.Reshape((5, 10))(x.value())
35 | with self.test_session():
36 | self.assertEqual(y.eval().shape, (100, 5, 10))
37 |
38 | def test_permute(self):
39 | x = Normal(loc=tf.zeros([100, 10, 5]), scale=tf.ones([100, 10, 5]))
40 | y = layers.Permute((2, 1))(x.value())
41 | with self.test_session():
42 | self.assertEqual(y.eval().shape, (100, 5, 10))
43 |
44 | def test_repeat_vector(self):
45 | x = Normal(loc=tf.zeros([100, 10]), scale=tf.ones([100, 10]))
46 | y = layers.RepeatVector(2)(x.value())
47 | with self.test_session():
48 | self.assertEqual(y.eval().shape, (100, 2, 10))
49 |
50 | def test_lambda(self):
51 | x = Normal(loc=tf.zeros([100, 10, 5]), scale=tf.ones([100, 10, 5]))
52 | y = layers.Lambda(lambda x: x ** 2)(x.value())
53 |
54 | def test_activity_regularization(self):
55 | x = Normal(loc=tf.zeros([100, 10, 5]), scale=tf.ones([100, 10, 5]))
56 | y = layers.ActivityRegularization(l1=0.1)(x.value())
57 |
58 | def test_masking(self):
59 | x = Normal(loc=tf.zeros([100, 10, 5]), scale=tf.ones([100, 10, 5]))
60 | y = layers.Masking()(x.value())
61 |
62 | if __name__ == '__main__':
63 | tf.test.main()
64 |
--------------------------------------------------------------------------------
/tests/models/param_mixture_sample_test.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | import edward as ed
6 | import numpy as np
7 | import tensorflow as tf
8 |
9 | from edward.models import Beta, Dirichlet, Normal, ParamMixture
10 |
11 |
12 | class test_param_mixture_sample_class(tf.test.TestCase):
13 |
14 | def _test(self, n, *args, **kwargs):
15 | rv = ParamMixture(*args, **kwargs)
16 | val_est = rv.sample(n).shape
17 | val_true = tf.TensorShape(n).concatenate(
18 | rv.cat.batch_shape).concatenate(rv.components.event_shape)
19 | self.assertEqual(val_est, val_true)
20 |
21 | self.assertEqual(rv.sample_shape, rv.cat.sample_shape)
22 | self.assertEqual(rv.sample_shape, rv.components.sample_shape)
23 | self.assertEqual(rv.batch_shape, rv.cat.batch_shape)
24 | self.assertEqual(rv.event_shape, rv.components.event_shape)
25 |
26 | def test_batch_0d_event_0d(self):
27 | """Mixture of 3 normal distributions."""
28 | with self.test_session():
29 | probs = np.array([0.2, 0.3, 0.5], np.float32)
30 | loc = np.array([1.0, 5.0, 7.0], np.float32)
31 | scale = np.array([1.5, 1.5, 1.5], np.float32)
32 |
33 | self._test([], probs, {'loc': loc, 'scale': scale}, Normal)
34 | self._test([5], probs, {'loc': loc, 'scale': scale}, Normal)
35 |
36 | def test_batch_0d_event_1d(self):
37 | """Mixture of 2 Dirichlet distributions."""
38 | with self.test_session():
39 | probs = np.array([0.4, 0.6], np.float32)
40 | concentration = np.ones([2, 3], np.float32)
41 |
42 | self._test([], probs, {'concentration': concentration}, Dirichlet)
43 | self._test([5], probs, {'concentration': concentration}, Dirichlet)
44 |
45 | def test_batch_1d_event_0d(self):
46 | """Two mixtures each of 3 beta distributions."""
47 | with self.test_session():
48 | probs = np.array([[0.2, 0.3, 0.5], [0.2, 0.3, 0.5]], np.float32)
49 | conc1 = np.array([[2.0, 0.5], [1.0, 1.0], [0.5, 2.0]], np.float32)
50 | conc0 = conc1 + 2.0
51 |
52 | self._test([], probs, {'concentration1': conc1, 'concentration0': conc0},
53 | Beta)
54 | self._test([5], probs, {'concentration1': conc1, 'concentration0': conc0},
55 | Beta)
56 |
57 | probs = np.array([0.2, 0.3, 0.5], np.float32)
58 | self.assertRaises(ValueError, self._test, [], probs,
59 | {'concentration1': conc1, 'concentration0': conc0},
60 | Beta)
61 |
62 | if __name__ == '__main__':
63 | tf.test.main()
64 |
--------------------------------------------------------------------------------
/tests/models/point_mass_sample_test.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | import numpy as np
6 | import tensorflow as tf
7 |
8 | from edward.models import PointMass
9 |
10 |
11 | class test_pointmass_sample_class(tf.test.TestCase):
12 |
13 | def _test(self, params, n):
14 | x = PointMass(params=params)
15 | val_est = x.sample(n).shape.as_list()
16 | val_true = n + tf.convert_to_tensor(params).shape.as_list()
17 | self.assertEqual(val_est, val_true)
18 |
19 | def test_0d(self):
20 | with self.test_session():
21 | self._test(0.5, [1])
22 | self._test(np.array(0.5), [1])
23 | self._test(tf.constant(0.5), [1])
24 |
25 | def test_1d(self):
26 | with self.test_session():
27 | self._test(np.array([0.5]), [1])
28 | self._test(np.array([0.5]), [5])
29 | self._test(np.array([0.2, 0.8]), [1])
30 | self._test(np.array([0.2, 0.8]), [10])
31 | self._test(tf.constant([0.5]), [1])
32 | self._test(tf.constant([0.5]), [5])
33 | self._test(tf.constant([0.2, 0.8]), [1])
34 | self._test(tf.constant([0.2, 0.8]), [10])
35 |
36 | if __name__ == '__main__':
37 | tf.test.main()
38 |
--------------------------------------------------------------------------------
/tests/models/random_variable_gradients_test.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | import numpy as np
6 | import tensorflow as tf
7 |
8 | from edward.models import Bernoulli, Normal
9 |
10 |
11 | class test_random_variable_gradients_class(tf.test.TestCase):
12 |
13 | def test_first_order(self):
14 | with self.test_session() as sess:
15 | x = Bernoulli(0.5)
16 | y = 2 * x
17 | z = tf.gradients(y, x)[0]
18 | self.assertEqual(z.eval(), 2)
19 |
20 | def test_second_order(self):
21 | with self.test_session() as sess:
22 | x = Normal(0.0, 1.0)
23 | y = 2 * (x ** 2)
24 | z = tf.gradients(y, x)[0]
25 | z = tf.gradients(z, x)[0]
26 | self.assertEqual(z.eval(), 4.0)
27 |
28 | if __name__ == '__main__':
29 | tf.test.main()
30 |
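
Both assertions are plain calculus on the sampled value: \( \tfrac{d}{dx}\, 2x = 2 \) and \( \tfrac{d^{2}}{dx^{2}}\, 2x^{2} = 4 \), independent of \( x \). The point of the test is that `tf.gradients` treats a RandomVariable like any other tensor, differentiating through its value, even for a discrete variable such as Bernoulli.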
--------------------------------------------------------------------------------
/tests/models/random_variable_session_test.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | import edward as ed
6 | import tensorflow as tf
7 |
8 | from edward.models import Normal
9 |
10 |
11 | class test_random_variable_session_class(tf.test.TestCase):
12 |
13 | def test_eval(self):
14 | with self.test_session() as sess:
15 | x = Normal(0.0, 0.1)
16 | x_ph = tf.placeholder(tf.float32, [])
17 | y = Normal(x_ph, 0.1)
18 | self.assertLess(x.eval(), 5.0)
19 | self.assertLess(x.eval(sess), 5.0)
20 | self.assertLess(x.eval(feed_dict={x_ph: 100.0}), 5.0)
21 | self.assertGreater(y.eval(feed_dict={x_ph: 100.0}), 5.0)
22 | self.assertGreater(y.eval(sess, feed_dict={x_ph: 100.0}), 5.0)
23 | self.assertRaises(tf.errors.InvalidArgumentError, y.eval)
24 | self.assertRaises(tf.errors.InvalidArgumentError, y.eval, sess)
25 |
26 | def test_run(self):
27 | with self.test_session() as sess:
28 | x = Normal(0.0, 0.1)
29 | x_ph = tf.placeholder(tf.float32, [])
30 | y = Normal(x_ph, 0.1)
31 | self.assertLess(sess.run(x), 5.0)
32 | self.assertLess(sess.run(x, feed_dict={x_ph: 100.0}), 5.0)
33 | self.assertGreater(sess.run(y, feed_dict={x_ph: 100.0}), 5.0)
34 | self.assertRaises(tf.errors.InvalidArgumentError, sess.run, y)
35 |
36 | if __name__ == '__main__':
37 | ed.set_seed(82341)
38 | tf.test.main()
39 |
--------------------------------------------------------------------------------
/tests/models/random_variable_shape_test.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | import tensorflow as tf
6 |
7 | from edward.models import Bernoulli, Dirichlet
8 |
9 |
10 | class test_random_variable_shape_class(tf.test.TestCase):
11 |
12 | def _test(self, rv, sample_shape, batch_shape, event_shape):
13 | self.assertEqual(rv.shape, sample_shape + batch_shape + event_shape)
14 | self.assertEqual(rv.sample_shape, sample_shape)
15 | self.assertEqual(rv.batch_shape, batch_shape)
16 | self.assertEqual(rv.event_shape, event_shape)
17 |
18 | def test_bernoulli(self):
19 | with self.test_session():
20 | self._test(Bernoulli(0.5), [], [], [])
21 | self._test(Bernoulli(tf.zeros([2, 3])), [], [2, 3], [])
22 | self._test(Bernoulli(0.5, sample_shape=2), [2], [], [])
23 | self._test(Bernoulli(0.5, sample_shape=[2, 1]), [2, 1], [], [])
24 |
25 | def test_dirichlet(self):
26 | with self.test_session():
27 | self._test(Dirichlet(tf.zeros(3)), [], [], [3])
28 | self._test(Dirichlet(tf.zeros([2, 3])), [], [2], [3])
29 | self._test(Dirichlet(tf.zeros(3), sample_shape=1), [1], [], [3])
30 | self._test(Dirichlet(tf.zeros(3), sample_shape=[2, 1]), [2, 1], [], [3])
31 |
32 | if __name__ == '__main__':
33 | tf.test.main()
34 |
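
These checks pin down the TensorFlow Distributions shape convention that Edward random variables follow: a variable's overall shape factors as sample_shape + batch_shape + event_shape (independent draws, then a batch of distributions, then the dimensionality of one event). A small sketch of reading the factorization off a fresh variable, using valid (positive) concentrations:

    import tensorflow as tf

    from edward.models import Dirichlet

    # Two draws (sample_shape=[2]) from a batch of two Dirichlets
    # (batch_shape=[2]) over three categories (event_shape=[3]).
    x = Dirichlet(tf.ones([2, 3]), sample_shape=2)
    assert x.shape.as_list() == [2, 2, 3]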
--------------------------------------------------------------------------------
/tests/models/random_variable_value_test.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | import numpy as np
6 | import tensorflow as tf
7 |
8 | from edward.models import Bernoulli, Normal, Poisson, RandomVariable
9 | from edward.util import copy
10 |
11 |
12 | class test_random_variable_value_class(tf.test.TestCase):
13 |
14 | def _test_sample(self, RV, value, *args, **kwargs):
15 | rv = RV(*args, value=value, **kwargs)
16 | value_shape = rv.value().shape
17 | expected_shape = rv.sample_shape.concatenate(
18 | rv.batch_shape).concatenate(rv.event_shape)
19 | self.assertEqual(value_shape, expected_shape)
20 | self.assertEqual(rv.dtype, rv.value().dtype)
21 |
22 | def _test_copy(self, RV, value, *args, **kwargs):
23 | rv1 = RV(*args, value=value, **kwargs)
24 | rv2 = copy(rv1)
25 | value_shape1 = rv1.value().shape
26 | value_shape2 = rv2.value().shape
27 | self.assertEqual(value_shape1, value_shape2)
28 |
29 | def test_shape_and_dtype(self):
30 | with self.test_session():
31 | self._test_sample(Normal, 2, loc=0.5, scale=1.0)
32 | self._test_sample(Normal, [2], loc=[0.5], scale=[1.0])
33 | self._test_sample(Poisson, 2, rate=0.5)
34 |
35 | def test_unknown_shape(self):
36 | with self.test_session():
37 | x = Bernoulli(0.5, value=tf.placeholder(tf.int32))
38 |
39 | def test_mismatch_raises(self):
40 | with self.test_session():
41 | self.assertRaises(ValueError, self._test_sample, Normal, 2,
42 | loc=[0.5, 0.5], scale=1.0)
43 | self.assertRaises(ValueError, self._test_sample, Normal, 2,
44 | loc=[0.5], scale=[1.0])
45 | self.assertRaises(ValueError, self._test_sample, Normal,
46 | np.zeros([10, 3]), loc=[0.5, 0.5], scale=[1.0, 1.0])
47 |
48 | def test_copy(self):
49 | with self.test_session():
50 | self._test_copy(Normal, 2, loc=0.5, scale=1.0)
51 | self._test_copy(Normal, [2], loc=[0.5], scale=[1.0])
52 | self._test_copy(Poisson, 2, rate=0.5)
53 |
54 | if __name__ == '__main__':
55 | tf.test.main()
56 |
--------------------------------------------------------------------------------
/tests/notebooks/notebooks_test.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | import glob
6 | import nbformat
7 | import os
8 | import sys
9 | import time
10 | import traceback
11 | import tensorflow as tf
12 |
13 | from nbconvert.preprocessors import ExecutePreprocessor
14 | from nbconvert.preprocessors.execute import CellExecutionError
15 |
16 |
17 | class test_notebooks(tf.test.TestCase):
18 |
19 | def _exec_notebook(self, ep, filename):
20 | with open(filename) as f:
21 | nb = nbformat.read(f, as_version=nbformat.current_nbformat)
22 | try:
23 | out = ep.preprocess(nb, {})
24 | except CellExecutionError:
25 | print('-' * 60)
26 | traceback.print_exc(file=sys.stdout)
27 | print('-' * 60)
28 |         self.fail(
29 |             'Error executing the notebook %s. See above for error.'
30 |             % filename)
31 |
32 | def test_all_notebooks(self):
33 |     """Test all notebooks except those in the blacklist."""
34 | blacklist = ['gan.ipynb', 'iclr2017.ipynb']
35 | nbpath = 'notebooks/'
36 | # see http://nbconvert.readthedocs.io/en/stable/execute_api.html
37 | ep = ExecutePreprocessor(timeout=120,
38 | kernel_name='python' + str(sys.version_info[0]),
39 | interrupt_on_timeout=True)
40 | os.chdir(nbpath)
41 | files = glob.glob("*.ipynb")
42 | for filename in files:
43 | if filename not in blacklist:
44 | start = time.time()
45 | self._exec_notebook(ep, filename)
46 | end = time.time()
47 | print(filename, 'took %g seconds.' % (end - start))
48 |
49 | if __name__ == '__main__':
50 | tf.test.main()
51 |
--------------------------------------------------------------------------------
/tests/util/check_data_test.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | import numpy as np
6 | import tensorflow as tf
7 |
8 | from edward.models import Normal
9 | from edward.util import check_data
10 |
11 |
12 | class test_check_data_class(tf.test.TestCase):
13 |
14 | def test(self):
15 | with self.test_session():
16 | x = Normal(0.0, 1.0)
17 | qx = Normal(0.0, 1.0)
18 | x_ph = tf.placeholder(tf.float32, [])
19 |
20 | check_data({x: tf.constant(0.0)})
21 | check_data({x: np.float64(0.0)})
22 | check_data({x: np.int64(0)})
23 | check_data({x: 0.0})
24 | check_data({x: 0})
25 | check_data({x: False})
26 | check_data({x: '0'})
27 | check_data({x: x_ph})
28 | check_data({x: qx})
29 | check_data({2.0 * x: tf.constant(0.0)})
30 | self.assertRaises(TypeError, check_data, {0.0: x})
31 | self.assertRaises(TypeError, check_data, {x: tf.zeros(5)})
32 |       self.assertRaises(TypeError, check_data, {x_ph: x})
33 |       self.assertRaises(TypeError, check_data,
34 |                         {x: tf.constant(0, tf.float64)})
35 |       self.assertRaises(TypeError, check_data,
36 |                         {x_ph: tf.constant(0.0)})
37 |
38 |       x_vec = Normal(tf.constant([0.0]), tf.constant([1.0]))
39 |       qx_vec = Normal(tf.constant([0.0]), tf.constant([1.0]))
40 |
41 |       check_data({x_vec: qx_vec})
42 |       check_data({x_vec: [0.0]})
43 |       check_data({x_vec: [0]})
44 |       check_data({x_vec: ['0']})
45 |       self.assertRaises(TypeError, check_data, {x: qx_vec})
46 |
47 | if __name__ == '__main__':
48 |   tf.test.main()
49 |
--------------------------------------------------------------------------------
/tests/util/check_latent_vars_test.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | import tensorflow as tf
6 |
7 | from edward.models import Normal
8 | from edward.util import check_latent_vars
9 |
10 |
11 | class test_check_latent_vars_class(tf.test.TestCase):
12 |
13 | def test(self):
14 | with self.test_session():
15 | mu = Normal(0.0, 1.0)
16 | qmu = Normal(tf.Variable(0.0), tf.constant(1.0))
17 | qmu_vec = Normal(tf.constant([0.0]), tf.constant([1.0]))
18 |
19 | check_latent_vars({mu: qmu})
20 | check_latent_vars({mu: tf.constant(0.0)})
21 | check_latent_vars({tf.constant(0.0): qmu})
22 | self.assertRaises(TypeError, check_latent_vars, {mu: '5'})
23 | self.assertRaises(TypeError, check_latent_vars, {mu: qmu_vec})
24 |
25 | if __name__ == '__main__':
26 | tf.test.main()
27 |
--------------------------------------------------------------------------------
/tests/util/dot_test.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | import numpy as np
6 | import tensorflow as tf
7 |
8 | from edward.util import dot
9 |
10 |
11 | class test_dot_class(tf.test.TestCase):
12 |
13 | def test_dot(self):
14 | with self.test_session():
15 | a = tf.constant(np.arange(5, dtype=np.float32))
16 | b = tf.diag(tf.ones([5]))
17 | self.assertAllEqual(dot(a, b).eval(),
18 | np.dot(a.eval(), b.eval()))
19 | self.assertAllEqual(dot(b, a).eval(),
20 | np.dot(b.eval(), a.eval()))
21 |
22 | def test_all_finite_raises(self):
23 | with self.test_session():
24 | a = np.inf * tf.ones([5])
25 | b = tf.diag(tf.ones([5]))
26 | with self.assertRaisesOpError('Inf'):
27 | dot(a, b).eval()
28 | a = tf.ones([5]) * np.arange(5)
29 | b = np.inf * tf.diag(tf.ones([5]))
30 | with self.assertRaisesOpError('Inf'):
31 | dot(a, b).eval()
32 |
33 | if __name__ == '__main__':
34 | tf.test.main()
35 |
--------------------------------------------------------------------------------
/tests/util/get_blanket_test.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | import tensorflow as tf
6 |
7 | from edward.models import Bernoulli, Normal
8 | from edward.util import get_blanket
9 |
10 |
11 | class test_get_blanket_class(tf.test.TestCase):
12 |
13 | def test_blanket_structure(self):
14 | """a -> c <- b
15 | |
16 | v
17 | d -> f <- e
18 | """
19 | with self.test_session():
20 | a = Normal(0.0, 1.0)
21 | b = Normal(0.0, 1.0)
22 | c = Normal(a * b, 1.0)
23 | d = Normal(0.0, 1.0)
24 | e = Normal(0.0, 1.0)
25 | f = Normal(c * d * e, 1.0)
26 | self.assertEqual(set(get_blanket(a)), set([b, c]))
27 | self.assertEqual(set(get_blanket(b)), set([a, c]))
28 | self.assertEqual(set(get_blanket(c)), set([a, b, d, e, f]))
29 | self.assertEqual(set(get_blanket(d)), set([c, e, f]))
30 | self.assertEqual(set(get_blanket(e)), set([c, d, f]))
31 | self.assertEqual(set(get_blanket(f)), set([c, d, e]))
32 |
33 | if __name__ == '__main__':
34 | tf.test.main()
35 |
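
`get_blanket` returns a node's Markov blanket: its parents, its children, and its children's other parents. Reading that off the diagram for `c`: parents {a, b}, child {f}, and f's other parents {d, e}, hence the asserted {a, b, d, e, f}; for `d`: child {f} plus f's other parents {c, e} gives {c, e, f}.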
--------------------------------------------------------------------------------
/tests/util/get_children_test.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | import tensorflow as tf
6 |
7 | from edward.models import Bernoulli, Normal
8 | from edward.util import get_children
9 |
10 |
11 | class test_get_children_class(tf.test.TestCase):
12 |
13 | def test_v_structure(self):
14 | """a -> b -> e <- d <- c"""
15 | with self.test_session():
16 | a = Normal(0.0, 1.0)
17 | b = Normal(a, 1.0)
18 | c = Normal(0.0, 1.0)
19 | d = Normal(c, 1.0)
20 | e = Normal(b * d, 1.0)
21 | self.assertEqual(get_children(a), [b])
22 | self.assertEqual(get_children(b), [e])
23 | self.assertEqual(get_children(c), [d])
24 | self.assertEqual(get_children(d), [e])
25 | self.assertEqual(get_children(e), [])
26 |
27 | def test_a_structure(self):
28 | """e <- d <- a -> b -> c"""
29 | with self.test_session():
30 | a = Normal(0.0, 1.0)
31 | b = Normal(a, 1.0)
32 | c = Normal(b, 1.0)
33 | d = Normal(a, 1.0)
34 | e = Normal(d, 1.0)
35 | self.assertEqual(set(get_children(a)), set([b, d]))
36 | self.assertEqual(get_children(b), [c])
37 | self.assertEqual(get_children(c), [])
38 | self.assertEqual(get_children(d), [e])
39 | self.assertEqual(get_children(e), [])
40 |
41 | def test_chain_structure(self):
42 | """a -> b -> c -> d -> e"""
43 | with self.test_session():
44 | a = Normal(0.0, 1.0)
45 | b = Normal(a, 1.0)
46 | c = Normal(b, 1.0)
47 | d = Normal(c, 1.0)
48 | e = Normal(d, 1.0)
49 | self.assertEqual(get_children(a), [b])
50 | self.assertEqual(get_children(b), [c])
51 | self.assertEqual(get_children(c), [d])
52 | self.assertEqual(get_children(d), [e])
53 | self.assertEqual(get_children(e), [])
54 |
55 | def test_tensor(self):
56 | with self.test_session():
57 | a = Normal(0.0, 1.0)
58 | b = tf.constant(2.0)
59 | c = a + b
60 | d = Normal(c, 1.0)
61 | self.assertEqual(get_children(a), [d])
62 | self.assertEqual(get_children(b), [d])
63 | self.assertEqual(get_children(c), [d])
64 | self.assertEqual(get_children(d), [])
65 |
66 | def test_control_flow(self):
67 | with self.test_session():
68 | a = Bernoulli(0.5)
69 | b = Normal(0.0, 1.0)
70 | c = tf.constant(0.0)
71 | d = tf.cond(tf.cast(a, tf.bool), lambda: b, lambda: c)
72 | e = Normal(d, 1.0)
73 | self.assertEqual(get_children(a), [e])
74 | self.assertEqual(get_children(b), [e])
75 | self.assertEqual(get_children(c), [e])
76 | self.assertEqual(get_children(d), [e])
77 | self.assertEqual(get_children(e), [])
78 |
79 | def test_scan(self):
80 | """copied from test_chain_structure"""
81 | def cumsum(x):
82 | return tf.scan(lambda a, x: a + x, x)
83 |
84 | with self.test_session():
85 | a = Normal(tf.ones([3]), tf.ones([3]))
86 | b = Normal(cumsum(a), tf.ones([3]))
87 | c = Normal(cumsum(b), tf.ones([3]))
88 | d = Normal(cumsum(c), tf.ones([3]))
89 | e = Normal(cumsum(d), tf.ones([3]))
90 | self.assertEqual(get_children(a), [b])
91 | self.assertEqual(get_children(b), [c])
92 | self.assertEqual(get_children(c), [d])
93 | self.assertEqual(get_children(d), [e])
94 | self.assertEqual(get_children(e), [])
95 |
96 | if __name__ == '__main__':
97 | tf.test.main()
98 |
--------------------------------------------------------------------------------
/tests/util/get_control_variate_coef_test.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | import tensorflow as tf
6 |
7 | from edward.util.tensorflow import get_control_variate_coef
8 |
9 |
10 | class test_get_control_variate_coef(tf.test.TestCase):
11 |
12 | def test_calculate_correct_coefficient(self):
13 | with self.test_session():
14 | f = tf.constant([1.0, 2.0, 3.0, 4.0])
15 | h = tf.constant([2.0, 3.0, 8.0, 1.0])
16 | self.assertAllClose(get_control_variate_coef(f, h).eval(),
17 | 0.03448276)
18 |
19 | if __name__ == '__main__':
20 | tf.test.main()
21 |
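
The asserted value matches the usual variance-minimizing control-variate coefficient \( a^{*} = \operatorname{Cov}(f, h) / \operatorname{Var}(h) \) (assuming that is what `get_control_variate_coef` computes, which the arithmetic bears out): with \( \bar{f} = 2.5 \) and \( \bar{h} = 3.5 \),

\[
a^{*} = \frac{(-1.5)(-1.5) + (-0.5)(-0.5) + (0.5)(4.5) + (1.5)(-2.5)}
             {(-1.5)^{2} + (-0.5)^{2} + (4.5)^{2} + (-2.5)^{2}}
      = \frac{1}{29} \approx 0.03448276
\]

(the \( 1/n \) factors in the covariance and the variance cancel).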
--------------------------------------------------------------------------------
/tests/util/get_parents_test.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | import tensorflow as tf
6 |
7 | from edward.models import Bernoulli, Normal
8 | from edward.util import get_parents
9 |
10 |
11 | class test_get_parents_class(tf.test.TestCase):
12 |
13 | def test_v_structure(self):
14 | """a -> b -> e <- d <- c"""
15 | with self.test_session():
16 | a = Normal(0.0, 1.0)
17 | b = Normal(a, 1.0)
18 | c = Normal(0.0, 1.0)
19 | d = Normal(c, 1.0)
20 | e = Normal(b * d, 1.0)
21 | self.assertEqual(get_parents(a), [])
22 | self.assertEqual(get_parents(b), [a])
23 | self.assertEqual(get_parents(c), [])
24 | self.assertEqual(get_parents(d), [c])
25 | self.assertEqual(set(get_parents(e)), set([b, d]))
26 |
27 | def test_a_structure(self):
28 | """e <- d <- a -> b -> c"""
29 | with self.test_session():
30 | a = Normal(0.0, 1.0)
31 | b = Normal(a, 1.0)
32 | c = Normal(b, 1.0)
33 | d = Normal(a, 1.0)
34 | e = Normal(d, 1.0)
35 | self.assertEqual(get_parents(a), [])
36 | self.assertEqual(get_parents(b), [a])
37 | self.assertEqual(get_parents(c), [b])
38 | self.assertEqual(get_parents(d), [a])
39 | self.assertEqual(get_parents(e), [d])
40 |
41 | def test_chain_structure(self):
42 | """a -> b -> c -> d -> e"""
43 | with self.test_session():
44 | a = Normal(0.0, 1.0)
45 | b = Normal(a, 1.0)
46 | c = Normal(b, 1.0)
47 | d = Normal(c, 1.0)
48 | e = Normal(d, 1.0)
49 | self.assertEqual(get_parents(a), [])
50 | self.assertEqual(get_parents(b), [a])
51 | self.assertEqual(get_parents(c), [b])
52 | self.assertEqual(get_parents(d), [c])
53 | self.assertEqual(get_parents(e), [d])
54 |
55 | def test_tensor(self):
56 | with self.test_session():
57 | a = Normal(0.0, 1.0)
58 | b = tf.constant(2.0)
59 | c = a + b
60 | d = Normal(c, 1.0)
61 | self.assertEqual(get_parents(a), [])
62 | self.assertEqual(get_parents(b), [])
63 | self.assertEqual(get_parents(c), [a])
64 | self.assertEqual(get_parents(d), [a])
65 |
66 | def test_control_flow(self):
67 | with self.test_session():
68 | a = Bernoulli(0.5)
69 | b = Normal(0.0, 1.0)
70 | c = tf.constant(0.0)
71 | d = tf.cond(tf.cast(a, tf.bool), lambda: b, lambda: c)
72 | e = Normal(d, 1.0)
73 | self.assertEqual(get_parents(a), [])
74 | self.assertEqual(get_parents(b), [])
75 | self.assertEqual(get_parents(c), [])
76 | self.assertEqual(set(get_parents(d)), set([a, b]))
77 | self.assertEqual(set(get_parents(e)), set([a, b]))
78 |
79 | def test_scan(self):
80 | """copied from test_chain_structure"""
81 | def cumsum(x):
82 | return tf.scan(lambda a, x: a + x, x)
83 |
84 | with self.test_session():
85 | a = Normal(tf.ones([3]), tf.ones([3]))
86 | b = Normal(cumsum(a), tf.ones([3]))
87 | c = Normal(cumsum(b), tf.ones([3]))
88 | d = Normal(cumsum(c), tf.ones([3]))
89 | e = Normal(cumsum(d), tf.ones([3]))
90 | self.assertEqual(get_parents(a), [])
91 | self.assertEqual(get_parents(b), [a])
92 | self.assertEqual(get_parents(c), [b])
93 | self.assertEqual(get_parents(d), [c])
94 | self.assertEqual(get_parents(e), [d])
95 |
96 | if __name__ == '__main__':
97 | tf.test.main()
98 |
--------------------------------------------------------------------------------
/tests/util/get_siblings_test.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | import tensorflow as tf
6 |
7 | from edward.models import Bernoulli, Normal
8 | from edward.util import get_siblings
9 |
10 |
11 | class test_get_siblings_class(tf.test.TestCase):
12 |
13 | def test_v_structure(self):
14 | """a -> b -> e <- d <- c"""
15 | with self.test_session():
16 | a = Normal(0.0, 1.0)
17 | b = Normal(a, 1.0)
18 | c = Normal(0.0, 1.0)
19 | d = Normal(c, 1.0)
20 | e = Normal(b * d, 1.0)
21 | self.assertEqual(get_siblings(a), [])
22 | self.assertEqual(get_siblings(b), [])
23 | self.assertEqual(get_siblings(c), [])
24 | self.assertEqual(get_siblings(d), [])
25 | self.assertEqual(get_siblings(e), [])
26 |
27 | def test_a_structure(self):
28 | """e <- d <- a -> b -> c"""
29 | with self.test_session():
30 | a = Normal(0.0, 1.0)
31 | b = Normal(a, 1.0)
32 | c = Normal(b, 1.0)
33 | d = Normal(a, 1.0)
34 | e = Normal(d, 1.0)
35 | self.assertEqual(get_siblings(a), [])
36 | self.assertEqual(get_siblings(b), [d])
37 | self.assertEqual(get_siblings(c), [])
38 | self.assertEqual(get_siblings(d), [b])
39 | self.assertEqual(get_siblings(e), [])
40 |
41 | def test_chain_structure(self):
42 | """a -> b -> c -> d -> e"""
43 | with self.test_session():
44 | a = Normal(0.0, 1.0)
45 | b = Normal(a, 1.0)
46 | c = Normal(b, 1.0)
47 | d = Normal(c, 1.0)
48 | e = Normal(d, 1.0)
49 | self.assertEqual(get_siblings(a), [])
50 | self.assertEqual(get_siblings(b), [])
51 | self.assertEqual(get_siblings(c), [])
52 | self.assertEqual(get_siblings(d), [])
53 | self.assertEqual(get_siblings(e), [])
54 |
55 | def test_tensor(self):
56 | with self.test_session():
57 | a = Normal(0.0, 1.0)
58 | b = tf.constant(2.0)
59 | c = a + b
60 | d = Normal(c, 1.0)
61 | self.assertEqual(get_siblings(a), [])
62 | self.assertEqual(get_siblings(b), [])
63 | self.assertEqual(get_siblings(c), [d])
64 | self.assertEqual(get_siblings(d), [])
65 |
66 | def test_control_flow(self):
67 | with self.test_session():
68 | a = Bernoulli(0.5)
69 | b = Normal(0.0, 1.0)
70 | c = tf.constant(0.0)
71 | d = tf.cond(tf.cast(a, tf.bool), lambda: b, lambda: c)
72 | e = Normal(d, 1.0)
73 | self.assertEqual(get_siblings(a), [])
74 | self.assertEqual(get_siblings(b), [])
75 | self.assertEqual(get_siblings(c), [])
76 | self.assertEqual(get_siblings(d), [e])
77 | self.assertEqual(get_siblings(e), [])
78 |
79 | def test_scan(self):
80 | """copied from test_a_structure"""
81 | def cumsum(x):
82 | return tf.scan(lambda a, x: a + x, x)
83 |
84 | with self.test_session():
85 | a = Normal(tf.ones([3]), tf.ones([3]))
86 | b = Normal(cumsum(a), tf.ones([3]))
87 | c = Normal(cumsum(b), tf.ones([3]))
88 | d = Normal(cumsum(a), tf.ones([3]))
89 | e = Normal(cumsum(d), tf.ones([3]))
90 | self.assertEqual(get_siblings(a), [])
91 | self.assertEqual(get_siblings(b), [d])
92 | self.assertEqual(get_siblings(c), [])
93 | self.assertEqual(get_siblings(d), [b])
94 | self.assertEqual(get_siblings(e), [])
95 |
96 | if __name__ == '__main__':
97 | tf.test.main()
98 |
--------------------------------------------------------------------------------
/tests/util/get_variables_test.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | import tensorflow as tf
6 |
7 | from edward.models import Bernoulli, Normal
8 | from edward.util import get_variables
9 |
10 |
11 | class test_get_variables_class(tf.test.TestCase):
12 |
13 | def test_v_structure(self):
14 | """a -> b -> e <- d <- c"""
15 | with self.test_session():
16 | a = tf.Variable(0.0)
17 | b = Normal(a, 1.0)
18 | c = tf.Variable(0.0)
19 | d = Normal(c, 1.0)
20 | e = Normal(b * d, 1.0)
21 | self.assertEqual(get_variables(a), [])
22 | self.assertEqual(get_variables(b), [a])
23 | self.assertEqual(get_variables(c), [])
24 | self.assertEqual(get_variables(d), [c])
25 | self.assertEqual(set(get_variables(e)), set([a, c]))
26 |
27 | def test_a_structure(self):
28 | """e <- d <- a -> b -> c"""
29 | with self.test_session():
30 | a = tf.Variable(0.0)
31 | b = Normal(a, 1.0)
32 | c = Normal(b, 1.0)
33 | d = Normal(a, 1.0)
34 | e = Normal(d, 1.0)
35 | self.assertEqual(get_variables(a), [])
36 | self.assertEqual(get_variables(b), [a])
37 | self.assertEqual(get_variables(c), [a])
38 | self.assertEqual(get_variables(d), [a])
39 | self.assertEqual(get_variables(e), [a])
40 |
41 | def test_chain_structure(self):
42 |     """a -> b -> c"""
43 | with self.test_session():
44 | a = tf.Variable(0.0)
45 | b = tf.Variable(a)
46 | c = Normal(b, 1.0)
47 | self.assertEqual(get_variables(a), [])
48 | self.assertEqual(get_variables(b), [])
49 | self.assertEqual(get_variables(c), [b])
50 |
51 | def test_tensor(self):
52 | with self.test_session():
53 | a = tf.Variable(0.0)
54 | b = tf.constant(2.0)
55 | c = a + b
56 | d = tf.Variable(a)
57 | self.assertEqual(get_variables(a), [])
58 | self.assertEqual(get_variables(b), [])
59 | self.assertEqual(get_variables(c), [a])
60 | self.assertEqual(get_variables(d), [])
61 |
62 | def test_control_flow(self):
63 | with self.test_session():
64 | a = Bernoulli(0.5)
65 | b = tf.Variable(0.0)
66 | c = tf.constant(0.0)
67 | d = tf.cond(tf.cast(a, tf.bool), lambda: b, lambda: c)
68 | e = Normal(d, 1.0)
69 | self.assertEqual(get_variables(d), [b])
70 | self.assertEqual(get_variables(e), [b])
71 |
72 | def test_scan(self):
73 | with self.test_session():
74 | b = tf.Variable(0.0)
75 | op = tf.scan(lambda a, x: a + b + x, tf.constant([2.0, 3.0, 1.0]))
76 |
77 | self.assertEqual(get_variables(op), [b])
78 |
79 | def test_scan_with_a_structure(self):
80 | """copied from test_a_structure"""
81 | def cumsum(x):
82 | return tf.scan(lambda a, x: a + x, x)
83 |
84 | with self.test_session():
85 | a = tf.Variable([1.0, 1.0, 1.0])
86 | b = Normal(cumsum(a), tf.ones([3]))
87 | c = Normal(cumsum(b), tf.ones([3]))
88 | d = Normal(cumsum(a), tf.ones([3]))
89 | e = Normal(cumsum(d), tf.ones([3]))
90 | self.assertEqual(get_variables(a), [])
91 | self.assertEqual(get_variables(b), [a])
92 | self.assertEqual(get_variables(c), [a])
93 | self.assertEqual(get_variables(d), [a])
94 | self.assertEqual(get_variables(e), [a])
95 |
96 | if __name__ == '__main__':
97 | tf.test.main()
98 |
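
A minimal usage sketch (not part of the repository) of edward.util.get_variables, assuming, as the tests above indicate, that it walks the TensorFlow graph backward from a node and collects the tf.Variables it transitively depends on, with variable initializers cutting the dependency:

    import tensorflow as tf
    from edward.models import Normal
    from edward.util import get_variables

    loc = tf.Variable(0.0)
    x = Normal(loc, 1.0)
    y = Normal(x, 1.0)

    print(get_variables(y))    # expected: [loc]; the dependency is transitive
    print(get_variables(loc))  # expected: []; a variable has no upstream variables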
--------------------------------------------------------------------------------
/tests/util/is_independent_test.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | import tensorflow as tf
6 |
7 | from edward.models import Normal
8 | from edward.util import is_independent
9 |
10 |
11 | class test_is_independent_class(tf.test.TestCase):
12 |
13 | def test_chain_structure(self):
14 | """a -> b -> c -> d -> e"""
15 | a = Normal(0.0, 1.0)
16 | b = Normal(a, 1.0)
17 | c = Normal(b, 1.0)
18 | d = Normal(c, 1.0)
19 | e = Normal(d, 1.0)
20 | self.assertTrue(is_independent(c, e, d))
21 | self.assertTrue(is_independent([a, b, c], e, d))
22 | self.assertTrue(is_independent([a, b], [d, e], c))
23 | self.assertFalse(is_independent([a, b, e], d, c))
24 |
25 | def test_binary_structure(self):
26 | """f <- c <- a -> b -> d
27 | | |
28 | v v
29 | g e
30 | """
31 | a = Normal(0.0, 1.0)
32 | b = Normal(a, 1.0)
33 | c = Normal(a, 1.0)
34 | d = Normal(b, 1.0)
35 | e = Normal(b, 1.0)
36 | f = Normal(c, 1.0)
37 | g = Normal(c, 1.0)
38 | self.assertFalse(is_independent(b, c))
39 | self.assertTrue(is_independent(b, c, a))
40 | self.assertTrue(is_independent(d, [a, c, e, f, g], b))
41 | self.assertFalse(is_independent(b, [e, d], a))
42 | self.assertFalse(is_independent(a, [b, c, d, e, f, g]))
43 |
44 | def test_grid_structure(self):
45 | """a -> b -> c
46 | | | |
47 | v v v
48 | d -> e -> f
49 | """
50 | a = Normal(0.0, 1.0)
51 | b = Normal(a, 1.0)
52 | c = Normal(b, 1.0)
53 | d = Normal(a, 1.0)
54 | e = Normal(b + d, 1.0)
55 | f = Normal(e + c, 1.0)
56 | self.assertFalse(is_independent(f, [a, b, d]))
57 | self.assertTrue(is_independent(f, [a, b, d], [e, c]))
58 | self.assertTrue(is_independent(e, [a, c], [b, d]))
59 | self.assertFalse(is_independent(e, f, [b, d]))
60 | self.assertFalse(is_independent(e, f, [a, b, c, d]))
61 |
62 | if __name__ == '__main__':
63 | tf.test.main()
64 |
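
A minimal usage sketch (not part of the repository) of edward.util.is_independent, assuming the signature used above, is_independent(nodes1, nodes2, condition_set=...), where the third argument defaults to the empty set; the assertions above are consistent with a d-separation test on the graph of random variables:

    from edward.models import Normal
    from edward.util import is_independent

    # Markov chain a -> b -> c: conditioning on b separates a from c.
    a = Normal(0.0, 1.0)
    b = Normal(a, 1.0)
    c = Normal(b, 1.0)

    print(is_independent(a, c, b))  # expected: True
    print(is_independent(a, c))     # expected: False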
--------------------------------------------------------------------------------
/tests/util/random_variables_test.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | import numpy as np
6 | import tensorflow as tf
7 |
8 | from edward.util.random_variables import compute_multinomial_mode
9 |
10 |
11 | class test_compute_multinomial_mode(tf.test.TestCase):
12 |
13 | RANDOM_SEED = 12345
14 |
15 |   def test_correct_mode_computed_with_uniform_probabilities(self):
16 |     with self.test_session():
17 |       probs = np.array(3 * [1 / 3.0])
18 |       total_count = 5
19 |       self.assertAllEqual(
20 |           compute_multinomial_mode(probs, total_count, seed=self.RANDOM_SEED),
21 |           np.array([1, 2, 2]))
22 |
23 |   def test_correct_mode_computed_with_nonuniform_probabilities(self):
24 |     with self.test_session():
25 |       probs = np.array([0.6, 0.4, 0.0])
26 |       total_count = 5
27 |       self.assertAllEqual(
28 |           compute_multinomial_mode(probs, total_count, seed=self.RANDOM_SEED),
29 |           np.array([2, 2, 1]))
30 |
31 | if __name__ == '__main__':
32 |   tf.test.main()
33 |
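
A minimal usage sketch (not part of the repository) of compute_multinomial_mode. The seed argument in the tests suggests ties between equally probable count vectors are broken randomly; when the expected counts n * probs are exact integers there should be no tie to break:

    import numpy as np
    from edward.util.random_variables import compute_multinomial_mode

    probs = np.array([0.5, 0.3, 0.2])
    # Expected counts 10 * probs = [5, 3, 2] are integers, so they should
    # coincide with the mode regardless of the seed.
    print(compute_multinomial_mode(probs, 10, seed=42))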
--------------------------------------------------------------------------------
/tests/util/to_simplex_test.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | import numpy as np
6 | import tensorflow as tf
7 |
8 | from edward.util import to_simplex
9 |
10 |
11 | class test_to_simplex_class(tf.test.TestCase):
12 |
13 | def test_to_simplex_1d(self):
14 | with self.test_session():
15 | x = tf.constant([0.0])
16 | self.assertAllClose(to_simplex(x).eval(),
17 | [0.5, 0.5])
18 | x = tf.constant([0.0, 10.0])
19 | self.assertAllClose(to_simplex(x).eval(),
20 | [3.333333e-01, 6.666363e-01, 3.027916e-05])
21 |
22 | def test_to_simplex_2d(self):
23 | with self.test_session():
24 | x = tf.constant([[0.0], [0.0]])
25 | self.assertAllClose(to_simplex(x).eval(),
26 | [[0.5, 0.5], [0.5, 0.5]])
27 | x = tf.constant([[0.0, 10.0], [0.0, 10.0]])
28 | self.assertAllClose(to_simplex(x).eval(),
29 | [[3.333333e-01, 6.666363e-01, 3.027916e-05],
30 | [3.333333e-01, 6.666363e-01, 3.027916e-05]])
31 |
32 | def test_all_finite_raises(self):
33 | with self.test_session():
34 | x = tf.constant([12.5, np.inf])
35 | with self.assertRaisesOpError('Inf'):
36 | to_simplex(x).eval()
37 | x = tf.constant([12.5, np.nan])
38 | with self.assertRaisesOpError('NaN'):
39 | to_simplex(x).eval()
40 |
41 | if __name__ == '__main__':
42 | tf.test.main()
43 |
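
A minimal usage sketch (not part of the repository) of edward.util.to_simplex, which maps a length-(K-1) unconstrained real vector to a point on the K-simplex. The expected values in the tests above are consistent with a stick-breaking construction, though that is an inference from the numbers rather than documented behavior:

    import tensorflow as tf
    from edward.util import to_simplex

    x = tf.constant([0.0, 0.0])  # 2 unconstrained reals -> 3-simplex
    with tf.Session() as sess:
      p = sess.run(to_simplex(x))
      print(p, p.sum())  # three positive entries summing to 1.0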
--------------------------------------------------------------------------------
/tests/util/transform_test.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | from __future__ import division
3 | from __future__ import print_function
4 |
5 | import edward as ed
6 | import numpy as np
7 | import tensorflow as tf
8 |
9 | from collections import namedtuple
10 | from edward.models import (
11 | Beta, Dirichlet, DirichletProcess, Gamma, MultivariateNormalDiag,
12 | Normal, Poisson, TransformedDistribution)
13 | from tensorflow.contrib.distributions import bijectors
14 |
15 |
16 | class test_transform_class(tf.test.TestCase):
17 |
18 | def assertSamplePosNeg(self, sample):
19 | num_pos = np.sum((sample > 0.0), axis=0, keepdims=True)
20 | num_neg = np.sum((sample < 0.0), axis=0, keepdims=True)
21 | self.assertTrue((num_pos > 0).all())
22 | self.assertTrue((num_neg > 0).all())
23 |
24 | def test_args(self):
25 | with self.test_session():
26 | x = Normal(-100.0, 1.0)
27 | y = ed.transform(x, bijectors.Softplus())
28 | sample = y.sample(10).eval()
29 | self.assertTrue((sample >= 0.0).all())
30 |
31 | def test_kwargs(self):
32 | with self.test_session():
33 | x = Normal(-100.0, 1.0)
34 | y = ed.transform(x, bijector=bijectors.Softplus())
35 | sample = y.sample(10).eval()
36 | self.assertTrue((sample >= 0.0).all())
37 |
38 | def test_01(self):
39 | with self.test_session():
40 | x = Beta(1.0, 1.0)
41 | y = ed.transform(x)
42 | self.assertIsInstance(y, TransformedDistribution)
43 | sample = y.sample(10, seed=1).eval()
44 | self.assertSamplePosNeg(sample)
45 |
46 | def test_nonnegative(self):
47 | with self.test_session():
48 | x = Gamma(1.0, 1.0)
49 | y = ed.transform(x)
50 | self.assertIsInstance(y, TransformedDistribution)
51 | sample = y.sample(10, seed=1).eval()
52 | self.assertSamplePosNeg(sample)
53 |
54 | def test_simplex(self):
55 | with self.test_session():
56 | x = Dirichlet([1.1, 1.2, 1.3, 1.4])
57 | y = ed.transform(x)
58 | self.assertIsInstance(y, TransformedDistribution)
59 | sample = y.sample(10, seed=1).eval()
60 | self.assertSamplePosNeg(sample)
61 |
62 | def test_real(self):
63 | with self.test_session():
64 | x = Normal(0.0, 1.0)
65 | y = ed.transform(x)
66 | self.assertIsInstance(y, Normal)
67 | sample = y.sample(10, seed=1).eval()
68 | self.assertSamplePosNeg(sample)
69 |
70 | def test_multivariate_real(self):
71 | with self.test_session():
72 | x = MultivariateNormalDiag(tf.zeros(2), tf.ones(2))
73 | y = ed.transform(x)
74 | sample = y.sample(10, seed=1).eval()
75 | self.assertSamplePosNeg(sample)
76 |
77 | def test_no_support(self):
78 | with self.test_session():
79 | x = DirichletProcess(1.0, Normal(0.0, 1.0))
80 | with self.assertRaises(AttributeError):
81 | y = ed.transform(x)
82 |
83 | def test_unhandled_support(self):
84 | with self.test_session():
85 | FakeRV = namedtuple('FakeRV', ['support'])
86 | x = FakeRV(support='rational')
87 | with self.assertRaises(ValueError):
88 | y = ed.transform(x)
89 |
90 | if __name__ == '__main__':
91 | tf.test.main()
92 |
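
A minimal usage sketch (not part of the repository) of ed.transform. The tests above indicate that when no bijector is given, a default is chosen from the input's support attribute: unit-interval, nonnegative, and simplex supports are mapped onto the full real line, while real-valued variables appear to be passed through (test_real checks the result is still a Normal):

    import edward as ed
    import tensorflow as tf
    from edward.models import Gamma

    x = Gamma(2.0, 2.0)   # nonnegative support
    y = ed.transform(x)   # default bijector maps it to the real line

    with tf.Session() as sess:
      print(sess.run(y.sample(5)))  # samples can now be negative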
--------------------------------------------------------------------------------