├── .dockerignore ├── .gitignore ├── Dockerfile ├── README.md ├── bo.ipynb ├── bo ├── gen_latent.py ├── run_bo.py └── run_experiment.sh ├── configs ├── mnist │ ├── N_N.ini │ └── U_U.ini ├── moses │ ├── DD-VAE_gaussian_seed1.ini │ ├── DD-VAE_gaussian_seed2.ini │ ├── DD-VAE_gaussian_seed3.ini │ ├── DD-VAE_triweight_seed1.ini │ ├── DD-VAE_triweight_seed2.ini │ ├── DD-VAE_triweight_seed3.ini │ ├── VAE_gaussian_seed1.ini │ ├── VAE_gaussian_seed2.ini │ ├── VAE_gaussian_seed3.ini │ ├── VAE_triweight_seed1.ini │ ├── VAE_triweight_seed2.ini │ └── VAE_triweight_seed3.ini ├── synthetic │ ├── N_N.ini │ ├── U_T.ini │ └── U_U.ini └── zinc │ ├── dd_vae_gaussian.ini │ ├── dd_vae_tricube.ini │ ├── vae_gaussian.ini │ └── vae_tricube.ini ├── data ├── moses │ ├── test.csv.gz │ ├── test_scaffolds.csv.gz │ ├── test_scaffolds_stats.npz │ ├── test_stats.npz │ └── train.csv.gz ├── synthetic │ └── 2d_map_0.2.csv.gz └── zinc │ ├── 250k_rndm_zinc_drugs_clean.smi.gz │ ├── test.csv.gz │ ├── train.csv.gz │ └── valid.csv.gz ├── dd_vae ├── __init__.py ├── bo │ ├── __init__.py │ ├── gauss.py │ ├── psd_theano.py │ ├── sparse_gp.py │ ├── sparse_gp_theano_internal.py │ └── utils.py ├── proposals.py ├── utils.py ├── vae_base.py ├── vae_mnist.py └── vae_rnn.py ├── illustrations.ipynb ├── images ├── .DS_Store ├── kernels.pdf ├── mnist │ ├── latent_N_N.png │ └── latent_U_U.png ├── moses_FCD.pdf ├── moses_SNN.pdf ├── smoothed_indicator.pdf ├── synthetic │ ├── N_N.png │ ├── U_T.png │ └── U_U.png └── zinc │ ├── DD_VAE_GAUSSIAN_molecule_0.pdf │ ├── DD_VAE_GAUSSIAN_molecule_1.pdf │ ├── DD_VAE_GAUSSIAN_molecule_2.pdf │ ├── DD_VAE_GAUSSIAN_top50_molecules.pdf │ ├── DD_VAE_TRICUBE_molecule_0.pdf │ ├── DD_VAE_TRICUBE_molecule_1.pdf │ ├── DD_VAE_TRICUBE_molecule_2.pdf │ ├── DD_VAE_TRICUBE_top50_molecules.pdf │ ├── VAE_GAUSSIAN_molecule_0.pdf │ ├── VAE_GAUSSIAN_molecule_1.pdf │ ├── VAE_GAUSSIAN_molecule_2.pdf │ ├── VAE_GAUSSIAN_top50_molecules.pdf │ ├── VAE_TRICUBE_molecule_0.pdf │ ├── VAE_TRICUBE_molecule_1.pdf │ ├── VAE_TRICUBE_molecule_2.pdf │ └── VAE_TRICUBE_top50_molecules.pdf ├── mnist.ipynb ├── moses_plots.ipynb ├── moses_prepare_metrics.ipynb ├── setup.py ├── synthetic.ipynb ├── train.py └── unit_test.py /.dockerignore: -------------------------------------------------------------------------------- 1 | models/ 2 | metrics/ 3 | logs/ 4 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | *.egg-info/ 24 | .installed.cfg 25 | *.egg 26 | MANIFEST 27 | 28 | # PyInstaller 29 | # Usually these files are written by a python script from a template 30 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
31 | *.manifest 32 | *.spec 33 | 34 | # Installer logs 35 | pip-log.txt 36 | pip-delete-this-directory.txt 37 | 38 | # Unit test / coverage reports 39 | htmlcov/ 40 | .tox/ 41 | .coverage 42 | .coverage.* 43 | .cache 44 | nosetests.xml 45 | coverage.xml 46 | *.cover 47 | .hypothesis/ 48 | .pytest_cache/ 49 | 50 | # Translations 51 | *.mo 52 | *.pot 53 | 54 | # Django stuff: 55 | *.log 56 | local_settings.py 57 | db.sqlite3 58 | 59 | # Flask stuff: 60 | instance/ 61 | .webassets-cache 62 | 63 | # Scrapy stuff: 64 | .scrapy 65 | 66 | # Sphinx documentation 67 | docs/_build/ 68 | 69 | # PyBuilder 70 | target/ 71 | 72 | # Jupyter Notebook 73 | .ipynb_checkpoints 74 | 75 | # pyenv 76 | .python-version 77 | 78 | # celery beat schedule file 79 | celerybeat-schedule 80 | 81 | # SageMath parsed files 82 | *.sage.py 83 | 84 | # Environments 85 | .env 86 | .venv 87 | env/ 88 | venv/ 89 | ENV/ 90 | env.bak/ 91 | venv.bak/ 92 | 93 | # Spyder project settings 94 | .spyderproject 95 | .spyproject 96 | 97 | # Rope project settings 98 | .ropeproject 99 | 100 | # mkdocs documentation 101 | /site 102 | 103 | # mypy 104 | .mypy_cache/ 105 | 106 | data/mnist 107 | models/ 108 | logs/ 109 | metrics/ 110 | bo/results/ 111 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM nvidia/cuda:9.0-cudnn7-devel-ubuntu16.04 2 | 3 | RUN mkdir -p /code 4 | 5 | RUN set -ex \ 6 | && apt-get update \ 7 | && apt-get install -y git vim less wget \ 8 | tmux libxrender1 libxext6 9 | 10 | RUN set -ex \ 11 | && wget https://repo.continuum.io/miniconda/Miniconda3-4.7.10-Linux-x86_64.sh \ 12 | && /bin/bash Miniconda3-4.7.10-Linux-x86_64.sh -f -b -p /opt/miniconda 13 | 14 | ENV PATH /opt/miniconda/bin:$PATH 15 | 16 | RUN conda install -y numpy=1.17.2 \ 17 | scipy=1.3.1 \ 18 | scikit-learn=0.20.3 \ 19 | matplotlib=3.1.1 \ 20 | pandas=0.25.1 \ 21 | notebook=6.0.0 \ 22 | networkx=2.3 \ 23 | ipywidgets=7.5.1 24 | 25 | RUN conda install -y -c pytorch cudatoolkit=9.0 pytorch=1.1.0 torchvision=0.2.1 26 | 27 | RUN conda install -y -c rdkit rdkit=2019.03.4 28 | 29 | RUN pip install Theano==1.0.4 molsets==0.2 tensorboardX==1.9 cairosvg==2.4.2 tqdm==4.42.0 30 | 31 | ADD . /code/dd_vae 32 | 33 | RUN cd /code/dd_vae && python setup.py install 34 | 35 | CMD [ "/bin/bash" ] 36 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Deterministic Decoding for Discrete Data in Variational Autoencoders 2 | 3 | Variational autoencoders are prominent generative models for modeling discrete data. However, with flexible decoders, they tend to ignore the latent codes. In this paper, we study a VAE model with a deterministic decoder (DD-VAE) for sequential data that selects the highest-scoring tokens instead of sampling. Deterministic decoding solely relies on latent codes as the only way to produce diverse objects, which improves the structure of the learned manifold. To implement DD-VAE, we propose a new class of bounded support proposal distributions and derive Kullback-Leibler divergence for Gaussian and uniform priors. We also study a continuous relaxation of deterministic decoding objective function and analyze the relation of reconstruction accuracy and relaxation parameters. We demonstrate the performance of DD-VAE on multiple datasets, including molecular generation and optimization problems. 
4 | 5 | For more details, please refer to the [full paper](https://arxiv.org/abs/2003.02174). 6 | 7 | ### Repository 8 | In this repository, we provide all the code and data necessary to reproduce the results from the paper. To reproduce the experiments, we recommend using a Docker image built from the provided `Dockerfile`: 9 | ```{bash} 10 | nvidia-docker build -t dd_vae . 11 | nvidia-docker run -it --shm-size 10G --network="host" --name dd_vae -w=/code/dd_vae dd_vae 12 | ``` 13 | All the code will be available inside the `/code/dd_vae` folder. For more details on using Docker, please refer to the [Docker manual](https://docs.docker.com/). 14 | 15 | You can also install `dd_vae` locally by running the `python setup.py install` command. 16 | 17 | ### Reproducing the experiments 18 | You can train any model using the `train.py` script. This script takes only two arguments: `--config` (path to a .ini file that sets up the experiment) and `--device` (a PyTorch-style device name such as `cuda:0`). We provide all configuration files in the `configs/` folder. For each experiment, we provide a separate Jupyter notebook where you will find further instructions to reproduce the experiments: 19 | * [Synthetic](./synthetic.ipynb) 20 | * [MNIST](./mnist.ipynb) 21 | * [MOSES (metrics)](./moses_prepare_metrics.ipynb), [MOSES (plots)](./moses_plots.ipynb) 22 | * [ZINC](./bo.ipynb) 23 | 24 | ### How to cite 25 | ``` 26 | @InProceedings{pmlr-v108-polykovskiy20a, 27 | title = {Deterministic Decoding for Discrete Data in Variational Autoencoders}, 28 | author = {Polykovskiy, Daniil and Vetrov, Dmitry}, 29 | booktitle = {Proceedings of the Twenty Third International Conference on Artificial Intelligence and Statistics}, 30 | pages = {3046--3056}, 31 | year = {2020}, 32 | editor = {Silvia Chiappa and Roberto Calandra}, 33 | volume = {108}, 34 | series = {Proceedings of Machine Learning Research}, address = {Online}, 35 | month = {26--28 Aug}, 36 | publisher = {PMLR} 37 | } 38 | ``` 39 | -------------------------------------------------------------------------------- /bo/gen_latent.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import os 3 | import sys 4 | from functools import partial 5 | 6 | import numpy as np 7 | import pandas as pd 8 | import rdkit 9 | from dd_vae.bo.utils import max_ring_penalty 10 | from dd_vae.utils import collate, StringDataset, batch_to_device 11 | from dd_vae.vae_rnn import VAE_RNN 12 | from moses.metrics import SA 13 | from rdkit import Chem 14 | from rdkit.Chem import Descriptors 15 | from torch.utils.data import DataLoader 16 | from tqdm.auto import tqdm 17 | rdkit.rdBase.DisableLog('rdApp.*') 18 | 19 | 20 | def load_csv(path): 21 | if path.endswith('.gz'): 22 | df = pd.read_csv(path, compression='gzip', 23 | dtype='str', header=None) 24 | return list(df[0].values) 25 | return [x.strip() for x in open(path)] 26 | 27 | 28 | parser = argparse.ArgumentParser() 29 | parser.add_argument("--data", type=str, required=True) 30 | parser.add_argument("--model", type=str, required=True) 31 | parser.add_argument("--device", type=str, default="cpu") 32 | parser.add_argument("--save_dir", type=str, required=True) 33 | args = parser.parse_args(sys.argv[1:]) 34 | 35 | model = VAE_RNN.load(args.model) 36 | model = model.to(args.device) 37 | 38 | smiles = load_csv(args.data) 39 | 40 | logP_values = [] 41 | latent_points = [] 42 | cycle_scores = [] 43 | SA_scores = [] 44 | 45 | print("Preparing dataset...") 46 | collate_pad = partial(collate,
pad=model.vocab.pad, return_data=True) 47 | dataset = StringDataset(model.vocab, smiles) 48 | data_loader = DataLoader(dataset, collate_fn=collate_pad, 49 | batch_size=512, shuffle=False) 50 | print("Getting latent codes...") 51 | for batch in tqdm(data_loader): 52 | z = model.encode(batch_to_device(batch[:-1], args.device)) 53 | mu, _ = model.get_mu_std(z) 54 | latent_points.append(mu.detach().cpu().numpy()) 55 | romol = [Chem.MolFromSmiles(x.strip()) for x in batch[-1]] 56 | logP_values.extend([Descriptors.MolLogP(m) for m in romol]) 57 | SA_scores.extend([-SA(m) for m in romol]) 58 | cycle_scores.extend([max_ring_penalty(m) for m in romol]) 59 | 60 | SA_scores = np.array(SA_scores) 61 | logP_values = np.array(logP_values) 62 | cycle_scores = np.array(cycle_scores) 63 | 64 | SA_scores_normalized = (SA_scores - SA_scores.mean()) / SA_scores.std() 65 | logP_values_normalized = (logP_values - logP_values.mean()) / logP_values.std() 66 | cycle_scores_normalized = ( 67 | cycle_scores - cycle_scores.mean()) / cycle_scores.std() 68 | 69 | latent_points = np.vstack(latent_points) 70 | 71 | targets = (SA_scores_normalized + 72 | logP_values_normalized + 73 | cycle_scores_normalized) 74 | os.makedirs(args.save_dir, exist_ok=True) 75 | np.savez_compressed(os.path.join(args.save_dir, 'features.npz'), 76 | latent_points=latent_points, 77 | targets=targets, logP_values=logP_values, 78 | SA_scores=SA_scores, cycle_scores=cycle_scores) 79 | -------------------------------------------------------------------------------- /bo/run_bo.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import gzip 3 | import os 4 | import pickle 5 | import sys 6 | 7 | import numpy as np 8 | import rdkit 9 | import scipy.stats as sps 10 | import torch 11 | from dd_vae.bo.sparse_gp import SparseGP 12 | from dd_vae.bo.utils import max_ring_penalty 13 | from dd_vae.utils import prepare_seed 14 | from dd_vae.vae_rnn import VAE_RNN 15 | from moses.metrics import SA 16 | from rdkit import Chem 17 | from rdkit.Chem import Descriptors 18 | from rdkit.Chem import MolFromSmiles 19 | 20 | rdkit.rdBase.DisableLog('rdApp.*') 21 | 22 | 23 | # We define the functions used to load and save objects 24 | def save_object(obj, filename): 25 | result = pickle.dumps(obj) 26 | with gzip.GzipFile(filename, 'wb') as dest: 27 | dest.write(result) 28 | dest.close() 29 | 30 | 31 | def load_object(filename): 32 | with gzip.GzipFile(filename, 'rb') as source: 33 | result = source.read() 34 | ret = pickle.loads(result) 35 | source.close() 36 | return ret 37 | 38 | 39 | parser = argparse.ArgumentParser() 40 | 41 | parser.add_argument("--model", type=str, required=True) 42 | parser.add_argument("--save_dir", type=str, required=True) 43 | parser.add_argument("--device", type=str, default="cpu") 44 | parser.add_argument("--seed", type=int, default=777) 45 | parser.add_argument("--load_dir", type=str, required=True) 46 | args = parser.parse_args(sys.argv[1:]) 47 | 48 | prepare_seed(args.seed) 49 | 50 | model = VAE_RNN.load(args.model).to(args.device) 51 | 52 | # We load the data (y is minued!) 
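# Note: the sparse GP / expected-improvement routines used below treat lower values as
# better, so the target (the standardized penalized-logP composite produced by
# gen_latent.py) is negated here, and new molecules scored later in this script are
# appended with a flipped sign as well.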
53 | data = np.load(os.path.join(args.load_dir, 'features.npz')) 54 | X = data['latent_points'] 55 | y = -data['targets'] 56 | y = y.reshape((-1, 1)) 57 | 58 | n = X.shape[0] 59 | 60 | permutation = np.random.choice(n, n, replace=False) 61 | 62 | X_train = X[permutation, :][0: np.int(np.round(0.9 * n)), :] 63 | X_test = X[permutation, :][np.int(np.round(0.9 * n)):, :] 64 | 65 | y_train = y[permutation][0: np.int(np.round(0.9 * n))] 66 | y_test = y[permutation][np.int(np.round(0.9 * n)):] 67 | 68 | np.random.seed(args.seed) 69 | 70 | logP_values = data['logP_values'] 71 | SA_scores = data['SA_scores'] 72 | cycle_scores = data['cycle_scores'] 73 | SA_scores_normalized = (np.array(SA_scores) - np.mean(SA_scores)) / np.std( 74 | SA_scores) 75 | logP_values_normalized = (np.array(logP_values) - np.mean( 76 | logP_values)) / np.std(logP_values) 77 | cycle_scores_normalized = (np.array(cycle_scores) - np.mean( 78 | cycle_scores)) / np.std(cycle_scores) 79 | 80 | iteration = 0 81 | while iteration < 5: 82 | # We fit the GP 83 | np.random.seed(iteration * args.seed) 84 | M = 500 85 | sgp = SparseGP(X_train, 0 * X_train, y_train, M) 86 | sgp.train_via_ADAM(X_train, 0 * X_train, y_train, X_test, X_test * 0, 87 | y_test, minibatch_size=10 * M, max_iterations=100, 88 | learning_rate=0.001) 89 | 90 | pred, uncert = sgp.predict(X_test, 0 * X_test) 91 | error = np.sqrt(np.mean((pred - y_test) ** 2)) 92 | testll = np.mean(sps.norm.logpdf(pred - y_test, scale=np.sqrt(uncert))) 93 | print('Test RMSE:', error) 94 | print('Test ll:', testll) 95 | 96 | pred, uncert = sgp.predict(X_train, 0 * X_train) 97 | error = np.sqrt(np.mean((pred - y_train) ** 2)) 98 | trainll = np.mean(sps.norm.logpdf(pred - y_train, scale=np.sqrt(uncert))) 99 | print('Train RMSE:', error) 100 | print('Train ll:', trainll) 101 | 102 | # We pick the next 60 inputs 103 | iters = 60 104 | next_inputs = sgp.batched_greedy_ei(iters, np.min(X_train, 0), 105 | np.max(X_train, 0)) 106 | valid_smiles = [] 107 | new_features = [] 108 | for i in range(iters): 109 | all_vec = next_inputs[i].reshape((1, -1)) 110 | smiles = model.sample(1, z=torch.tensor(all_vec).float())[0] 111 | mol = Chem.MolFromSmiles(smiles) 112 | if mol is None: 113 | continue 114 | err = Chem.SanitizeMol(mol, catchErrors=True) 115 | if err != 0: 116 | continue 117 | valid_smiles.append(smiles) 118 | new_features.append(all_vec) 119 | 120 | valid_smiles = valid_smiles[:50] 121 | if len(new_features) != 0: 122 | new_features = np.vstack(new_features)[:50] 123 | else: 124 | new_features = np.zeros((0, X_train.shape[1])) 125 | os.makedirs(args.save_dir, exist_ok=True) 126 | save_object(valid_smiles, 127 | os.path.join(args.save_dir, 128 | "valid_smiles{}.dat".format(iteration))) 129 | 130 | scores = [] 131 | for i in range(len(valid_smiles)): 132 | mol = MolFromSmiles(valid_smiles[i]) 133 | current_log_P_value = Descriptors.MolLogP(mol) 134 | current_SA_score = -SA(mol) 135 | current_cycle_score = max_ring_penalty(mol) 136 | 137 | current_SA_score_normalized = (current_SA_score - np.mean( 138 | SA_scores)) / np.std(SA_scores) 139 | current_log_P_value_normalized = (current_log_P_value - np.mean( 140 | logP_values)) / np.std(logP_values) 141 | current_cycle_score_normalized = (current_cycle_score - np.mean( 142 | cycle_scores)) / np.std(cycle_scores) 143 | 144 | score = (current_SA_score_normalized + 145 | current_log_P_value_normalized + 146 | current_cycle_score_normalized) 147 | scores.append(-score) # target is always minused 148 | 149 | print(f"{len(valid_smiles)} molecules 
found. Scores: {scores}") 150 | save_object(scores, 151 | os.path.join(args.save_dir, "scores{}.dat".format(iteration))) 152 | 153 | if len(new_features) > 0: 154 | X_train = np.concatenate([X_train, new_features], 0) 155 | y_train = np.concatenate([y_train, np.array(scores)[:, None]], 0) 156 | 157 | iteration += 1 158 | -------------------------------------------------------------------------------- /bo/run_experiment.sh: -------------------------------------------------------------------------------- 1 | set -ex 2 | mkdir -p bo/results/$2/ 3 | python bo/gen_latent.py \ 4 | --data data/zinc/250k_rndm_zinc_drugs_clean.smi.gz \ 5 | --model $1 \ 6 | --device $3 --save_dir bo/results/$2/ 7 | 8 | for SEED in $(seq 1 10) 9 | do 10 | mkdir -p bo/results/$2/experiment\_$SEED/ 11 | python bo/run_bo.py \ 12 | --model $1 \ 13 | --save_dir bo/results/$2/experiment\_$SEED/ \ 14 | --device $3 \ 15 | --seed $SEED \ 16 | --load_dir bo/results/$2/ > bo/results/$2/experiment\_$SEED/log.txt & 17 | sleep 20 18 | done 19 | -------------------------------------------------------------------------------- /configs/mnist/N_N.ini: -------------------------------------------------------------------------------- 1 | [model] 2 | layer_sizes = [256, 128, 32] 3 | latent_size = 2 4 | proposal = 'gaussian' 5 | prior = 'gaussian' 6 | 7 | [data] 8 | title = 'MNIST' 9 | 10 | [train] 11 | epochs = 150 12 | lr_reduce_epochs = 20 13 | lr_reduce_gamma = 0.5 14 | lr = 5e-3 15 | batch_size = 512 16 | verbose = 'epoch' 17 | clamp = 10 18 | checkpoint = 'single' 19 | mode = 'sample' 20 | 21 | [kl] 22 | start_epoch = 4 23 | end_epoch = 100 24 | start = 1e-5 25 | end = 0.005 26 | 27 | [temperature] 28 | start_epoch = 0 29 | end_epoch = 100 30 | start = 0.01 31 | end = 0.001 32 | log = True 33 | 34 | [save] 35 | log_dir = 'logs/mnist/N_N/' 36 | model_dir = 'models/mnist/N_N/' 37 | -------------------------------------------------------------------------------- /configs/mnist/U_U.ini: -------------------------------------------------------------------------------- 1 | [model] 2 | layer_sizes = [256, 128, 32] 3 | latent_size = 2 4 | proposal = 'uniform' 5 | prior = 'uniform' 6 | 7 | [data] 8 | title = 'MNIST' 9 | 10 | [train] 11 | epochs = 150 12 | lr_reduce_epochs = 20 13 | lr_reduce_gamma = 0.5 14 | lr = 5e-3 15 | batch_size = 512 16 | verbose = 'epoch' 17 | checkpoint = 'single' 18 | mode = 'argmax' 19 | 20 | [kl] 21 | start_epoch = 4 22 | end_epoch = 100 23 | start = 1e-5 24 | end = 0.05 25 | 26 | [temperature] 27 | start_epoch = 0 28 | end_epoch = 100 29 | start = 0.01 30 | end = 0.001 31 | log = True 32 | 33 | [save] 34 | log_dir = 'logs/mnist/U_U/' 35 | model_dir = 'models/mnist/U_U/' 36 | -------------------------------------------------------------------------------- /configs/moses/DD-VAE_gaussian_seed1.ini: -------------------------------------------------------------------------------- 1 | [model] 2 | embedding_size = 64 3 | hidden_size = 512 4 | latent_size = 64 5 | num_layers = 2 6 | proposal = 'gaussian' 7 | prior = 'gaussian' 8 | use_embedding_input = True 9 | 10 | [train] 11 | epochs = 200 12 | lr_reduce_epochs = [20] 13 | lr_reduce_gamma = 0.5 14 | lr = 5e-4 15 | batch_size = 512 16 | verbose = 'epoch' 17 | clamp = 10 18 | checkpoint = 'epoch' 19 | mode = 'argmax' 20 | seed = 1 21 | 22 | [data] 23 | title = 'moses' 24 | train_path = 'data/moses/train.csv.gz' 25 | test_path = 'data/moses/test.csv.gz' 26 | 27 | [kl] 28 | start_epoch = 20 29 | end_epoch = 200 30 | start = 0.0015 31 | end = 0.02 32 | 33 | [temperature] 34 | 
start_epoch = 0 35 | end_epoch = 10 36 | start = 0.2 37 | end = 0.1 38 | log = True 39 | 40 | [save] 41 | log_dir = 'logs/moses/DD-VAE_gaussian_seed1/' 42 | model_dir = 'models/moses/DD-VAE_gaussian_seed1/' 43 | -------------------------------------------------------------------------------- /configs/moses/DD-VAE_gaussian_seed2.ini: -------------------------------------------------------------------------------- 1 | [model] 2 | embedding_size = 64 3 | hidden_size = 512 4 | latent_size = 64 5 | num_layers = 2 6 | proposal = 'gaussian' 7 | prior = 'gaussian' 8 | use_embedding_input = True 9 | 10 | [train] 11 | epochs = 200 12 | lr_reduce_epochs = [20] 13 | lr_reduce_gamma = 0.5 14 | lr = 5e-4 15 | batch_size = 512 16 | verbose = 'epoch' 17 | clamp = 10 18 | checkpoint = 'epoch' 19 | mode = 'argmax' 20 | seed = 2 21 | 22 | [data] 23 | title = 'moses' 24 | train_path = 'data/moses/train.csv.gz' 25 | test_path = 'data/moses/test.csv.gz' 26 | 27 | [kl] 28 | start_epoch = 20 29 | end_epoch = 200 30 | start = 0.0015 31 | end = 0.02 32 | 33 | [temperature] 34 | start_epoch = 0 35 | end_epoch = 10 36 | start = 0.2 37 | end = 0.1 38 | log = True 39 | 40 | [save] 41 | log_dir = 'logs/moses/DD-VAE_gaussian_seed2/' 42 | model_dir = 'models/moses/DD-VAE_gaussian_seed2/' 43 | -------------------------------------------------------------------------------- /configs/moses/DD-VAE_gaussian_seed3.ini: -------------------------------------------------------------------------------- 1 | [model] 2 | embedding_size = 64 3 | hidden_size = 512 4 | latent_size = 64 5 | num_layers = 2 6 | proposal = 'gaussian' 7 | prior = 'gaussian' 8 | use_embedding_input = True 9 | 10 | [train] 11 | epochs = 200 12 | lr_reduce_epochs = [20] 13 | lr_reduce_gamma = 0.5 14 | lr = 5e-4 15 | batch_size = 512 16 | verbose = 'epoch' 17 | clamp = 10 18 | checkpoint = 'epoch' 19 | mode = 'argmax' 20 | seed = 3 21 | 22 | [data] 23 | title = 'moses' 24 | train_path = 'data/moses/train.csv.gz' 25 | test_path = 'data/moses/test.csv.gz' 26 | 27 | [kl] 28 | start_epoch = 20 29 | end_epoch = 200 30 | start = 0.0015 31 | end = 0.02 32 | 33 | [temperature] 34 | start_epoch = 0 35 | end_epoch = 10 36 | start = 0.2 37 | end = 0.1 38 | log = True 39 | 40 | [save] 41 | log_dir = 'logs/moses/DD-VAE_gaussian_seed3/' 42 | model_dir = 'models/moses/DD-VAE_gaussian_seed3/' 43 | -------------------------------------------------------------------------------- /configs/moses/DD-VAE_triweight_seed1.ini: -------------------------------------------------------------------------------- 1 | [model] 2 | embedding_size = 64 3 | hidden_size = 512 4 | latent_size = 64 5 | num_layers = 2 6 | proposal = 'triweight' 7 | prior = 'gaussian' 8 | use_embedding_input = True 9 | 10 | [train] 11 | epochs = 200 12 | lr_reduce_epochs = [20] 13 | lr_reduce_gamma = 0.5 14 | lr = 5e-4 15 | batch_size = 512 16 | verbose = 'epoch' 17 | clamp = 10 18 | checkpoint = 'epoch' 19 | mode = 'argmax' 20 | seed = 1 21 | 22 | [data] 23 | title = 'moses' 24 | train_path = 'data/moses/train.csv.gz' 25 | test_path = 'data/moses/test.csv.gz' 26 | 27 | [kl] 28 | start_epoch = 20 29 | end_epoch = 200 30 | start = 0.0015 31 | end = 0.02 32 | 33 | [temperature] 34 | start_epoch = 0 35 | end_epoch = 10 36 | start = 0.2 37 | end = 0.1 38 | log = True 39 | 40 | [save] 41 | log_dir = 'logs/moses/DD-VAE_triweight_seed1/' 42 | model_dir = 'models/moses/DD-VAE_triweight_seed1/' 43 | -------------------------------------------------------------------------------- /configs/moses/DD-VAE_triweight_seed2.ini: 
-------------------------------------------------------------------------------- 1 | [model] 2 | embedding_size = 64 3 | hidden_size = 512 4 | latent_size = 64 5 | num_layers = 2 6 | proposal = 'triweight' 7 | prior = 'gaussian' 8 | use_embedding_input = True 9 | 10 | [train] 11 | epochs = 200 12 | lr_reduce_epochs = [20] 13 | lr_reduce_gamma = 0.5 14 | lr = 5e-4 15 | batch_size = 512 16 | verbose = 'epoch' 17 | clamp = 10 18 | checkpoint = 'epoch' 19 | mode = 'argmax' 20 | seed = 2 21 | 22 | [data] 23 | title = 'moses' 24 | train_path = 'data/moses/train.csv.gz' 25 | test_path = 'data/moses/test.csv.gz' 26 | 27 | [kl] 28 | start_epoch = 20 29 | end_epoch = 200 30 | start = 0.0015 31 | end = 0.02 32 | 33 | [temperature] 34 | start_epoch = 0 35 | end_epoch = 10 36 | start = 0.2 37 | end = 0.1 38 | log = True 39 | 40 | [save] 41 | log_dir = 'logs/moses/DD-VAE_triweight_seed2/' 42 | model_dir = 'models/moses/DD-VAE_triweight_seed2/' 43 | -------------------------------------------------------------------------------- /configs/moses/DD-VAE_triweight_seed3.ini: -------------------------------------------------------------------------------- 1 | [model] 2 | embedding_size = 64 3 | hidden_size = 512 4 | latent_size = 64 5 | num_layers = 2 6 | proposal = 'triweight' 7 | prior = 'gaussian' 8 | use_embedding_input = True 9 | 10 | [train] 11 | epochs = 200 12 | lr_reduce_epochs = [20] 13 | lr_reduce_gamma = 0.5 14 | lr = 5e-4 15 | batch_size = 512 16 | verbose = 'epoch' 17 | clamp = 10 18 | checkpoint = 'epoch' 19 | mode = 'argmax' 20 | seed = 3 21 | 22 | [data] 23 | title = 'moses' 24 | train_path = 'data/moses/train.csv.gz' 25 | test_path = 'data/moses/test.csv.gz' 26 | 27 | [kl] 28 | start_epoch = 20 29 | end_epoch = 200 30 | start = 0.0015 31 | end = 0.02 32 | 33 | [temperature] 34 | start_epoch = 0 35 | end_epoch = 10 36 | start = 0.2 37 | end = 0.1 38 | log = True 39 | 40 | [save] 41 | log_dir = 'logs/moses/DD-VAE_triweight_seed3/' 42 | model_dir = 'models/moses/DD-VAE_triweight_seed3/' 43 | -------------------------------------------------------------------------------- /configs/moses/VAE_gaussian_seed1.ini: -------------------------------------------------------------------------------- 1 | [model] 2 | embedding_size = 64 3 | hidden_size = 512 4 | latent_size = 64 5 | num_layers = 2 6 | proposal = 'gaussian' 7 | prior = 'gaussian' 8 | use_embedding_input = True 9 | 10 | [train] 11 | epochs = 200 12 | lr_reduce_epochs = [20] 13 | lr_reduce_gamma = 0.5 14 | lr = 5e-4 15 | batch_size = 512 16 | verbose = 'epoch' 17 | clamp = 10 18 | checkpoint = 'epoch' 19 | mode = 'sample' 20 | seed = 1 21 | 22 | [data] 23 | title = 'moses' 24 | train_path = 'data/moses/train.csv.gz' 25 | test_path = 'data/moses/test.csv.gz' 26 | 27 | [kl] 28 | start_epoch = 20 29 | end_epoch = 200 30 | start = 0.0005 31 | end = 0.01 32 | 33 | [temperature] 34 | start_epoch = 0 35 | end_epoch = 10 36 | start = 0.2 37 | end = 0.1 38 | log = True 39 | 40 | [save] 41 | log_dir = 'logs/moses/VAE_gaussian_seed1/' 42 | model_dir = 'models/moses/VAE_gaussian_seed1/' 43 | -------------------------------------------------------------------------------- /configs/moses/VAE_gaussian_seed2.ini: -------------------------------------------------------------------------------- 1 | [model] 2 | embedding_size = 64 3 | hidden_size = 512 4 | latent_size = 64 5 | num_layers = 2 6 | proposal = 'gaussian' 7 | prior = 'gaussian' 8 | use_embedding_input = True 9 | 10 | [train] 11 | epochs = 200 12 | lr_reduce_epochs = [20] 13 | lr_reduce_gamma = 0.5 14 
| lr = 5e-4 15 | batch_size = 512 16 | verbose = 'epoch' 17 | clamp = 10 18 | checkpoint = 'epoch' 19 | mode = 'sample' 20 | seed = 2 21 | 22 | [data] 23 | title = 'moses' 24 | train_path = 'data/moses/train.csv.gz' 25 | test_path = 'data/moses/test.csv.gz' 26 | 27 | [kl] 28 | start_epoch = 20 29 | end_epoch = 200 30 | start = 0.0005 31 | end = 0.01 32 | 33 | [temperature] 34 | start_epoch = 0 35 | end_epoch = 10 36 | start = 0.2 37 | end = 0.1 38 | log = True 39 | 40 | [save] 41 | log_dir = 'logs/moses/VAE_gaussian_seed2/' 42 | model_dir = 'models/moses/VAE_gaussian_seed2/' 43 | -------------------------------------------------------------------------------- /configs/moses/VAE_gaussian_seed3.ini: -------------------------------------------------------------------------------- 1 | [model] 2 | embedding_size = 64 3 | hidden_size = 512 4 | latent_size = 64 5 | num_layers = 2 6 | proposal = 'gaussian' 7 | prior = 'gaussian' 8 | use_embedding_input = True 9 | 10 | [train] 11 | epochs = 200 12 | lr_reduce_epochs = [20] 13 | lr_reduce_gamma = 0.5 14 | lr = 5e-4 15 | batch_size = 512 16 | verbose = 'epoch' 17 | clamp = 10 18 | checkpoint = 'epoch' 19 | mode = 'sample' 20 | seed = 3 21 | 22 | [data] 23 | title = 'moses' 24 | train_path = 'data/moses/train.csv.gz' 25 | test_path = 'data/moses/test.csv.gz' 26 | 27 | [kl] 28 | start_epoch = 20 29 | end_epoch = 200 30 | start = 0.0005 31 | end = 0.01 32 | 33 | [temperature] 34 | start_epoch = 0 35 | end_epoch = 10 36 | start = 0.2 37 | end = 0.1 38 | log = True 39 | 40 | [save] 41 | log_dir = 'logs/moses/VAE_gaussian_seed3/' 42 | model_dir = 'models/moses/VAE_gaussian_seed3/' 43 | -------------------------------------------------------------------------------- /configs/moses/VAE_triweight_seed1.ini: -------------------------------------------------------------------------------- 1 | [model] 2 | embedding_size = 64 3 | hidden_size = 512 4 | latent_size = 64 5 | num_layers = 2 6 | proposal = 'triweight' 7 | prior = 'gaussian' 8 | use_embedding_input = True 9 | 10 | [train] 11 | epochs = 200 12 | lr_reduce_epochs = [20] 13 | lr_reduce_gamma = 0.5 14 | lr = 5e-4 15 | batch_size = 512 16 | verbose = 'epoch' 17 | clamp = 10 18 | checkpoint = 'epoch' 19 | mode = 'sample' 20 | seed = 1 21 | 22 | [data] 23 | title = 'moses' 24 | train_path = 'data/moses/train.csv.gz' 25 | test_path = 'data/moses/test.csv.gz' 26 | 27 | [kl] 28 | start_epoch = 20 29 | end_epoch = 200 30 | start = 0.0005 31 | end = 0.01 32 | 33 | [temperature] 34 | start_epoch = 0 35 | end_epoch = 10 36 | start = 0.2 37 | end = 0.1 38 | log = True 39 | 40 | [save] 41 | log_dir = 'logs/moses/VAE_triweight_seed1/' 42 | model_dir = 'models/moses/VAE_triweight_seed1/' 43 | -------------------------------------------------------------------------------- /configs/moses/VAE_triweight_seed2.ini: -------------------------------------------------------------------------------- 1 | [model] 2 | embedding_size = 64 3 | hidden_size = 512 4 | latent_size = 64 5 | num_layers = 2 6 | proposal = 'triweight' 7 | prior = 'gaussian' 8 | use_embedding_input = True 9 | 10 | [train] 11 | epochs = 200 12 | lr_reduce_epochs = [20] 13 | lr_reduce_gamma = 0.5 14 | lr = 5e-4 15 | batch_size = 512 16 | verbose = 'epoch' 17 | clamp = 10 18 | checkpoint = 'epoch' 19 | mode = 'sample' 20 | seed = 2 21 | 22 | [data] 23 | title = 'moses' 24 | train_path = 'data/moses/train.csv.gz' 25 | test_path = 'data/moses/test.csv.gz' 26 | 27 | [kl] 28 | start_epoch = 20 29 | end_epoch = 200 30 | start = 0.0005 31 | end = 0.01 32 | 33 | 
[temperature] 34 | start_epoch = 0 35 | end_epoch = 10 36 | start = 0.2 37 | end = 0.1 38 | log = True 39 | 40 | [save] 41 | log_dir = 'logs/moses/VAE_triweight_seed2/' 42 | model_dir = 'models/moses/VAE_triweight_seed2/' 43 | -------------------------------------------------------------------------------- /configs/moses/VAE_triweight_seed3.ini: -------------------------------------------------------------------------------- 1 | [model] 2 | embedding_size = 64 3 | hidden_size = 512 4 | latent_size = 64 5 | num_layers = 2 6 | proposal = 'triweight' 7 | prior = 'gaussian' 8 | use_embedding_input = True 9 | 10 | [train] 11 | epochs = 200 12 | lr_reduce_epochs = [20] 13 | lr_reduce_gamma = 0.5 14 | lr = 5e-4 15 | batch_size = 512 16 | verbose = 'epoch' 17 | clamp = 10 18 | checkpoint = 'epoch' 19 | mode = 'sample' 20 | seed = 3 21 | 22 | [data] 23 | title = 'moses' 24 | train_path = 'data/moses/train.csv.gz' 25 | test_path = 'data/moses/test.csv.gz' 26 | 27 | [kl] 28 | start_epoch = 20 29 | end_epoch = 200 30 | start = 0.0005 31 | end = 0.01 32 | 33 | [temperature] 34 | start_epoch = 0 35 | end_epoch = 10 36 | start = 0.2 37 | end = 0.1 38 | log = True 39 | 40 | [save] 41 | log_dir = 'logs/moses/VAE_triweight_seed3/' 42 | model_dir = 'models/moses/VAE_triweight_seed3/' 43 | -------------------------------------------------------------------------------- /configs/synthetic/N_N.ini: -------------------------------------------------------------------------------- 1 | [model] 2 | embedding_size = 8 3 | hidden_size = 128 4 | latent_size = 2 5 | num_layers = 2 6 | proposal = 'gaussian' 7 | prior = 'gaussian' 8 | 9 | [train] 10 | epochs = 100 11 | lr_reduce_epochs = 20 12 | lr_reduce_gamma = 0.5 13 | lr = 5e-3 14 | batch_size = 512 15 | verbose = 'epoch' 16 | clamp = 2 17 | checkpoint = 'single' 18 | fine_tune = 10 19 | mode = 'sample' 20 | 21 | [data] 22 | title = 'p0.2' 23 | train_path = 'data/synthetic/2d_map_0.2.csv.gz' 24 | 25 | [kl] 26 | start_epoch = 2 27 | end_epoch = 100 28 | start = 0 29 | end = 0.1 30 | 31 | [temperature] 32 | start_epoch = 0 33 | end_epoch = 100 34 | start = 1e-1 35 | end = 1e-3 36 | log = True 37 | 38 | [save] 39 | log_dir = 'logs/synthetic/normal_normal/' 40 | model_dir = 'models/synthetic/normal_normal/' -------------------------------------------------------------------------------- /configs/synthetic/U_T.ini: -------------------------------------------------------------------------------- 1 | [model] 2 | embedding_size = 8 3 | hidden_size = 128 4 | latent_size = 2 5 | num_layers = 2 6 | proposal = 'tricube' 7 | prior = 'uniform' 8 | 9 | [train] 10 | epochs = 100 11 | lr_reduce_epochs = 20 12 | lr_reduce_gamma = 0.5 13 | lr = 5e-3 14 | batch_size = 512 15 | verbose = 'epoch' 16 | clamp = 2 17 | checkpoint = 'single' 18 | fine_tune = 10 19 | mode = 'argmax' 20 | 21 | [data] 22 | title = 'p0.2' 23 | train_path = 'data/synthetic/2d_map_0.2.csv.gz' 24 | 25 | [kl] 26 | start_epoch = 2 27 | end_epoch = 100 28 | start = 0 29 | end = 0.1 30 | 31 | [temperature] 32 | start_epoch = 0 33 | end_epoch = 100 34 | start = 1e-1 35 | end = 1e-2 36 | log = True 37 | 38 | [save] 39 | log_dir = 'logs/synthetic/uniform_tricube/' 40 | model_dir = 'models/synthetic/uniform_tricube/' -------------------------------------------------------------------------------- /configs/synthetic/U_U.ini: -------------------------------------------------------------------------------- 1 | [model] 2 | embedding_size = 8 3 | hidden_size = 128 4 | latent_size = 2 5 | num_layers = 2 6 | proposal = 'uniform' 7 
| prior = 'uniform' 8 | 9 | [train] 10 | epochs = 100 11 | lr_reduce_epochs = 20 12 | lr_reduce_gamma = 0.5 13 | lr = 5e-3 14 | batch_size = 512 15 | verbose = 'epoch' 16 | clamp = 2 17 | checkpoint = 'single' 18 | fine_tune = 10 19 | mode = 'argmax' 20 | 21 | [data] 22 | title = 'p0.2' 23 | train_path = 'data/synthetic/2d_map_0.2.csv.gz' 24 | 25 | [kl] 26 | start_epoch = 2 27 | end_epoch = 100 28 | start = 0 29 | end = 1 30 | 31 | [temperature] 32 | start_epoch = 0 33 | end_epoch = 100 34 | start = 1e-1 35 | end = 1e-3 36 | log = True 37 | 38 | [save] 39 | log_dir = 'logs/synthetic/uniform_uniform/' 40 | model_dir = 'models/synthetic/uniform_uniform/' -------------------------------------------------------------------------------- /configs/zinc/dd_vae_gaussian.ini: -------------------------------------------------------------------------------- 1 | [model] 2 | embedding_size = 64 3 | hidden_size = 1024 4 | latent_size = 64 5 | num_layers = 1 6 | proposal = 'gaussian' 7 | prior = 'gaussian' 8 | use_embedding_input = True 9 | 10 | [train] 11 | epochs = 200 12 | lr_reduce_epochs = 50 13 | lr_reduce_gamma = 0.5 14 | lr = 5e-4 15 | batch_size = 512 16 | verbose = 'epoch' 17 | clamp = 2 18 | checkpoint = 'single' 19 | mode = 'argmax' 20 | seed = 1 21 | 22 | [data] 23 | title = 'zinc' 24 | train_path = 'data/zinc/train.csv.gz' 25 | test_path = 'data/zinc/valid.csv.gz' 26 | 27 | [kl] 28 | start_epoch = 0 29 | end_epoch = 50 30 | start = 0.001 31 | end = 0.02 32 | 33 | [temperature] 34 | start_epoch = 0 35 | end_epoch = 100 36 | start = 0.001 37 | end = 1e-4 38 | log = True 39 | 40 | [save] 41 | log_dir = 'logs/zinc/DD_VAE_GAUSSIAN/' 42 | model_dir = 'models/zinc/DD_VAE_GAUSSIAN/' 43 | -------------------------------------------------------------------------------- /configs/zinc/dd_vae_tricube.ini: -------------------------------------------------------------------------------- 1 | [model] 2 | embedding_size = 64 3 | hidden_size = 1024 4 | latent_size = 64 5 | num_layers = 1 6 | proposal = 'tricube' 7 | prior = 'gaussian' 8 | use_embedding_input = True 9 | 10 | [train] 11 | epochs = 200 12 | lr_reduce_epochs = 50 13 | lr_reduce_gamma = 0.5 14 | lr = 5e-4 15 | batch_size = 512 16 | verbose = 'epoch' 17 | clamp = 2 18 | checkpoint = 'single' 19 | mode = 'argmax' 20 | seed = 1 21 | 22 | [data] 23 | title = 'zinc' 24 | train_path = 'data/zinc/train.csv.gz' 25 | test_path = 'data/zinc/valid.csv.gz' 26 | 27 | [kl] 28 | start_epoch = 0 29 | end_epoch = 50 30 | start = 0.001 31 | end = 0.02 32 | 33 | [temperature] 34 | start_epoch = 0 35 | end_epoch = 100 36 | start = 0.001 37 | end = 1e-4 38 | log = True 39 | 40 | [save] 41 | log_dir = 'logs/zinc/DD_VAE_TRICUBE/' 42 | model_dir = 'models/zinc/DD_VAE_TRICUBE/' 43 | -------------------------------------------------------------------------------- /configs/zinc/vae_gaussian.ini: -------------------------------------------------------------------------------- 1 | [model] 2 | embedding_size = 64 3 | hidden_size = 1024 4 | latent_size = 64 5 | num_layers = 1 6 | proposal = 'gaussian' 7 | prior = 'gaussian' 8 | use_embedding_input = True 9 | 10 | [train] 11 | epochs = 200 12 | lr_reduce_epochs = 50 13 | lr_reduce_gamma = 0.5 14 | lr = 5e-4 15 | batch_size = 512 16 | verbose = 'epoch' 17 | clamp = 2 18 | checkpoint = 'single' 19 | mode = 'sample' 20 | seed = 1 21 | 22 | [data] 23 | title = 'zinc' 24 | train_path = 'data/zinc/train.csv.gz' 25 | test_path = 'data/zinc/valid.csv.gz' 26 | 27 | [kl] 28 | start_epoch = 0 29 | end_epoch = 50 30 | start = 0.0001 31 | end 
= 0.0008 32 | 33 | [temperature] 34 | start_epoch = 0 35 | end_epoch = 100 36 | start = 0.001 37 | end = 1e-4 38 | log = True 39 | 40 | [save] 41 | log_dir = 'logs/zinc/VAE_GAUSSIAN/' 42 | model_dir = 'models/zinc/VAE_GAUSSIAN/' 43 | -------------------------------------------------------------------------------- /configs/zinc/vae_tricube.ini: -------------------------------------------------------------------------------- 1 | [model] 2 | embedding_size = 64 3 | hidden_size = 1024 4 | latent_size = 64 5 | num_layers = 1 6 | proposal = 'tricube' 7 | prior = 'gaussian' 8 | use_embedding_input = True 9 | 10 | [train] 11 | epochs = 200 12 | lr_reduce_epochs = 50 13 | lr_reduce_gamma = 0.5 14 | lr = 5e-4 15 | batch_size = 512 16 | verbose = 'epoch' 17 | clamp = 2 18 | checkpoint = 'single' 19 | mode = 'sample' 20 | seed = 1 21 | 22 | [data] 23 | title = 'zinc' 24 | train_path = 'data/zinc/train.csv.gz' 25 | test_path = 'data/zinc/valid.csv.gz' 26 | 27 | [kl] 28 | start_epoch = 0 29 | end_epoch = 50 30 | start = 0.0001 31 | end = 0.0008 32 | 33 | [temperature] 34 | start_epoch = 0 35 | end_epoch = 100 36 | start = 0.001 37 | end = 1e-4 38 | log = True 39 | 40 | [save] 41 | log_dir = 'logs/zinc/VAE_TRICUBE/' 42 | model_dir = 'models/zinc/VAE_TRICUBE/' 43 | -------------------------------------------------------------------------------- /data/moses/test.csv.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/insilicomedicine/DD-VAE/13498d098bae2c8177abec61ab80d8b618d274f3/data/moses/test.csv.gz -------------------------------------------------------------------------------- /data/moses/test_scaffolds.csv.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/insilicomedicine/DD-VAE/13498d098bae2c8177abec61ab80d8b618d274f3/data/moses/test_scaffolds.csv.gz -------------------------------------------------------------------------------- /data/moses/test_scaffolds_stats.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/insilicomedicine/DD-VAE/13498d098bae2c8177abec61ab80d8b618d274f3/data/moses/test_scaffolds_stats.npz -------------------------------------------------------------------------------- /data/moses/test_stats.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/insilicomedicine/DD-VAE/13498d098bae2c8177abec61ab80d8b618d274f3/data/moses/test_stats.npz -------------------------------------------------------------------------------- /data/moses/train.csv.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/insilicomedicine/DD-VAE/13498d098bae2c8177abec61ab80d8b618d274f3/data/moses/train.csv.gz -------------------------------------------------------------------------------- /data/synthetic/2d_map_0.2.csv.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/insilicomedicine/DD-VAE/13498d098bae2c8177abec61ab80d8b618d274f3/data/synthetic/2d_map_0.2.csv.gz -------------------------------------------------------------------------------- /data/zinc/250k_rndm_zinc_drugs_clean.smi.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/insilicomedicine/DD-VAE/13498d098bae2c8177abec61ab80d8b618d274f3/data/zinc/250k_rndm_zinc_drugs_clean.smi.gz 
-------------------------------------------------------------------------------- /data/zinc/test.csv.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/insilicomedicine/DD-VAE/13498d098bae2c8177abec61ab80d8b618d274f3/data/zinc/test.csv.gz -------------------------------------------------------------------------------- /data/zinc/train.csv.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/insilicomedicine/DD-VAE/13498d098bae2c8177abec61ab80d8b618d274f3/data/zinc/train.csv.gz -------------------------------------------------------------------------------- /data/zinc/valid.csv.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/insilicomedicine/DD-VAE/13498d098bae2c8177abec61ab80d8b618d274f3/data/zinc/valid.csv.gz -------------------------------------------------------------------------------- /dd_vae/__init__.py: -------------------------------------------------------------------------------- 1 | from .vae_rnn import VAE_RNN 2 | from .vae_mnist import VAE_MNIST 3 | 4 | __all__ = ['VAE_RNN', 'VAE_MNIST'] 5 | -------------------------------------------------------------------------------- /dd_vae/bo/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/insilicomedicine/DD-VAE/13498d098bae2c8177abec61ab80d8b618d274f3/dd_vae/bo/__init__.py -------------------------------------------------------------------------------- /dd_vae/bo/gauss.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import theano 3 | import theano.tensor as T 4 | 5 | 6 | def casting(x): 7 | return np.array(x).astype(theano.config.floatX) 8 | 9 | 10 | def compute_kernel(lls, lsf, x, z): 11 | ls = T.exp(lls) 12 | sf = T.exp(lsf) 13 | 14 | if x.ndim == 1: 15 | x = x[None, :] 16 | 17 | if z.ndim == 1: 18 | z = z[None, :] 19 | 20 | lsre = T.outer(T.ones_like(x[:, 0]), ls) 21 | 22 | r2 = T.outer(T.sum(x * x / lsre, 1), T.ones_like(z[:, 0: 1])) - \ 23 | np.float32(2) * T.dot(x / lsre, T.transpose(z)) + \ 24 | T.dot(np.float32(1.0) / lsre, T.transpose(z) ** 2) 25 | 26 | k = sf * T.exp(-np.float32(0.5) * r2) 27 | 28 | return k 29 | 30 | 31 | def compute_kernel_numpy(lls, lsf, x, z): 32 | ls = np.exp(lls) 33 | sf = np.exp(lsf) 34 | 35 | if x.ndim == 1: 36 | x = x[None, :] 37 | 38 | if z.ndim == 1: 39 | z = z[None, :] 40 | 41 | lsre = np.outer(np.ones(x.shape[0]), ls) 42 | 43 | r2 = np.outer(np.sum(x * x / lsre, 1), np.ones(z.shape[0])) - \ 44 | 2 * np.dot(x / lsre, z.T) + np.dot(1.0 / lsre, z.T ** 2) 45 | 46 | k = sf * np.exp(-0.5 * r2) 47 | 48 | return k 49 | 50 | 51 | ## 52 | # xmean and xvar can be vectors of input points 53 | # 54 | # This is the expected value of the kernel 55 | # 56 | 57 | def compute_psi1(lls, lsf, xmean, xvar, z): 58 | if xmean.ndim == 1: 59 | xmean = xmean[None, :] 60 | 61 | ls = T.exp(lls) 62 | sf = T.exp(lsf) 63 | lspxvar = ls + xvar 64 | constterm1 = ls / lspxvar 65 | constterm2 = T.prod(T.sqrt(constterm1), 1) 66 | r2_psi1 = T.outer(T.sum(xmean * xmean / lspxvar, 1), 67 | T.ones_like(z[:, 0: 1])) - \ 68 | np.float32(2) * T.dot(xmean / lspxvar, T.transpose(z)) + \ 69 | T.dot(np.float32(1.0) / lspxvar, T.transpose(z) ** 2) 70 | psi1 = sf * T.outer(constterm2, T.ones_like(z[:, 0: 1])) * T.exp( 71 | -np.float32(0.5) * r2_psi1) 72 | 73 | return psi1 74 | 75 | 76 | def 
compute_psi1_numpy(lls, lsf, xmean, xvar, z): 77 | if xmean.ndim == 1: 78 | xmean = xmean[None, :] 79 | 80 | ls = np.exp(lls) 81 | sf = np.exp(lsf) 82 | lspxvar = ls + xvar 83 | constterm1 = ls / lspxvar 84 | constterm2 = np.prod(np.sqrt(constterm1), 1) 85 | r2_psi1 = np.outer(np.sum(xmean * xmean / lspxvar, 1), 86 | np.ones(z.shape[0])) - \ 87 | 2 * np.dot(xmean / lspxvar, z.T) + \ 88 | np.dot(1.0 / lspxvar, z.T ** 2) 89 | psi1 = sf * np.outer(constterm2, np.ones(z.shape[0])) * np.exp( 90 | -0.5 * r2_psi1) 91 | return psi1 92 | 93 | 94 | def compute_psi2(lls, lsf, z, input_means, input_vars): 95 | ls = T.exp(lls) 96 | sf = T.exp(lsf) 97 | b = ls / casting(2.0) 98 | term_1 = T.prod(T.sqrt(b / (b + input_vars)), 1) 99 | 100 | scale = T.sqrt(4 * (2 * b[None, :] + 0 * input_vars)) 101 | scaled_z = z[None, :, :] / scale[:, None, :] 102 | scaled_z_minus_m = scaled_z 103 | r2b = T.sum(scaled_z_minus_m ** 2, 2)[:, None, :] + \ 104 | T.sum(scaled_z_minus_m ** 2, 2)[:, :, None] - \ 105 | 2 * T.batched_dot(scaled_z_minus_m, 106 | np.transpose(scaled_z_minus_m, [0, 2, 1])) 107 | term_2 = T.exp(-r2b) 108 | 109 | scale = T.sqrt(4 * (2 * b[None, :] + 2 * input_vars)) 110 | scaled_z = z[None, :, :] / scale[:, None, :] 111 | scaled_m = input_means / scale 112 | scaled_m = T.tile(scaled_m[:, None, :], [1, z.shape[0], 1]) 113 | scaled_z_minus_m = scaled_z - scaled_m 114 | r2b = T.sum(scaled_z_minus_m ** 2, 2)[:, None, :] + \ 115 | T.sum(scaled_z_minus_m ** 2, 2)[:, :, None] + \ 116 | 2 * T.batched_dot(scaled_z_minus_m, 117 | np.transpose(scaled_z_minus_m, [0, 2, 1])) 118 | term_3 = T.exp(-r2b) 119 | 120 | psi2_computed = sf ** casting(2.0) * \ 121 | term_1[:, None, None] * term_2 * term_3 122 | 123 | return T.transpose(psi2_computed, [1, 2, 0]) 124 | 125 | 126 | def compute_psi2_numpy(lls, lsf, z, input_means, input_vars): 127 | ls = np.exp(lls) 128 | sf = np.exp(lsf) 129 | b = ls / casting(2.0) 130 | term_1 = np.prod(np.sqrt(b / (b + input_vars)), 1) 131 | 132 | scale = np.sqrt(4 * (2 * b[None, :] + 0 * input_vars)) 133 | scaled_z = z[None, :, :] / scale[:, None, :] 134 | scaled_z_minus_m = scaled_z 135 | r2b = np.sum(scaled_z_minus_m ** 2, 2)[:, None, :] + \ 136 | np.sum(scaled_z_minus_m ** 2, 2)[:, :, None] - \ 137 | 2 * np.einsum('ijk,ikl->ijl', scaled_z_minus_m, 138 | np.transpose(scaled_z_minus_m, [0, 2, 1])) 139 | term_2 = np.exp(-r2b) 140 | 141 | scale = np.sqrt(4 * (2 * b[None, :] + 2 * input_vars)) 142 | scaled_z = z[None, :, :] / scale[:, None, :] 143 | scaled_m = input_means / scale 144 | scaled_m = np.tile(scaled_m[:, None, :], [1, z.shape[0], 1]) 145 | scaled_z_minus_m = scaled_z - scaled_m 146 | r2b = np.sum(scaled_z_minus_m ** 2, 2)[:, None, :] + \ 147 | np.sum(scaled_z_minus_m ** 2, 2)[:, :, None] + \ 148 | 2 * np.einsum('ijk,ikl->ijl', scaled_z_minus_m, 149 | np.transpose(scaled_z_minus_m, [0, 2, 1])) 150 | term_3 = np.exp(-r2b) 151 | 152 | psi2_computed = sf ** casting(2.0) * \ 153 | term_1[:, None, None] * term_2 * term_3 154 | psi2_computed = np.transpose(psi2_computed, [1, 2, 0]) 155 | 156 | return psi2_computed 157 | -------------------------------------------------------------------------------- /dd_vae/bo/psd_theano.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | import numpy 4 | import scipy.linalg as spla 5 | import theano 6 | from theano.gof import Op, Apply 7 | from theano.tensor import as_tensor_variable 8 | from theano.tensor.nlinalg import matrix_dot 9 | 10 | logger = logging.getLogger(__name__) 11 | 12 | __all__ 
= ['MatrixInversePSD', 'LogDetPSD'] 13 | 14 | 15 | def chol2inv(chol): 16 | return spla.cho_solve((chol, False), numpy.eye(chol.shape[0])) 17 | 18 | 19 | class MatrixInversePSD(Op): 20 | r"""Computes the inverse of a matrix :math:`A`. 21 | Given a square matrix :math:`A`, ``matrix_inverse`` returns a square 22 | matrix :math:`A_{inv}` such that the dot product :math:`A \cdot A_{inv}` 23 | and :math:`A_{inv} \cdot A` equals the identity matrix :math:`I`. 24 | Notes 25 | ----- 26 | When possible, the call to this op will be optimized to the call 27 | of ``solve``. 28 | """ 29 | 30 | __props__ = () 31 | 32 | def __init__(self): 33 | pass 34 | 35 | def make_node(self, x): 36 | x = as_tensor_variable(x) 37 | assert x.ndim == 2 38 | return Apply(self, [x], [x.type()]) 39 | 40 | def perform(self, node, inputs, outputs): 41 | (x,) = inputs 42 | (z,) = outputs 43 | z[0] = chol2inv(spla.cholesky(x, lower=False)).astype(x.dtype) 44 | 45 | def grad(self, inputs, g_outputs): 46 | r"""The gradient function should return 47 | .. math:: V\frac{\partial X^{-1}}{\partial X}, 48 | where :math:`V` corresponds to ``g_outputs`` and :math:`X` to 49 | ``inputs``. Using the `matrix cookbook 50 | `_, 51 | one can deduce that the relation corresponds to 52 | .. math:: (X^{-1} \cdot V^{T} \cdot X^{-1})^T. 53 | """ 54 | x, = inputs 55 | xi = self(x) 56 | gz, = g_outputs 57 | # TT.dot(gz.T,xi) 58 | return [-matrix_dot(xi, gz.T, xi).T] 59 | 60 | def R_op(self, inputs, eval_points): 61 | r"""The gradient function should return 62 | .. math:: \frac{\partial X^{-1}}{\partial X}V, 63 | where :math:`V` corresponds to ``g_outputs`` and :math:`X` to 64 | ``inputs``. Using the `matrix cookbook 65 | `_, 66 | one can deduce that the relation corresponds to 67 | .. math:: X^{-1} \cdot V \cdot X^{-1}. 68 | """ 69 | x, = inputs 70 | xi = self(x) 71 | ev, = eval_points 72 | if ev is None: 73 | return [None] 74 | return [-matrix_dot(xi, ev, xi)] 75 | 76 | def infer_shape(self, node, shapes): 77 | return shapes 78 | 79 | 80 | matrix_inverse_psd = MatrixInversePSD() 81 | 82 | 83 | class LogDetPSD(Op): 84 | """ 85 | Matrix log determinant. Input should be a square matrix. 
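The log determinant is evaluated through a Cholesky factorization,
2 * sum(log(diag(chol(x)))), so the input is additionally assumed to be
symmetric positive definite, as the PSD suffix suggests.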
86 | """ 87 | 88 | __props__ = () 89 | 90 | def make_node(self, x): 91 | x = as_tensor_variable(x) 92 | assert x.ndim == 2 93 | o = theano.tensor.scalar(dtype=x.dtype) 94 | return Apply(self, [x], [o]) 95 | 96 | def perform(self, node, inputs, outputs): 97 | (x,) = inputs 98 | (z,) = outputs 99 | try: 100 | z[0] = numpy.asarray(2 * numpy.sum( 101 | numpy.log(numpy.diag(spla.cholesky(x, lower=False)))), 102 | dtype=x.dtype) 103 | except Exception: 104 | print('Failed to compute log determinant', x) 105 | raise 106 | 107 | def grad(self, inputs, g_outputs): 108 | gz, = g_outputs 109 | x, = inputs 110 | return [gz * matrix_inverse_psd(x).T] 111 | 112 | def infer_shape(self, node, shapes): 113 | return [()] 114 | 115 | def __str__(self): 116 | return "LogDetPSD" 117 | 118 | 119 | log_det_psd = LogDetPSD() 120 | -------------------------------------------------------------------------------- /dd_vae/bo/sparse_gp.py: -------------------------------------------------------------------------------- 1 | ## 2 | # This class represents a node within the network 3 | # 4 | 5 | import sys 6 | 7 | import numpy as np 8 | import scipy.optimize as spo 9 | import scipy.stats as sps 10 | import theano 11 | import theano.tensor as T 12 | from .sparse_gp_theano_internal import Sparse_GP 13 | 14 | 15 | def casting(x): 16 | return np.array(x).astype(theano.config.floatX) 17 | 18 | 19 | def global_optimization(grid, lower, upper, function_grid, function_scalar, 20 | function_scalar_gradient): 21 | grid_values = function_grid(grid) 22 | best = grid_values.argmin() 23 | 24 | # We solve the optimization problem 25 | 26 | X_initial = grid[best: (best + 1), :] 27 | 28 | def objective(X): 29 | X = casting(X) 30 | X = X.reshape((1, grid.shape[1])) 31 | value = function_scalar(X) 32 | gradient_value = function_scalar_gradient(X).flatten() 33 | return np.float(value), gradient_value.astype(np.float) 34 | 35 | lbfgs_bounds = list(zip(lower.tolist(), upper.tolist())) 36 | x_optimal, y_opt, opt_info = spo.fmin_l_bfgs_b(objective, X_initial, 37 | bounds=lbfgs_bounds, 38 | iprint=0, maxiter=150) 39 | x_optimal = x_optimal.reshape((1, grid.shape[1])) 40 | 41 | return x_optimal, y_opt 42 | 43 | 44 | def adam_theano(loss, all_params, learning_rate=0.001): 45 | b1 = 0.9 46 | b2 = 0.999 47 | e = 1e-8 48 | updates = [] 49 | all_grads = theano.grad(loss, all_params) 50 | alpha = learning_rate 51 | t = theano.shared(casting(1.0)) 52 | for theta_previous, g in zip(all_params, all_grads): 53 | m_previous = theano.shared(np.zeros(theta_previous.get_value().shape, 54 | dtype=theano.config.floatX)) 55 | v_previous = theano.shared(np.zeros(theta_previous.get_value().shape, 56 | dtype=theano.config.floatX)) 57 | m = b1 * m_previous + (1 - b1) * g 58 | v = b2 * v_previous + (1 - b2) * g ** 2 59 | m_hat = m / (1 - b1 ** t) 60 | v_hat = v / (1 - b2 ** t) 61 | theta = theta_previous - (alpha * m_hat) / ( 62 | T.sqrt(v_hat) + e) # (Update parameters) 63 | updates.append((m_previous, m)) 64 | updates.append((v_previous, v)) 65 | updates.append((theta_previous, theta)) 66 | updates.append((t, t + 1.)) 67 | return updates 68 | 69 | 70 | class SparseGP: 71 | """ 72 | The training_targets are the Y's which in the case of regression are 73 | real numbers in the case of binary classification are 1 or -1 and in 74 | the case of multiclass classification are 0, 1, 2,.. 
n_class - 1 75 | """ 76 | def __init__(self, input_means, input_vars, training_targets, 77 | n_inducing_points): 78 | 79 | self.input_means = theano.shared( 80 | value=input_means.astype(theano.config.floatX), borrow=True, 81 | name='X') 82 | self.input_vars = theano.shared( 83 | value=input_vars.astype(theano.config.floatX), borrow=True, 84 | name='X') 85 | self.original_training_targets = theano.shared( 86 | value=training_targets.astype(theano.config.floatX), borrow=True, 87 | name='y') 88 | self.training_targets = self.original_training_targets 89 | 90 | self.n_points = input_means.shape[0] 91 | self.d_input = input_means.shape[1] 92 | 93 | self.sparse_gp = Sparse_GP(n_inducing_points, self.n_points, 94 | self.d_input, self.input_means, 95 | self.input_vars, self.training_targets) 96 | 97 | self.set_for_prediction = False 98 | self.predict_function = None 99 | 100 | def initialize(self): 101 | self.sparse_gp.initialize() 102 | 103 | def setForTraining(self): 104 | self.sparse_gp.setForTraining() 105 | 106 | def setForPrediction(self): 107 | self.sparse_gp.setForPrediction() 108 | 109 | def get_params(self): 110 | return self.sparse_gp.get_params() 111 | 112 | def set_params(self, params): 113 | self.sparse_gp.set_params(params) 114 | 115 | def getEnergy(self): 116 | self.sparse_gp.compute_output() 117 | return self.sparse_gp.getContributionToEnergy()[0, 0] 118 | 119 | def predict(self, means_test, vars_test): 120 | 121 | self.setForPrediction() 122 | 123 | means_test = means_test.astype(theano.config.floatX) 124 | vars_test = vars_test.astype(theano.config.floatX) 125 | 126 | if self.predict_function is None: 127 | self.sparse_gp.compute_output() 128 | predictions = self.sparse_gp.getPredictedValues() 129 | 130 | X = T.matrix('X', dtype=theano.config.floatX) 131 | Z = T.matrix('Z', dtype=theano.config.floatX) 132 | 133 | self.predict_function = theano.function([X, Z], predictions, 134 | givens={ 135 | self.input_means: X, 136 | self.input_vars: Z}) 137 | 138 | predicted_values = self.predict_function(means_test, vars_test) 139 | 140 | self.setForTraining() 141 | 142 | return predicted_values 143 | 144 | # This trains the network via LBFGS as implemented in scipy 145 | # (slow but good for small datasets) 146 | 147 | def train_via_LBFGS(self, input_means, input_vars, training_targets, 148 | max_iterations=500): 149 | 150 | # We initialize the network and get the initial parameters 151 | 152 | input_means = input_means.astype(theano.config.floatX) 153 | input_vars = input_vars.astype(theano.config.floatX) 154 | training_targets = training_targets.astype(theano.config.floatX) 155 | self.input_means.set_value(input_means) 156 | self.input_vars.set_value(input_vars) 157 | self.original_training_targets.set_value(training_targets) 158 | 159 | self.initialize() 160 | self.setForTraining() 161 | 162 | X = T.matrix('X', dtype=theano.config.floatX) 163 | Z = T.matrix('Z', dtype=theano.config.floatX) 164 | y = T.matrix('y', dtype=theano.config.floatX) 165 | e = self.getEnergy() 166 | energy = theano.function([X, Z, y], e, givens={ 167 | self.input_means: X, 168 | self.input_vars: Z, 169 | self.training_targets: y}) 170 | all_params = self.get_params() 171 | energy_grad = theano.function([X, Z, y], T.grad(e, all_params), 172 | givens={self.input_means: X, 173 | self.input_vars: Z, 174 | self.training_targets: y}) 175 | 176 | initial_params = theano.function([], all_params)() 177 | 178 | params_shapes = [s.shape for s in initial_params] 179 | 180 | def de_vectorize_params(params): 181 | ret = [] 
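# scipy's L-BFGS optimizer works on a single flat parameter vector, so the loop below
# slices np.prod(shape) entries back out for every matrix or vector parameter (and one
# entry for each scalar) to restore the original list of parameter arrays.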
182 | for shape in params_shapes: 183 | if len(shape) == 2: 184 | ret.append(params[: np.prod(shape)].reshape(shape)) 185 | params = params[np.prod(shape):] 186 | elif len(shape) == 1: 187 | ret.append(params[: np.prod(shape)]) 188 | params = params[np.prod(shape):] 189 | else: 190 | ret.append(params[0]) 191 | params = params[1:] 192 | return ret 193 | 194 | def vectorize_params(params): 195 | return np.concatenate([s.flatten() for s in params]) 196 | 197 | def objective(params): 198 | 199 | params = de_vectorize_params(params) 200 | self.set_params(params) 201 | energy_value = energy(input_means, input_vars, training_targets) 202 | gradient_value = energy_grad(input_means, input_vars, 203 | training_targets) 204 | 205 | return -energy_value, -vectorize_params(gradient_value) 206 | 207 | # We create a theano function that evaluates the energy 208 | 209 | initial_params = vectorize_params(initial_params) 210 | x_opt, y_opt, opt_info = spo.fmin_l_bfgs_b(objective, initial_params, 211 | bounds=None, iprint=1, 212 | maxiter=max_iterations) 213 | 214 | self.set_params(de_vectorize_params(x_opt)) 215 | 216 | return y_opt 217 | 218 | def train_via_ADAM(self, input_means, input_vars, training_targets, 219 | input_means_test, input_vars_test, test_targets, 220 | max_iterations=500, minibatch_size=4000, 221 | learning_rate=1e-3, ignoroe_variances=True): 222 | 223 | input_means = input_means.astype(theano.config.floatX) 224 | input_vars = input_vars.astype(theano.config.floatX) 225 | training_targets = training_targets.astype(theano.config.floatX) 226 | n_data_points = input_means.shape[0] 227 | selected_points = np.random.choice(n_data_points, n_data_points, 228 | replace=False)[ 229 | 0: min(n_data_points, minibatch_size)] 230 | self.input_means.set_value(input_means[selected_points, :]) 231 | self.input_vars.set_value(input_vars[selected_points, :]) 232 | self.original_training_targets.set_value( 233 | training_targets[selected_points, :]) 234 | 235 | print('Initializing network') 236 | sys.stdout.flush() 237 | self.setForTraining() 238 | self.initialize() 239 | 240 | X = T.matrix('X', dtype=theano.config.floatX) 241 | Z = T.matrix('Z', dtype=theano.config.floatX) 242 | y = T.matrix('y', dtype=theano.config.floatX) 243 | 244 | e = self.getEnergy() 245 | 246 | all_params = self.get_params() 247 | 248 | print('Compiling adam updates') 249 | sys.stdout.flush() 250 | 251 | process_minibatch_adam = theano.function( 252 | [X, Z, y], -e, 253 | updates=adam_theano(-e, 254 | all_params, 255 | learning_rate), 256 | givens={self.input_means: X, 257 | self.input_vars: Z, 258 | self.original_training_targets: y} 259 | ) 260 | 261 | # Main loop of the optimization 262 | 263 | print('Training') 264 | sys.stdout.flush() 265 | n_batches = int(np.ceil(1.0 * n_data_points / minibatch_size)) 266 | for j in range(max_iterations): 267 | suffle = np.random.choice(n_data_points, n_data_points, 268 | replace=False) 269 | input_means = input_means[suffle, :] 270 | input_vars = input_vars[suffle, :] 271 | training_targets = training_targets[suffle, :] 272 | 273 | for i in range(n_batches): 274 | minibatch_data_means = input_means[i * minibatch_size: min( 275 | (i + 1) * minibatch_size, n_data_points), :] 276 | minibatch_data_vars = input_vars[i * minibatch_size: min( 277 | (i + 1) * minibatch_size, n_data_points), :] 278 | minibatch_targets = training_targets[i * minibatch_size: min( 279 | (i + 1) * minibatch_size, n_data_points), :] 280 | 281 | process_minibatch_adam(minibatch_data_means, 282 | minibatch_data_vars, 283 | 
minibatch_targets) 284 | 285 | pred, uncert = self.predict(input_means_test, input_vars_test) 286 | test_error = np.sqrt(np.mean((pred - test_targets) ** 2)) 287 | test_ll = np.mean( 288 | sps.norm.logpdf(pred - test_targets, scale=np.sqrt(uncert))) 289 | 290 | print(f'Epoch: {j}') 291 | print('Test error: {} Test ll: {}'.format(test_error, test_ll)) 292 | sys.stdout.flush() 293 | 294 | pred = np.zeros((0, 1)) 295 | uncert = np.zeros((0, uncert.shape[1])) 296 | for i in range(n_batches): 297 | minibatch_data_means = input_means[i * minibatch_size: min( 298 | (i + 1) * minibatch_size, n_data_points), :] 299 | minibatch_data_vars = input_vars[i * minibatch_size: min( 300 | (i + 1) * minibatch_size, n_data_points), :] 301 | pred_new, uncert_new = self.predict(minibatch_data_means, 302 | minibatch_data_vars) 303 | pred = np.concatenate((pred, pred_new), 0) 304 | uncert = np.concatenate((uncert, uncert_new), 0) 305 | 306 | training_error = np.sqrt(np.mean((pred - training_targets) ** 2)) 307 | training_ll = np.mean(sps.norm.logpdf(pred - training_targets, 308 | scale=np.sqrt(uncert))) 309 | 310 | print('Train error: {} Train ll: {}'.format(training_error, 311 | training_ll)) 312 | sys.stdout.flush() 313 | 314 | def get_incumbent(self, grid, lower, upper): 315 | 316 | self.sparse_gp.compute_output() 317 | m, v = self.sparse_gp.getPredictedValues() 318 | 319 | X = T.matrix('X', dtype=theano.config.floatX) 320 | function_grid = theano.function([X], m, 321 | givens={self.input_means: X, 322 | self.input_vars: 0 * X}) 323 | function_scalar = theano.function([X], m[0, 0], 324 | givens={self.input_means: X, 325 | self.input_vars: 0 * X}) 326 | function_scalar_gradient = theano.function( 327 | [X], T.grad(m[0, 0], self.input_means), 328 | givens={self.input_means: X, 329 | self.input_vars: 0 * X}) 330 | 331 | return global_optimization(grid, lower, upper, function_grid, 332 | function_scalar, 333 | function_scalar_gradient)[1] 334 | 335 | def optimize_ei(self, grid, lower, upper, incumbent): 336 | 337 | X = T.matrix('X', dtype=theano.config.floatX) 338 | log_ei = self.sparse_gp.compute_log_ei(X, incumbent) 339 | 340 | function_grid = theano.function([X], -log_ei) 341 | function_scalar = theano.function([X], -log_ei[0, 0]) 342 | function_scalar_gradient = theano.function([X], 343 | -T.grad(log_ei[0, 0], X)) 344 | 345 | return \ 346 | global_optimization(grid, lower, upper, function_grid, 347 | function_scalar, 348 | function_scalar_gradient)[0] 349 | 350 | def batched_greedy_ei(self, q, lower, upper, n_samples=1): 351 | 352 | self.setForPrediction() 353 | 354 | grid_size = 10000 355 | grid = casting( 356 | lower + np.random.rand(grid_size, self.d_input) * (upper - lower)) 357 | 358 | incumbent = self.get_incumbent(grid, lower, upper) 359 | X_numpy = self.optimize_ei(grid, lower, upper, incumbent) 360 | randomness_numpy = casting( 361 | 0 * np.random.randn(X_numpy.shape[0], n_samples).astype( 362 | theano.config.floatX)) 363 | 364 | randomness = theano.shared( 365 | value=randomness_numpy.astype(theano.config.floatX), 366 | name='randomness', borrow=True) 367 | X = theano.shared(value=X_numpy.astype(theano.config.floatX), name='X', 368 | borrow=True) 369 | x = T.matrix('x', dtype=theano.config.floatX) 370 | log_ei = self.sparse_gp.compute_log_averaged_ei(x, X, randomness, 371 | incumbent) 372 | 373 | function_grid = theano.function([x], -log_ei) 374 | function_scalar = theano.function([x], -log_ei[0]) 375 | function_scalar_gradient = theano.function([x], -T.grad(log_ei[0], x)) 376 | 377 | # We optimize 
the ei in a greedy manner 378 | 379 | for i in range(1, q): 380 | new_point = global_optimization(grid, lower, upper, function_grid, 381 | function_scalar, 382 | function_scalar_gradient)[0] 383 | X_numpy = casting(np.concatenate([X_numpy, new_point], 0)) 384 | randomness_numpy = casting( 385 | 0 * np.random.randn(X_numpy.shape[0], n_samples).astype( 386 | theano.config.floatX)) 387 | X.set_value(X_numpy) 388 | randomness.set_value(randomness_numpy) 389 | 390 | m, v = self.predict(X_numpy, 0 * X_numpy) 391 | 392 | return X_numpy 393 | -------------------------------------------------------------------------------- /dd_vae/bo/sparse_gp_theano_internal.py: -------------------------------------------------------------------------------- 1 | import math 2 | 3 | import numpy as np 4 | import theano 5 | import theano.tensor as T 6 | from theano.tensor.slinalg import Cholesky as MatrixChol 7 | 8 | from .gauss import casting, compute_kernel, compute_psi1, compute_psi2 9 | from .psd_theano import MatrixInversePSD, LogDetPSD 10 | 11 | 12 | def n_pdf(x): 13 | return 1.0 / T.sqrt(2 * math.pi) * T.exp(-0.5 * x ** 2) 14 | 15 | 16 | def log_n_pdf(x): 17 | return -0.5 * T.log(2 * math.pi) - 0.5 * x ** 2 18 | 19 | 20 | def n_cdf(x): 21 | return 0.5 * (1.0 + T.erf(x / T.sqrt(2.0))) 22 | 23 | 24 | def log_n_cdf_approx(x): 25 | return log_n_pdf(x) - T.log(-x - 1 / x + 2 / x ** 3) 26 | 27 | 28 | def log_n_cdf(x): 29 | x = T.switch(T.lt(x, casting(-10)), log_n_cdf_approx(x), T.log(n_cdf(x))) 30 | return x 31 | 32 | 33 | def ratio(x): 34 | x = T.switch( 35 | T.lt(x, casting(-10)), 36 | -(casting(1.0) / x - casting(1.0) / x ** 3 + 37 | casting(3.0) / x ** 5 - casting(15.0) / x ** 7), 38 | n_cdf(x) / n_pdf(x)) 39 | return x 40 | 41 | 42 | def LogSumExp(x, axis=None): 43 | x_max = T.max(x, axis=axis, keepdims=True) 44 | return T.log(T.sum(T.exp(x - x_max), axis=axis, keepdims=True)) + x_max 45 | 46 | 47 | ## 48 | # This class represents a GP node in the network 49 | # 50 | 51 | class Sparse_GP: 52 | """ 53 | n_points are the total number of training points 54 | (that is used for cavity computation) 55 | """ 56 | 57 | def __init__(self, n_inducing_points, n_points, input_d, input_means, 58 | input_vars, training_targets): 59 | 60 | self.ignore_variances = True 61 | self.n_inducing_points = n_inducing_points 62 | self.n_points = n_points 63 | self.input_d = input_d 64 | self.training_targets = training_targets 65 | self.input_means = input_means 66 | self.input_vars = input_vars 67 | 68 | # These are the actual parameters of the posterior distribution 69 | # being optimzied 70 | # covCavity = (Kzz^-1 + LParamPost LParamPost^T * (n - 1) / n) 71 | # and meanCavity = covCavity mParamPost * (n - 1) / n 72 | 73 | initial_value = np.zeros((n_inducing_points, n_inducing_points)) 74 | self.LParamPost = theano.shared( 75 | value=initial_value.astype(theano.config.floatX), 76 | name='LParamPost', borrow=True) 77 | self.mParamPost = theano.shared( 78 | value=initial_value[:, 0: 1].astype(theano.config.floatX), 79 | name='mParamPost', borrow=True) 80 | self.lls = theano.shared( 81 | value=np.zeros(input_d).astype(theano.config.floatX), name='lls', 82 | borrow=True) 83 | self.lsf = theano.shared( 84 | value=np.zeros(1).astype(theano.config.floatX)[0], name='lsf', 85 | borrow=True) 86 | self.z = theano.shared( 87 | value=np.zeros((n_inducing_points, input_d)).astype( 88 | theano.config.floatX), name='z', borrow=True) 89 | self.lvar_noise = theano.shared( 90 | value=casting(0) * np.ones(1).astype(theano.config.floatX)[0], 91 | 
name='lvar_noise', borrow=True) 92 | 93 | self.set_for_training = casting(1.0) 94 | 95 | # We set the level of jitter to use (added to the diagonal of Kzz) 96 | 97 | self.jitter = casting(1e-3) 98 | 99 | def compute_output(self): 100 | 101 | # We compute the output mean 102 | 103 | self.Kzz = compute_kernel(self.lls, self.lsf, self.z, self.z) + T.eye( 104 | self.z.shape[0]) * self.jitter * T.exp(self.lsf) 105 | self.KzzInv = MatrixInversePSD()(self.Kzz) 106 | LLt = T.dot(self.LParamPost, T.transpose(self.LParamPost)) 107 | self.covCavityInv = self.KzzInv + LLt * casting( 108 | self.n_points - self.set_for_training) / casting(self.n_points) 109 | self.covCavity = MatrixInversePSD()(self.covCavityInv) 110 | self.meanCavity = T.dot(self.covCavity, casting( 111 | self.n_points - self.set_for_training) / casting( 112 | self.n_points) * self.mParamPost) 113 | self.KzzInvcovCavity = T.dot(self.KzzInv, self.covCavity) 114 | self.KzzInvmeanCavity = T.dot(self.KzzInv, self.meanCavity) 115 | self.covPosteriorInv = self.KzzInv + LLt 116 | self.covPosterior = MatrixInversePSD()(self.covPosteriorInv) 117 | self.meanPosterior = T.dot(self.covPosterior, self.mParamPost) 118 | self.Kxz = compute_kernel(self.lls, self.lsf, self.input_means, self.z) 119 | self.B = T.dot(self.KzzInvcovCavity, self.KzzInv) - self.KzzInv 120 | v_out = T.exp(self.lsf) + T.dot(self.Kxz * T.dot(self.Kxz, self.B), 121 | T.ones_like(self.z[:, 0: 1])) 122 | 123 | if self.ignore_variances: 124 | 125 | self.output_means = T.dot(self.Kxz, self.KzzInvmeanCavity) 126 | self.output_vars = abs(v_out) + casting(0) * T.sum(self.input_vars) 127 | 128 | else: 129 | 130 | self.EKxz = compute_psi1(self.lls, self.lsf, self.input_means, 131 | self.input_vars, self.z) 132 | self.output_means = T.dot(self.EKxz, self.KzzInvmeanCavity) 133 | 134 | # In other layers we have to compute the expected variance 135 | 136 | self.B2 = T.outer(T.dot(self.KzzInv, self.meanCavity), 137 | T.dot(self.KzzInv, self.meanCavity)) 138 | 139 | exact_output_vars = True 140 | 141 | if exact_output_vars: 142 | 143 | # We compute the exact output variance 144 | 145 | self.psi2 = compute_psi2(self.lls, self.lsf, self.z, 146 | self.input_means, self.input_vars) 147 | ll = T.transpose(self.EKxz[:, None, :] * self.EKxz[:, :, None], 148 | [1, 2, 0]) 149 | kk = T.transpose(self.Kxz[:, None, :] * self.Kxz[:, :, None], 150 | [1, 2, 0]) 151 | v1 = T.transpose(T.sum( 152 | T.sum(T.shape_padaxis(self.B2, 2) * (self.psi2 - ll), 0), 153 | 0, keepdims=True)) 154 | v2 = T.transpose(T.sum( 155 | T.sum(T.shape_padaxis(self.B, 2) * (self.psi2 - kk), 0), 0, 156 | keepdims=True)) 157 | 158 | else: 159 | 160 | # We compute the approximate output variance using 161 | # the unscented kalman filter 162 | 163 | v1 = 0 164 | v2 = 0 165 | 166 | n = self.input_d 167 | for j in range(1, n + 1): 168 | mask = T.zeros_like(self.input_vars) 169 | mask = T.set_subtensor(mask[:, j - 1], 1) 170 | inc = mask * T.sqrt(casting(n) * self.input_vars) 171 | self.kplus = T.sqrt( 172 | casting(1.0) / casting(2 * n)) * compute_kernel( 173 | self.lls, self.lsf, self.input_means + inc, self.z) 174 | self.kminus = T.sqrt( 175 | casting(1.0) / casting(2 * n)) * compute_kernel( 176 | self.lls, self.lsf, self.input_means - inc, self.z) 177 | 178 | v1 += T.dot(self.kplus * T.dot(self.kplus, self.B2), 179 | T.ones_like(self.z[:, 0: 1])) 180 | v1 += T.dot(self.kminus * T.dot(self.kminus, self.B2), 181 | T.ones_like(self.z[:, 0: 1])) 182 | v2 += T.dot(self.kplus * T.dot(self.kplus, self.B), 183 | T.ones_like(self.z[:, 0: 1])) 
184 | v2 += T.dot(self.kminus * T.dot(self.kminus, self.B), 185 | T.ones_like(self.z[:, 0: 1])) 186 | 187 | v1 -= T.dot(self.EKxz * T.dot(self.EKxz, self.B2), 188 | T.ones_like(self.z[:, 0: 1])) 189 | v2 -= T.dot(self.Kxz * T.dot(self.Kxz, self.B), 190 | T.ones_like(self.z[:, 0: 1])) 191 | 192 | self.output_vars = abs(v_out) + abs(v2) + abs(v1) 193 | 194 | self.output_vars = self.output_vars + T.exp(self.lvar_noise) 195 | 196 | return 197 | 198 | def get_params(self): 199 | 200 | return [self.lls, self.lsf, self.z, self.mParamPost, self.LParamPost, 201 | self.lvar_noise] 202 | 203 | def set_params(self, params): 204 | 205 | self.lls.set_value(params[0]) 206 | self.lsf.set_value(params[1]) 207 | self.z.set_value(params[2]) 208 | self.mParamPost.set_value(params[3]) 209 | self.LParamPost.set_value(params[4]) 210 | self.lvar_noise.set_value(params[5]) 211 | 212 | ## 213 | # The next functions compute the log normalizer of each distribution 214 | # (needed for energy computation) 215 | # 216 | 217 | def getLogNormalizerCavity(self): 218 | 219 | assert self.covCavity is not None and \ 220 | self.meanCavity is not None and \ 221 | self.covCavityInv is not None 222 | 223 | return (casting(0.5 * self.n_inducing_points * np.log(2 * np.pi)) + 224 | casting(0.5) * LogDetPSD()(self.covCavity) + 225 | casting(0.5) * T.dot(T.dot( 226 | T.transpose(self.meanCavity), 227 | self.covCavityInv), 228 | self.meanCavity)) 229 | 230 | def getLogNormalizerPrior(self): 231 | 232 | assert self.KzzInv is not None 233 | 234 | return casting( 235 | 0.5 * self.n_inducing_points * np.log(2 * np.pi)) - casting( 236 | 0.5) * LogDetPSD()(self.KzzInv) 237 | 238 | def getLogNormalizerPosterior(self): 239 | 240 | assert self.covPosterior is not None and \ 241 | self.meanPosterior is not None and \ 242 | self.covPosteriorInv is not None 243 | 244 | return (casting(0.5 * self.n_inducing_points * np.log(2 * np.pi)) + 245 | casting(0.5) * LogDetPSD()(self.covPosterior) + 246 | casting(0.5) * T.dot(T.dot(T.transpose(self.meanPosterior), 247 | self.covPosteriorInv), 248 | self.meanPosterior)) 249 | 250 | ## 251 | # We return the contribution to the energy of the node (See last Eq. of 252 | # Sec. 
4 in http://arxiv.org/pdf/1602.04133.pdf v1) 253 | # 254 | 255 | def getContributionToEnergy(self): 256 | 257 | assert self.n_points is not None and \ 258 | self.covCavity is not None and \ 259 | self.covPosterior is not None and \ 260 | self.input_means is not None 261 | 262 | logZpost = self.getLogNormalizerPosterior() 263 | logZprior = self.getLogNormalizerPrior() 264 | logZcav = self.getLogNormalizerCavity() 265 | 266 | # We multiply by the minibatch size and normalize terms according to 267 | # the total number of points (n_points) 268 | 269 | return (((logZcav - logZpost) + 270 | logZpost / casting(self.n_points) - 271 | logZprior / casting(self.n_points)) * 272 | T.cast(self.input_means.shape[0], 'float32') + 273 | T.sum(self.getLogZ())) 274 | 275 | # These methods sets the inducing points to be a random subset of the 276 | # inputs (we should receive more 277 | # inputs than inducing points), the length scales are set to the mean 278 | # of the euclidean distance 279 | 280 | def initialize(self): 281 | 282 | input_means = np.array(theano.function([], self.input_means)()) 283 | 284 | assert input_means.shape[0] >= self.n_inducing_points 285 | 286 | selected_points = np.random.choice(input_means.shape[0], 287 | self.n_inducing_points, 288 | replace=False) 289 | z = input_means[selected_points, :] 290 | 291 | # If we are not in the first layer, we initialize the length 292 | # scales to one 293 | 294 | lls = np.zeros(input_means.shape[1]) 295 | 296 | M = np.outer(np.sum(input_means ** 2, 1), 297 | np.ones(input_means.shape[0])) 298 | dist = M - 2 * np.dot(input_means, input_means.T) + M.T 299 | lls = np.log(0.5 * (np.median( 300 | dist[np.triu_indices(input_means.shape[0], 1)]) + 1e-3)) * np.ones( 301 | input_means.shape[1]) 302 | 303 | self.lls.set_value(lls.astype(theano.config.floatX)) 304 | self.z.set_value(z.astype(theano.config.floatX)) 305 | self.lsf.set_value(np.zeros(1).astype(theano.config.floatX)[0]) 306 | 307 | # We initialize the cavity and the posterior approximation to the prior 308 | # but with a small random mean so that the outputs are not equal to 309 | # zero (otherwise the output of the gp will be zero and 310 | # the next layer will be initialized improperly). 311 | 312 | # If we are not in the first layer, we reduce the variance of 313 | # the L and m 314 | 315 | L = np.random.normal( 316 | size=(self.n_inducing_points, self.n_inducing_points)) * 1.0 317 | m = self.training_targets.get_value()[selected_points, :] 318 | 319 | self.LParamPost.set_value(L.astype(theano.config.floatX)) 320 | self.mParamPost.set_value(m.astype(theano.config.floatX)) 321 | 322 | # This sets the node for prediction. It basically switches the cavity 323 | # distribution to be the posterior approximation 324 | # Once set in this state the network cannot be trained any more. 
325 | 326 | def setForPrediction(self): 327 | 328 | if self.set_for_training == casting(1.0): 329 | self.set_for_training = casting(0.0) 330 | 331 | # This function undoes the work done by the previous method 332 | 333 | def setForTraining(self): 334 | 335 | # We only do something if the node was set for prediction 336 | # instead of training 337 | 338 | if self.set_for_training == casting(0.0): 339 | self.set_for_training == casting(1.0) 340 | 341 | def getLogZ(self): 342 | 343 | return -0.5 * T.log(2 * np.pi * self.output_vars) - \ 344 | 0.5 * (self.training_targets - 345 | self.output_means) ** 2 / self.output_vars 346 | 347 | def getPredictedValues(self): 348 | 349 | return self.output_means, self.output_vars 350 | 351 | def get_training_targets(self): 352 | return self.training_targets 353 | 354 | def set_training_targets(self, training_targets): 355 | self.training_targets = training_targets 356 | 357 | def compute_log_ei(self, x, incumbent): 358 | 359 | Kzz = compute_kernel(self.lls, self.lsf, self.z, self.z) + T.eye( 360 | self.z.shape[0]) * self.jitter * T.exp(self.lsf) 361 | KzzInv = MatrixInversePSD()(Kzz) 362 | LLt = T.dot(self.LParamPost, T.transpose(self.LParamPost)) 363 | covCavityInv = KzzInv + LLt * casting( 364 | self.n_points - self.set_for_training) / casting(self.n_points) 365 | covCavity = MatrixInversePSD()(covCavityInv) 366 | meanCavity = T.dot(covCavity, casting( 367 | self.n_points - self.set_for_training) / casting( 368 | self.n_points) * self.mParamPost) 369 | KzzInvcovCavity = T.dot(KzzInv, covCavity) 370 | KzzInvmeanCavity = T.dot(KzzInv, meanCavity) 371 | Kxz = compute_kernel(self.lls, self.lsf, x, self.z) 372 | B = T.dot(KzzInvcovCavity, KzzInv) - KzzInv 373 | v_out = T.exp(self.lsf) + T.dot(Kxz * T.dot(Kxz, B), T.ones_like( 374 | self.z[:, 0: 1])) # + T.exp(self.lvar_noise) 375 | m_out = T.dot(Kxz, KzzInvmeanCavity) 376 | s = (incumbent - m_out) / T.sqrt(v_out) 377 | 378 | log_ei = T.log( 379 | (incumbent - m_out) * ratio(s) + T.sqrt(v_out)) + log_n_pdf(s) 380 | 381 | return log_ei 382 | 383 | def compute_log_averaged_ei(self, x, X, randomness, incumbent): 384 | 385 | # We compute the old predictive mean at x 386 | 387 | Kzz = compute_kernel(self.lls, self.lsf, self.z, self.z) + T.eye( 388 | self.z.shape[0]) * self.jitter * T.exp(self.lsf) 389 | KzzInv = MatrixInversePSD()(Kzz) 390 | LLt = T.dot(self.LParamPost, T.transpose(self.LParamPost)) 391 | covCavityInv = KzzInv + LLt * casting( 392 | self.n_points - self.set_for_training) / casting(self.n_points) 393 | covCavity = MatrixInversePSD()(covCavityInv) 394 | meanCavity = T.dot(covCavity, casting( 395 | self.n_points - self.set_for_training) / casting( 396 | self.n_points) * self.mParamPost) 397 | KzzInvmeanCavity = T.dot(KzzInv, meanCavity) 398 | Kxz = compute_kernel(self.lls, self.lsf, x, self.z) 399 | m_old_x = T.dot(Kxz, KzzInvmeanCavity) 400 | 401 | # We compute the old predictive mean at X 402 | 403 | KXz = compute_kernel(self.lls, self.lsf, X, self.z) 404 | 405 | # We compute the required cross covariance matrices 406 | 407 | KXX = compute_kernel(self.lls, self.lsf, X, X) - T.dot( 408 | T.dot(KXz, KzzInv), KXz.T) + T.eye( 409 | X.shape[0]) * self.jitter * T.exp(self.lsf) 410 | KXXInv = MatrixInversePSD()(KXX) 411 | 412 | KxX = compute_kernel(self.lls, self.lsf, x, X) 413 | xX = T.concatenate([x, X], 0) 414 | KxXz = compute_kernel(self.lls, self.lsf, xX, self.z) 415 | KxX = KxX - T.dot(T.dot(KxXz[0: x.shape[0], :], KzzInv), 416 | KxXz[x.shape[0]: xX.shape[0], :].T) 417 | 418 | # We compute the new 
posterior mean 419 | 420 | samples_internal = T.dot(MatrixChol()(KXX), randomness) 421 | 422 | new_predictive_mean = T.tile(m_old_x, [1, randomness.shape[1]]) + \ 423 | T.dot(KxX, T.dot(KXXInv, samples_internal)) 424 | 425 | # We compute the new posterior variance 426 | 427 | z_expanded = T.concatenate([self.z, X], 0) 428 | Kxz_expanded = compute_kernel(self.lls, self.lsf, x, z_expanded) 429 | Kzz_expanded = compute_kernel(self.lls, self.lsf, z_expanded, 430 | z_expanded) + T.eye( 431 | z_expanded.shape[0]) * self.jitter * T.exp(self.lsf) 432 | Kzz_expandedInv = MatrixInversePSD()(Kzz_expanded) 433 | v_out = T.exp(self.lsf) - T.dot( 434 | Kxz_expanded * T.dot(Kxz_expanded, Kzz_expandedInv), 435 | T.ones_like(z_expanded[:, 0: 1])) 436 | new_predictive_var = T.tile(v_out, [1, randomness.shape[1]]) 437 | 438 | s = (incumbent - new_predictive_mean) / T.sqrt(new_predictive_var) 439 | 440 | log_ei = T.log((incumbent - new_predictive_mean) * ratio(s) + T.sqrt( 441 | new_predictive_var)) + log_n_pdf(s) 442 | 443 | return T.mean(LogSumExp(log_ei, 1), 1) 444 | -------------------------------------------------------------------------------- /dd_vae/bo/utils.py: -------------------------------------------------------------------------------- 1 | import networkx as nx 2 | from rdkit.Chem import rdmolops 3 | 4 | 5 | def max_ring_penalty(mol): 6 | cycle_list = nx.cycle_basis( 7 | nx.Graph(rdmolops.GetAdjacencyMatrix(mol))) 8 | if len(cycle_list) == 0: 9 | cycle_length = 0 10 | else: 11 | cycle_length = max([len(j) for j in cycle_list]) 12 | return -max(0, cycle_length - 6) 13 | -------------------------------------------------------------------------------- /dd_vae/proposals.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | 4 | 5 | def get_proposals(): 6 | ''' 7 | Returns a dict with implemented finite support proposals 8 | ''' 9 | return { 10 | 'uniform': UniformProposal, 11 | 'triangular': TriangularProposal, 12 | 'epanechnikov': EpanechnikovProposal, 13 | 'quartic': QuarticProposal, 14 | 'triweight': TriweightProposal, 15 | 'tricube': TricubeProposal, 16 | 'cosine': CosineProposal, 17 | 'gaussian': GaussianProposal 18 | } 19 | 20 | 21 | class Proposal: 22 | def __init__(self, eps=1e-9): 23 | self.buffer = np.zeros(0) 24 | self.eps = eps 25 | 26 | def density(self, z): 27 | raise NotImplementedError 28 | 29 | def kl(self, m=0, s=1): 30 | raise NotImplementedError 31 | 32 | def kl_uniform(self, m=0, s=1): 33 | raise NotImplementedError 34 | 35 | def sample(self, m, s): 36 | batch_size = np.prod(m.shape) 37 | uniform_height = self.density(0) 38 | acceptance_rate = 0.5 / uniform_height 39 | up_batch_size = int(batch_size / acceptance_rate) + 1 40 | while self.buffer.shape[0] < batch_size: 41 | sample, rejection_sample = np.split( 42 | np.random.rand(2*up_batch_size), 2 43 | ) 44 | sample = sample * 2 - 1 45 | rejection_sample = rejection_sample * uniform_height 46 | density = self.density(sample) 47 | sample = sample[rejection_sample < density] 48 | self.buffer = np.concatenate((self.buffer, sample), 0) 49 | 50 | sample = self.buffer[:batch_size] 51 | self.buffer = self.buffer[batch_size:] 52 | sample = sample.reshape(*m.shape) 53 | sample = torch.tensor(sample, dtype=m.dtype, device=m.device) 54 | return sample * s + m 55 | 56 | 57 | class UniformProposal(Proposal): 58 | def density(self, z): 59 | return z * 0 + 0.5 60 | 61 | def sample(self, m, s): 62 | sample = np.random.rand(*m.shape) * 2 - 1 63 | sample = 
torch.tensor(sample, dtype=m.dtype, device=m.device) 64 | return sample * s + m 65 | 66 | def kl(self, m, s): 67 | return (0.5 * m**2 + s**2/6 - torch.log(s+self.eps) + 68 | 0.5*np.log(2*np.pi) - np.log(2)).sum(1) 69 | 70 | def kl_uniform(self, m, s): 71 | return (-torch.log(s+self.eps)).sum(1) 72 | 73 | 74 | class TriangularProposal(Proposal): 75 | def density(self, z): 76 | return 1 - np.abs(z) 77 | 78 | def kl(self, m, s): 79 | return (0.5 * m**2 + s**2/12 - torch.log(s+self.eps) + 80 | 0.5*np.log(2*np.pi) - 0.5).sum(1) 81 | 82 | def kl_uniform(self, m, s): 83 | return (-0.5 + np.log(2) - torch.log(s+self.eps)).sum(1) 84 | 85 | 86 | class EpanechnikovProposal(Proposal): 87 | def density(self, z): 88 | return 0.75 * (1 - z**2) 89 | 90 | def kl(self, m, s): 91 | return (0.5 * m**2 + s**2/10 - torch.log(s+self.eps) + 92 | 0.5*np.log(2*np.pi) - 5/3 + np.log(3)).sum(1) 93 | 94 | def kl_uniform(self, m, s): 95 | return (-5/3 + np.log(6)-torch.log(s+self.eps)).sum(1) 96 | 97 | 98 | class QuarticProposal(Proposal): 99 | def density(self, z): 100 | return 15/16 * (1 - z**2)**2 101 | 102 | def kl(self, m, s): 103 | return (0.5 * m**2 + s**2/14 - torch.log(s+self.eps) + 104 | 0.5*np.log(2*np.pi) - 47/15 + np.log(15)).sum(1) 105 | 106 | def kl_uniform(self, m, s): 107 | return (-47/15 + np.log(30) - torch.log(s+self.eps)).sum(1) 108 | 109 | 110 | class TriweightProposal(Proposal): 111 | def density(self, z): 112 | return 35/32 * (1 - z**2)**3 113 | 114 | def kl(self, m, s): 115 | return (0.5 * m**2 + s**2/18 - torch.log(s+self.eps) + 116 | 0.5*np.log(2*np.pi) - 319/70 + np.log(70)).sum(1) 117 | 118 | def kl_uniform(self, m, s): 119 | return (-319/70 + np.log(140) - torch.log(s+self.eps)).sum(1) 120 | 121 | 122 | class TricubeProposal(Proposal): 123 | def density(self, z): 124 | return 70/81 * (1 - np.abs(z)**3)**3 125 | 126 | def kl(self, m, s): 127 | return (0.5 * m**2 + 35*s**2/486 - torch.log(s+self.eps) + 128 | 0.5*np.log(2*np.pi) + np.pi * np.sqrt(3) / 2 - 129 | 1111/140 + np.log(70*np.sqrt(3))).sum(1) 130 | 131 | def kl_uniform(self, m, s): 132 | return (np.pi * np.sqrt(3) / 2 - 1111/140 + 133 | np.log(140*np.sqrt(3)) - torch.log(s+self.eps)).sum(1) 134 | 135 | 136 | class CosineProposal(Proposal): 137 | def density(self, z): 138 | return np.pi/4 * np.cos(np.pi * z / 2) 139 | 140 | def kl(self, m, s): 141 | return (0.5 * m**2 + (0.5 - 4 / np.pi**2)*s**2 - 142 | torch.log(s+self.eps) + 0.5*np.log(2*np.pi) - 143 | 1 + np.log(np.pi/2)).sum(1) 144 | 145 | def kl_uniform(self, m, s): 146 | return (-1 + np.log(np.pi) - torch.log(s+self.eps)).sum(1) 147 | 148 | 149 | class GaussianProposal(Proposal): 150 | def density(self, z): 151 | return np.exp(-(z**2)/2) / np.sqrt(2 * np.pi) 152 | 153 | def sample(self, m, s): 154 | sample = torch.randn(*m.shape, dtype=m.dtype, device=m.device) 155 | return sample * s + m 156 | 157 | def kl(self, m, s): 158 | return 0.5 * (m**2 + s**2 - 2 * torch.log(s+self.eps) - 1).sum(1) 159 | 160 | def kl_uniform(self, m, s): 161 | raise ValueError("KL(N || U) = -inf") 162 | -------------------------------------------------------------------------------- /dd_vae/utils.py: -------------------------------------------------------------------------------- 1 | import random 2 | import numpy as np 3 | import torch 4 | from torch import nn 5 | from torch.nn.functional import softplus 6 | 7 | 8 | def prepare_seed(seed=777, n_jobs=8): 9 | random.seed(seed) 10 | np.random.seed(seed) 11 | torch.manual_seed(seed) 12 | torch.backends.cudnn.deterministic = True 13 | 
torch.backends.cudnn.benchmark = False 14 | torch.set_num_threads(n_jobs) 15 | 16 | 17 | def smoothed_log_indicator(x, temperature): 18 | return softplus(-x/temperature + np.log(1/temperature - 1)) 19 | 20 | 21 | def combine_loss(loss_components, weights): 22 | if len(weights) == 0: 23 | raise ValueError("Specify at least one weight") 24 | loss = 0 25 | for component in weights: 26 | loss += loss_components[component] * weights[component] 27 | return loss 28 | 29 | 30 | class Reshape(nn.Module): 31 | def __init__(self, *shape): 32 | super().__init__() 33 | shape = shape or [-1] 34 | self.shape = shape 35 | 36 | def forward(self, x): 37 | return x.view(x.shape[0], *self.shape) 38 | 39 | 40 | class BaseModel(nn.Module): 41 | def __init__(self): 42 | super().__init__() 43 | self.device = 'cpu' 44 | 45 | def save(self, path): 46 | device = self.device 47 | self.to('cpu') 48 | weights = self.state_dict() 49 | data = { 50 | 'weights': weights, 51 | 'config': self.config 52 | } 53 | torch.save(data, path) 54 | self.to(device) 55 | 56 | @classmethod 57 | def load(cls, path, restore_weights=True): 58 | data = torch.load(path) 59 | model = cls(**data['config']) 60 | if restore_weights: 61 | model.load_state_dict(data['weights']) 62 | return model 63 | 64 | def to(self, device): 65 | self.device = device 66 | super().to(device) 67 | return self 68 | 69 | 70 | class LinearGrowth: 71 | def __init__(self, start, end, start_epoch, end_epoch, log=False): 72 | self.start = start 73 | self.end = end 74 | self.start_epoch = start_epoch 75 | self.end_epoch = end_epoch 76 | self.log = log 77 | if log: 78 | self.start = np.log10(start) 79 | self.end = np.log10(end) 80 | 81 | def __call__(self, epoch): 82 | if epoch <= self.start_epoch: 83 | value = self.start 84 | elif epoch >= self.end_epoch: 85 | value = self.end 86 | else: 87 | delta = (self.end - self.start) / ( 88 | self.end_epoch - self.start_epoch) 89 | value = delta * (epoch - self.start_epoch) + self.start 90 | if self.log: 91 | value = 10**value 92 | return value 93 | 94 | 95 | def to_onehot(x, n): 96 | one_hot = torch.zeros(x.shape[0], n) 97 | one_hot.scatter_(1, x[:, None].cpu(), 1) 98 | one_hot = one_hot.to(x.device) 99 | return one_hot 100 | 101 | 102 | class SpecialTokens: 103 | bos = '' 104 | eos = '' 105 | pad = '' 106 | unk = '' 107 | 108 | 109 | class CharVocab: 110 | @classmethod 111 | def from_data(cls, data, *args, **kwargs): 112 | chars = set() 113 | for string in data: 114 | chars.update(string) 115 | 116 | return cls(chars, *args, **kwargs) 117 | 118 | def __init__(self, chars, st=SpecialTokens): 119 | if (st.bos in chars) or (st.eos in chars) or \ 120 | (st.pad in chars) or (st.unk in chars): 121 | raise ValueError('SpecialTokens in chars') 122 | 123 | all_syms = sorted(list(chars)) + [st.bos, st.eos, st.pad, st.unk] 124 | 125 | self.st = st 126 | self.c2i = {c: i for i, c in enumerate(all_syms)} 127 | self.i2c = {i: c for i, c in enumerate(all_syms)} 128 | 129 | def __len__(self): 130 | return len(self.c2i) 131 | 132 | @property 133 | def bos(self): 134 | return self.c2i[self.st.bos] 135 | 136 | @property 137 | def eos(self): 138 | return self.c2i[self.st.eos] 139 | 140 | @property 141 | def pad(self): 142 | return self.c2i[self.st.pad] 143 | 144 | @property 145 | def unk(self): 146 | return self.c2i[self.st.unk] 147 | 148 | def char2id(self, char): 149 | if char not in self.c2i: 150 | return self.unk 151 | 152 | return self.c2i[char] 153 | 154 | def id2char(self, id): 155 | if id not in self.i2c: 156 | return self.st.unk 157 | 158 | 
return self.i2c[id] 159 | 160 | def string2ids(self, string, add_bos=False, add_eos=False): 161 | ids = [self.char2id(c) for c in string] 162 | 163 | if add_bos: 164 | ids = [self.bos] + ids 165 | if add_eos: 166 | ids = ids + [self.eos] 167 | 168 | return ids 169 | 170 | def ids2string(self, ids, rem_bos=True, rem_eos=True): 171 | if len(ids) == 0: 172 | return '' 173 | if rem_bos and ids[0] == self.bos: 174 | ids = ids[1:] 175 | if rem_eos and ids[-1] == self.eos: 176 | ids = ids[:-1] 177 | 178 | string = ''.join([self.id2char(id) for id in ids]) 179 | 180 | return string 181 | 182 | 183 | class StringDataset: 184 | def __init__(self, vocab, data): 185 | self.tokens = [vocab.string2ids(s) for s in data] 186 | self.data = data 187 | self.bos = vocab.bos 188 | self.eos = vocab.eos 189 | 190 | def __len__(self): 191 | return len(self.tokens) 192 | 193 | def __getitem__(self, index): 194 | tokens = self.tokens[index] 195 | with_bos = torch.tensor([self.bos] + tokens, dtype=torch.long) 196 | with_eos = torch.tensor(tokens + [self.eos], dtype=torch.long) 197 | return with_bos, with_eos, self.data[index] 198 | 199 | 200 | def collate(batch, pad, return_data=False): 201 | with_bos, with_eos, data = list(zip(*batch)) 202 | lengths = [len(x) for x in with_bos] 203 | order = np.argsort(lengths)[::-1] 204 | with_bos = [with_bos[i] for i in order] 205 | with_eos = [with_eos[i] for i in order] 206 | lengths = [lengths[i] for i in order] 207 | with_bos = torch.nn.utils.rnn.pad_sequence( 208 | with_bos, padding_value=pad 209 | ) 210 | with_eos = torch.nn.utils.rnn.pad_sequence( 211 | with_eos, padding_value=pad 212 | ) 213 | if return_data: 214 | data = np.array(data)[order] 215 | return with_bos, with_eos, lengths, data 216 | return with_bos, with_eos, lengths 217 | 218 | 219 | def batch_to_device(batch, device): 220 | return [ 221 | x.to(device) if isinstance(x, torch.Tensor) else x 222 | for x in batch 223 | ] 224 | -------------------------------------------------------------------------------- /dd_vae/vae_base.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch import nn 3 | from .utils import BaseModel, batch_to_device 4 | from .proposals import get_proposals 5 | 6 | 7 | class VAE(BaseModel): 8 | """ 9 | Generative Recurrent Autoregressive Decoder 10 | """ 11 | def __init__(self, prior, proposal): 12 | super().__init__() 13 | if prior not in ['gaussian', 'uniform']: 14 | raise ValueError( 15 | "Supported priors are 'gaussian' and 'uniform'") 16 | if proposal not in get_proposals(): 17 | proposals = list(get_proposals().keys()) 18 | raise ValueError( 19 | f"Supported proposals are {proposals}") 20 | 21 | self.config = { 22 | 'proposal': proposal, 23 | 'prior': prior 24 | } 25 | self.proposal = get_proposals()[proposal]() 26 | self.prior = prior 27 | 28 | def encode(self, batch): 29 | """ 30 | Encodes batch and returns latent codes 31 | """ 32 | raise NotImplementedError 33 | 34 | def decode(self, batch, z=None, state=None): 35 | """ 36 | Decodes batch and returns logits and intermediate states 37 | """ 38 | raise NotImplementedError 39 | 40 | def compute_metrics(self, batch, logits): 41 | return {} 42 | 43 | def language_model_nll(self, with_eos, logits): 44 | loss = nn.NLLLoss(ignore_index=self.vocab.pad)( 45 | logits.transpose(1, 2), with_eos) 46 | return loss 47 | 48 | def encoder_parameters(self): 49 | raise NotImplementedError 50 | 51 | def decoder_parameters(self): 52 | raise NotImplementedError 53 | 54 | def get_mu_std(self, 
z): 55 | dim = z.shape[1] // 2 56 | mu, logstd = z.split(dim, 1) 57 | std = logstd.exp() 58 | 59 | if self.prior == 'uniform': 60 | left = torch.sigmoid(mu - std) 61 | right = torch.sigmoid(mu + std) 62 | mu = (right + left) - 1 63 | std = (right - left) 64 | 65 | return mu, std 66 | 67 | def sample_kl(self, z, mu_only=False): 68 | mu, std = self.get_mu_std(z) 69 | if self.prior == 'gaussian': 70 | kl_loss = self.proposal.kl(mu, std).mean() 71 | elif self.prior == 'uniform': 72 | kl_loss = self.proposal.kl_uniform(mu, std).mean() 73 | else: 74 | raise ValueError 75 | 76 | if mu_only: 77 | sample = mu 78 | else: 79 | sample = self.proposal.sample(mu, std) 80 | return sample, kl_loss 81 | 82 | def argmax_nll(self, batch, logits, temperature): 83 | raise NotImplementedError 84 | 85 | def sample_nll(self, batch, logits): 86 | raise NotImplementedError 87 | 88 | def get_loss_components(self, batch, temperature): 89 | batch = batch_to_device(batch, self.device) 90 | z = self.encode(batch) 91 | sample, kl_loss = self.sample_kl(z, not self.variational) 92 | logits, _ = self.decode(batch, z=sample) 93 | metrics = self.compute_metrics(batch, logits) 94 | language_model_nll = self.sample_nll(batch, logits) 95 | argmax_nll = self.argmax_nll(batch, logits, temperature) 96 | loss_components = { 97 | 'sample_nll': language_model_nll, 98 | 'kl_loss': kl_loss, 99 | 'argmax_nll': argmax_nll, 100 | **metrics 101 | } 102 | return loss_components 103 | 104 | def sample(self, batch_size=1, mode='argmax', z=None): 105 | raise NotImplementedError 106 | -------------------------------------------------------------------------------- /dd_vae/vae_mnist.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch import nn 3 | from .utils import smoothed_log_indicator, Reshape 4 | from .vae_base import VAE 5 | 6 | 7 | class VAE_MNIST(VAE): 8 | def __init__(self, layer_sizes, latent_size, 9 | proposal='tricube', 10 | prior='gaussian', 11 | variational=True, 12 | image_size=28, 13 | channels=1): 14 | super().__init__(prior=prior, proposal=proposal) 15 | self.config.update({ 16 | 'layer_sizes': layer_sizes, 17 | 'latent_size': latent_size, 18 | 'variational': variational, 19 | 'image_size': image_size, 20 | 'channels': channels 21 | }) 22 | 23 | self.encoder = nn.Sequential( 24 | Reshape(784), 25 | *self.DNN(784, *layer_sizes, 2*latent_size) 26 | ) 27 | 28 | self.decoder = nn.Sequential( 29 | *self.DNN(latent_size, *layer_sizes[::-1], 784), 30 | Reshape(1, 28, 28), 31 | nn.Sigmoid() 32 | ) 33 | self.latent_size = latent_size 34 | self.variational = variational 35 | 36 | @staticmethod 37 | def DNN(*layers): 38 | net = [] 39 | for i in range(len(layers) - 1): 40 | net.append(nn.Linear(layers[i], layers[i+1])) 41 | if i != len(layers) - 2: 42 | net.append(nn.LeakyReLU()) 43 | return net 44 | 45 | def compute_metrics(self, batch, logits): 46 | images, _ = batch 47 | match = (images.long() == (logits > 0.5).long()) 48 | match = match.view(match.shape[0], -1).float() 49 | return { 50 | 'pixel_accuracy': match.mean(), 51 | 'image_accuracy': match.min(1)[0].mean(), 52 | 'image_accuracy@10': ((1-match).sum(1) < 10).float().mean() 53 | } 54 | 55 | def encoder_parameters(self): 56 | return self.encoder.parameters() 57 | 58 | def decoder_parameters(self): 59 | return self.decoder.parameters() 60 | 61 | def sample_nll(self, batch, logits): 62 | images, _ = batch 63 | return torch.nn.BCELoss()(logits, images) 64 | 65 | def encode(self, batch): 66 | image, _ = batch 67 | return 
self.encoder(image.float()) 68 | 69 | def decode(self, batch, z=None, state=None): 70 | return self.decoder(z), None 71 | 72 | def argmax_nll(self, batch, logits, temperature): 73 | images, _ = batch 74 | p_correct = logits*images + (1 - logits)*(1 - images) 75 | delta = p_correct - (1 - p_correct) 76 | reconstruction_loss = smoothed_log_indicator(delta, temperature).mean() 77 | return reconstruction_loss 78 | 79 | def sample(self, batch_size=1, z=None): 80 | if z is None: 81 | if self.prior == 'gaussian': 82 | z = torch.randn(batch_size, self.latent_size) 83 | elif self.prior == 'uniform': 84 | z = torch.rand(batch_size, self.latent_size)*2 - 1 85 | z = z.to(self.device) 86 | return self.decoder(z) 87 | -------------------------------------------------------------------------------- /dd_vae/vae_rnn.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch import nn 3 | import numpy as np 4 | from .utils import to_onehot, smoothed_log_indicator 5 | from .proposals import get_proposals 6 | from .vae_base import VAE 7 | 8 | 9 | class VAE_RNN(VAE): 10 | def __init__(self, embedding_size, 11 | hidden_size, latent_size, 12 | num_layers, vocab, 13 | proposal='tricube', 14 | prior='gaussian', 15 | variational=True, 16 | use_embedding_input=True, 17 | fc=None, 18 | fc_norm=False): 19 | super().__init__(prior=prior, proposal=proposal) 20 | self.vocab = vocab 21 | self.config.update({ 22 | 'embedding_size': embedding_size, 23 | 'hidden_size': hidden_size, 24 | 'latent_size': latent_size, 25 | 'num_layers': num_layers, 26 | 'proposal': proposal, 27 | 'prior': prior, 28 | 'vocab': self.vocab, 29 | 'variational': variational, 30 | 'use_embedding_input': use_embedding_input, 31 | 'fc': fc, 32 | 'fc_norm': fc_norm 33 | }) 34 | 35 | self.vocab_size = len(self.vocab) 36 | self.num_layers = num_layers 37 | self.latent_size = latent_size 38 | self.encoder_embedding = nn.Embedding(self.vocab_size, 39 | embedding_size, 40 | self.vocab.pad) 41 | rnn_input_size = embedding_size 42 | self.encoder = nn.GRU(rnn_input_size, hidden_size, 43 | num_layers=num_layers) 44 | self.encoder_to_latent = self.get_fc( 45 | fc, hidden_size * num_layers, 2 * latent_size, fc_norm) 46 | self.latent_to_decoder = self.get_fc( 47 | fc, latent_size, hidden_size * num_layers, fc_norm) 48 | if use_embedding_input: 49 | self.decoder_embedding = nn.Embedding(self.vocab_size, 50 | embedding_size, 51 | self.vocab.pad) 52 | decoder_input_size = rnn_input_size 53 | else: 54 | self.decoder_embedding = None 55 | decoder_input_size = 1 56 | 57 | self.decoder = nn.GRU(decoder_input_size, hidden_size, 58 | num_layers=num_layers) 59 | 60 | self.proposal = get_proposals()[proposal]() 61 | self.prior = prior 62 | 63 | self.decoder_to_logits = nn.Linear( 64 | hidden_size, self.vocab_size 65 | ) 66 | 67 | self.variational = variational 68 | self.use_embedding_input = use_embedding_input 69 | 70 | @staticmethod 71 | def get_fc(layers, input_dim, output_dim, fc_norm): 72 | if layers is None: 73 | return nn.Linear(input_dim, output_dim) 74 | layers = [input_dim] + layers + [output_dim] 75 | network = [] 76 | for i in range(len(layers) - 2): 77 | network.append(nn.Linear(layers[i], layers[i+1])) 78 | if fc_norm: 79 | network.append(nn.LayerNorm(layers[i+1])) 80 | network.append(nn.ELU()) 81 | network.append(nn.Linear(layers[-2], layers[-1])) 82 | return nn.Sequential(*network) 83 | 84 | def encode(self, batch): 85 | with_bos, with_eos, lengths = batch 86 | emb = self.encoder_embedding(with_eos) 87 | 
packed_sequence = nn.utils.rnn.pack_padded_sequence(emb, lengths) 88 | _, h = self.encoder(packed_sequence, None) 89 | h = h.transpose(0, 1).contiguous().view(h.shape[1], -1) 90 | z = self.encoder_to_latent(h) 91 | return z 92 | 93 | def decode(self, batch, z=None, state=None): 94 | with_bos, with_eos, lengths = batch 95 | if state is None: 96 | state = self.latent_to_decoder(z) 97 | state = state.view( 98 | state.shape[0], self.num_layers, -1 99 | ).transpose(0, 1).contiguous() 100 | 101 | if self.use_embedding_input: 102 | emb = self.decoder_embedding(with_bos) 103 | else: 104 | emb = torch.zeros( 105 | (with_bos.shape[0], with_bos.shape[1], 1), 106 | device=with_bos.device 107 | ) 108 | 109 | packed_sequence = nn.utils.rnn.pack_padded_sequence(emb, lengths) 110 | states, state = self.decoder(packed_sequence, state) 111 | states, _ = nn.utils.rnn.pad_packed_sequence(states) 112 | logits = self.decoder_to_logits(states) 113 | logits = torch.log_softmax(logits, 2) 114 | return logits, state 115 | 116 | def compute_metrics(self, batch, logits): 117 | with_bos, with_eos, lengths = batch 118 | predictions = torch.argmax(logits, 2) 119 | pad_mask = (with_eos == self.vocab.pad) 120 | non_pad_mask = (~pad_mask).float() 121 | correct_prediction = (predictions == with_eos) 122 | string_accuracy = ( 123 | correct_prediction | pad_mask 124 | ).float().min(0)[0].mean() 125 | character_accuracy = ( 126 | correct_prediction.float() * non_pad_mask 127 | ).sum() / non_pad_mask.sum() 128 | return { 129 | 'string_accuracy': string_accuracy, 130 | 'character_accuracy': character_accuracy 131 | } 132 | 133 | def sample_nll(self, batch, logits): 134 | with_bos, with_eos, lengths = batch 135 | loss = nn.NLLLoss( 136 | ignore_index=self.vocab.pad, 137 | reduction='mean')(logits.transpose(1, 2), with_eos) 138 | return loss 139 | 140 | def encoder_parameters(self): 141 | return nn.ModuleList([self.encoder_embedding, 142 | self.encoder, 143 | self.encoder_to_latent]).parameters() 144 | 145 | def decoder_parameters(self): 146 | modules = [self.decoder, 147 | self.latent_to_decoder, 148 | self.decoder_to_logits] 149 | if self.use_embedding_input: 150 | modules.append(self.decoder_embedding) 151 | return nn.ModuleList(modules).parameters() 152 | 153 | def argmax_nll(self, batch, logits, temperature): 154 | with_bos, with_eos, lengths = batch 155 | with_eos = with_eos.view(-1) 156 | 157 | logits = logits.view(-1, logits.shape[2]) 158 | oh = to_onehot(with_eos, logits.shape[1]) 159 | delta = (logits * oh).sum(1, keepdim=True) - logits 160 | error = smoothed_log_indicator(delta, temperature) * (1 - oh) 161 | error = error.sum(1) 162 | pad_mask = (with_eos != self.vocab.pad).float() 163 | error = error * pad_mask 164 | error = error.mean() / pad_mask.mean() 165 | return error 166 | 167 | def sample(self, batch_size=1, max_len=100, mode='argmax', 168 | z=None, keep_stats=False, 169 | temperature=1): 170 | if mode not in ['sample', 'argmax']: 171 | raise ValueError("Can either sample or argmax") 172 | generated_sequence = [] 173 | if z is None: 174 | if self.prior == 'gaussian': 175 | z = torch.randn(batch_size, self.latent_size) 176 | elif self.prior == 'uniform': 177 | z = torch.rand(batch_size, self.latent_size)*2 - 1 178 | batch_size = z.shape[0] 179 | character = [[self.vocab.bos for _ in range(batch_size)]] 180 | character = torch.tensor(character, dtype=torch.long, 181 | device=self.device) 182 | h = self.latent_to_decoder(z.to(self.device)) 183 | h = h.view( 184 | h.shape[0], self.num_layers, -1 185 | 
).transpose(0, 1).contiguous() 186 | if keep_stats: 187 | stats = [] 188 | lengths = [1]*batch_size 189 | for i in range(max_len): 190 | batch = (character, None, lengths) 191 | logits, h = self.decode(batch, state=h) 192 | if keep_stats: 193 | stats.append([logits.detach().cpu().numpy()]) 194 | if mode == 'argmax': 195 | character = torch.argmax(logits[0], 1) 196 | else: 197 | character = torch.distributions.Categorical( 198 | torch.exp(logits[0])).sample() 199 | character = character.detach()[None, :] 200 | generated_sequence.append(character.cpu().numpy()) 201 | generated_sequence = np.concatenate(generated_sequence, 0).T 202 | samples = [self.vocab.ids2string(s) for s in generated_sequence] 203 | eos = self.vocab.i2c[self.vocab.eos] 204 | samples = [x.split(eos)[0] for x in samples] 205 | if keep_stats: 206 | return samples, stats 207 | else: 208 | return samples 209 | -------------------------------------------------------------------------------- /illustrations.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "from matplotlib import pyplot as plt\n", 10 | "import numpy as np\n", 11 | "import torch\n", 12 | "%matplotlib inline\n", 13 | "\n", 14 | "from dd_vae.proposals import get_proposals\n", 15 | "from dd_vae.utils import smoothed_log_indicator\n", 16 | "\n", 17 | "import matplotlib\n", 18 | "matplotlib.rcParams['text.usetex'] = True\n", 19 | "matplotlib.rcParams['text.latex.preamble'] = [r'\\usepackage{sansmath}', r'\\sansmath']\n", 20 | "matplotlib.rcParams['font.family'] = 'sans-serif'\n", 21 | "matplotlib.rcParams['font.sans-serif'] = 'Helvetica, Avant Garde, Computer Modern Sans serif'" 22 | ] 23 | }, 24 | { 25 | "cell_type": "code", 26 | "execution_count": 2, 27 | "metadata": { 28 | "scrolled": false 29 | }, 30 | "outputs": [ 31 | { 32 | "name": "stderr", 33 | "output_type": "stream", 34 | "text": [ 35 | "findfont: Font family ['sans-serif'] not found. 
Falling back to DejaVu Sans.\n" 36 | ] 37 | }, 38 | { 39 | "data": { 40 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAdQAAAD4CAYAAABVPheVAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8QZhcZAAAgAElEQVR4nOzdeVzUdf7A8dd3ZriZ4RouYQA5PABvU0GxzEwpvDpX7MLdDiqt3XTbVn/VtrlH0m5qm5YlWWu2WmlFZl7lfSQmgnhCgNwM1wznwMz398eIoeAVKKif5+PB4zEwn+/n+/7yUN7zuSVZlhEEQRAEoWMUXR2AIAiCINwIREIVBEEQhE4gEqogCIIgdAKRUAVBEAShE4iEKgiCIAidQCRUQRAEQegEqmt9Q61WKwcFBV3r2wqCcB1KTU3Vy7Ls2dVxCMLluOYJNSgoiAMHDlzr2wqCcB2SJCm3q2MQhMslunwFQRAEoROIhCoIgiAInUAkVEEQBEHoBCKhCoIgCEInEAlVEARBEDqBSKiCIAiC0AlEQhUEQRCETnDN16EKNzdZlin52UDukXKKTlVj0NdTbzQhKSTsHW1w83HEJ8SFwEgPPAPUSJLU1SELgiBcFpFQhWvC3Gzh6K5C0rbmU1VS126ZpgYzxooG8jIr2P/1z3j4OTNgrD+9h/ugUIrOFEEQujeRUIWrLiddz47/ncCgbwDAUWNL6BAv/Hq74e7rhKPGFlmWqTc2UV5Qw+ljlWT/VEp5QQ1bPzrGT5tOE/NAGLq+7l38JIIgCBcmybJ8TW84dOhQWWw9eHMw1Tez438nOLa3GAA3H0duietJyCDPS7Y4zU0WTqWWsD/l57OJuP8Yf6KmhqCyVV712IXuQZKkVFmWh3Z1HIJwOUQLVbgqqkrrWL8kncqiWpQ2CoZPCmbAWB0KxeWNiSptFPQe4UvoEG9+2pTLjyk5HP4+n8JTVdz9dH+c3eyv8hMIgiBcGTEwJXS6oqxqPvvHASqLanHzceTBubcwaFzAZSfT1pQ2Cobe1ZN7XxyCi6cD+tM1fPaPA5TlGa9C5IIgCL+eSKhCp8o/XslXiw7RWNdMUD8P7ntxKG4+Th2u1ytQw30vDqVHmCu11SbW/fsnSnIMnRCxIAhC5xAJVeg0BScqSXk7jeZGM71H+BCb2B9bh84bVbB3tmHSrIGEDPLEVN/MVwsPiaQqCEK3IRKq0CnKC2pYvyQdc5OF8JG+jH2k76/q4r0UpY2Ccb+LOJtUU95Oo7qs/WU4giAI15JIqEKHGSsa+HpxGqb6ZkIGeXLr9D5IVyGZtlAqrUk1IMKdhpomUt4+TENt01W7nyAIwuUQCVXokOYmMxveTae2qhHfUBfumBF+VVqm51MqFYx/PBIPf2eqSur4dmk6FrPlqt9XEAThQkRCFTpkx/9OUpprRO1hz12J/VHZXLs1orb2KuKe6Y+jiy2FJ6vY+2X2Nbu3IAjC+URCFX61o7sLydxZiNJGQeyT/bB3srnmMTi72TP+8UgkhcRPG/P4Oa3smscgCIIAIqEKv1JVaR3bPz0BwK3TeuMZoO6yWHqEujJiSjAAW1YcpaayoctiEQTh5iUSqnDFLGYLm5MzaTZZCLvFm77Rvl0dEoPGBRDYz4PGuma2fnyMa72lpiAIwiUTqiRJT0iS9MeLvP9PSZI2SZKUKklScOeGJ3RHB7/LpeRnA06udoz+Ta+uDgcASZIY81Af7JxUnM6s4MiOwq4OSRCEm8xFV91LkrQJuAN48QLvDwYGy7I87szrd4FxnR6l0G2UF9TwY0oOAGMf7dsl46YX4uRix63TerPx/SPs+vwUur7uuHg6dHVYwlV28ODB8SqV6hVZln0QvW7C1WGRJKm4ubn5L4MHD/7uQoUumlDPJMonANcLFLkD2HSm7EFJksSpEDcw2SLzw8pjWCwykbf6dc5xag0GWPMY2DjAvR+ATcc2vQ8b6k32oTJOHShl2yfHmDhroDik/AZ28ODB8XZ2dm8HBQWZHBwcKhUKhejrFzqdxWKR6uvrXXJyct4+ePDgsxdKqh39NOcBXHKtwplu4wOSJB0oKxOzMK9XmbsKKc424Ohiy4gpIZ1T6cZ5kLUFjqXAD3/vlCpHP9gLO0cVp49Wciq1tFPqFLonlUr1SlBQkMnJyaleJFPhalEoFLKTk1N9UFCQSaVSvXLBch28TznQety03ZasLMvvybI8VJbloZ6enh28pdAV6gwm9qzNAmDU/WHYdcYevac2w8EVv3y/exGc/rHD1TqobYmaak34O9ecxFTf3OE6he5JlmUfBwcHMa1buCYcHBwazgwttKujCXUzZ8ZMz4yhbu5gfUI3tevzkzTWNRMQ4U7oEK+OV1hfBV/OtL4e+wqMfA5kC6xLhKb6DlcfPrIH3j011FWb2Pe12PDhBqYQLVPhWjnzb+2CefOKE6okScGSJFWCddwUOHhm8tI/gSd/baBC91Xys4ET+0pQqhSM/k3vzhmT/O7PYCwEv6EQPQtu+zN49oHyk7D19Q5XLykkbp3WG0mC9B8KqCis7XjMgiAIF3HJhHqmu/aNVt9ny7Ls1ur7F2VZHnfmSzQFbjCyLLPrs5MADBir65xZs8c3wKGVoLKHKUtAqbJORpqyBCQl7PkP5O7u8G08A9SEx/ghW2R2rz3V8bgF4SqKjY0N1ul0ke29t3PnTkdJkoasW7fuinZQkSRpSFJSkrbl+8zMTFudThcpSdIQvV5/7fYJvUmIKebCRWUdLKMoqxoHtQ1DJgR2vMK6Cvh6lvX17f8Hnq3WsfoNhpg/ADKsexpMHW9VDovriY29ktz0ck4fq+hwfYJwPYmKijKEhoY2tnz/+uuv+1ZXV6vWrl17QqvVmrsythuRSKjCBZmbLOw507IbNjG4cw4L//ZFqCkB3QgYkdj2/dF/BO9IqPwZNv+lw7dz1NgyeLz1g8Cuz05hsYjhNuHmsXv37pNTpkwxtv5ZZGRk7fk/EzqHSKjCBaVvy8egb8DN14nwkZ2wveDRryF9NagcYMo7oGinx0lla+36Vahg/7vw8/YO33bgWB3ObnaU59dwfG9xh+sThO5Ao9EMTE5Odps3b563TqeL1Gg0A2NjY8/Zra51l290dHTYqlWrtHv27NG0dPm2XCtJ0pCIiIi+53cpt9wjMTHRT6PRDNTr9cqWn8XHxwdqNJqBOp0uMikpSavX65XR0dFhkiQN0el0kVfaPX0jEAlVaJepoZmD3+UCED01BIWyg/9UavXw9fPW1+P+Ah4XWcfq29/aUgX48hlo7NiHaZWtkhGTrX9n9qdkY24W56YKN4akpCSf1NRUp8WLF+fOmjWraMOGDW6JiYl+7ZX96quvsidMmFAZHh5ed+TIkfRZs2b5z58/3z8uLq5y+fLl2QEBAY1Tp07tdX4iXLZsmTYlJcVt+vTpZS3dxM8991ygi4t
L80cffZSl0+ka58yZExgcHNzv9ttvN6xdu/YEwCOPPNJJi9WvH53QhyfciNJ/yKfe2IR3Tw2B/Tw6XuE3L0CdHoJi4JbHL10+5g9w/BsoSoON/wcT3+rQ7cOG+ZD6XR6VRbUc3V1E5Oh2/+YIN4CgP30zpKtjAMj5x92pV/seBoNB+e2332YDTJkyxZiamuqUlpbm2F5ZrVZrdnNzM1dXVzd7eXmZV61apV2wYEHu7Nmz9QAJCQmVERERfefOnes/ZcqUoy3XZWRkOGVnZ6e3HnONjIysXbJkSUFLvTExMZrp06eXvf766yUAlZWVBTNmzAjW6/XKm2msViRUoY3G+mZ+2pgHwPBJwR1fJpPxBWSuA1tnzLf9nbrt2zFlZWPKy8NcWYmlrg4AhbMzSlcXbIOCsAsJweGON1GujIXUZOg7EULH/uoQFAqJYXE9+W5ZBgfW59AnyueaHoYuCFdDXFxcZevvzyTMS/7D3rlzpyPAY489ds71CQkJZXPmzDln9mFcXFzl+Unx9ttvN7S87tOnTyPA+PHjz/4sLCyskZuQSKhCG2lbTtNY10yPMFf8+7hd+oKLqSmlceVsDCfUGI3BNH78IFzu0WoKBfYB4ajVWWj++yy2f9oL9i6/OpSQQZ54+DlTXlBD5s5C+o/R/eq6hO7rWrQMuwsPD49ftQ3YqVOn7MDaujyvPjNA65ZlcHCw2InqMomEKpyjobaJtM0db53KZjPGLVupWPAS9aftADugDMnGBvsB/bHv0xfbwEBUnloUjtYeKktNDc36ckw5P9Nw9Bj16ek05JTRgIaydHD8cQIeL/4Tp5iYXxWXpJAYNrEn3y5NJ/XbXMJH9kBlK1qpQvdwpnXZ7t/kkydP2gH06tWrU1p+LUtpzu+SLS8vV0LbRCtcHpFQhXOkbTmNqcGMrq8bPcIudMjQhcmyjHHzZsre/BemnBwAFDYy6vET0Ey+D8dht6Cws7usuiy1tdTu34/hi/9h3PoDdVlV1D3xJHa9e+P1wh9wHj36iuPrOUCLZ4CasjwjGdsLGHhHwBXXIQhXw+DBg2tXrVqlXbdunfr8ZS3Lli3TAoSHh5s6416jRo2qA/jwww/dWsZQAZKTkz3Dw8PrOuMeNyORUIWzTA3NpP+QD8DQu3te8fUNx45R/Prr1B+w9rjZOFtw72XE9dm/ooj+7RXXp3ByQj1mDOoxYzBvTqJq2b+oOKmh8fhxTj/xJE6jRuH955ewC778c+0lSeKWuJ6sf+cwhzbl0e9Wf5Q2YrK70PVmz56tT05O9pw6dWqvadOm6QcPHlxbVVWlXLt2rXtmZqbjggULcjvrXlqt1jxt2jT9nDlzArOysuyGDRtWt2zZMm1mZqZjyyxd4cqJhCqcdWR7IY11zfiGutAj9PJbp7LJhP69ZeiXLoXmZpRubmhHOOCmPoDU+06ImtHh2JS3/x6P3E249dpLZc1I9DvKqN25k5+nTMXz+edxf/QRJOXldd8G9fPAvYcTFYW1HN9fTPjIHh2OTxA6w5EjR44mJib6paSkuK1atUqrVqvNkZGRtWvXrj3R2ZsxfPLJJ7nBwcENK1as8Fy6dKlPeHh43dW4z81Eki93gkgnGTp0qHzgwIFrek/h0pqbzHw8dw91BhN3P9OfoH7aS18EmE6fpuC552nIzATALX4anmN7oNz0gnUC0dN7QdNJCas8C5aMhOZ6mie8S+mXh6leuxYAhyFD8HszCRufC56sdI7j+4rZnJyJi5cD8a+OQKEQh5B3R5IkpcqyPPRC76elpeUMGDBAf6H3BaGzpaWlaQcMGBDU3nuir0sA4NieYuoMJjz8nQmMvLx1p8bvv+fne++jITMTG39/AlaswOe5GSi3n9kyMPaNzkumYN0M4o5XAVDtmEePeS/gv3QJKk9P6lNT+fmee6ndu++yqgob6oXaw57q0nqyfxKH3guC0HEioQpYzBZ+2mgdnhkyPvCSM2hlWabsnXfIT3wai8GA89ix9Pzic5yG3QJfzYRGA/S+G/o/2PnBDnsCAkdBbRmsn436ttvo+dWXOEVHY66oIG/GDCo++viS1SiUCgaNs05IOvhdLte6p0YQhBuPSKgCpw6WYtA3oPF0IGSw50XLys3NFL/8CvpFi0GhwPMPf8B/8SKUGg2kfghZW8HBDeL+DZ1xbur5FAqY/DbYOMGRL+DIWlRubuiWvYfHk0+CxULJ3/5GyRsLkC0X32Kwb7QvDmobyvKM5B+tvGhZQRCESxEJ9SYnyzKHNp0GYNC4gIvu2Wupryf/mWepWrMGyc4O/8WL0D7xOJJCAZW5sHGeteDdb4La++oF7d4T7nzN+vqbF6CmDEmpxOv3z9Pjn/8AlYqK5cspnPNHZNOFVxmobJX0v926ucPBjZ02gVIQhJuUSKg3uaKsasryjNg729BnxIUn9Fjq6zmd+DQ127ahdHUl4MNk1GPPbAVosVg3sTfVQPhkiLjn6gc+9LcQfBvUlUPK82d3X3KZPBndu0tRODpi+OYb8n//h4sm1X63+qGyU5J/rJLygpqrH7cgCDcskVBvcoe3WFunkaP9LrhrkKW+ntNPP03d3r0oPbUErvwvjoMG/VLgx/chZwc4auHuf12drt7zSRJMehts1XAsBdI/O/uW88iRBHz0EQoXF2q2bLloUrVztKHvmQ8SaVtPX/24BUG4YYl1qDcxg76e7ENlKJTSBU9fsTQ2kv/MM9Tt2YtSqyXwww+xC2l1KlN5Fmx+xfo67l/gpMViMVNvMFBvqKbOUE290UizqRFzUxPNTSbMTU1YzOftbHYmCbeeEKVU2WBjb4eNrR0qe3ts7RxwdHXF2d0DeydnJFcdTPibdSLU+tnQMwbU1uToEBlBwPIPyJvxW2tS/cMf8H/rLSRV23/y/W/Xkb6tgBP7SoiaEoKD2rYDv1VBEG5WIqHexA7/kI8sW5eQOLm23Q5QNpspnPNHanfvQenhQeCHyeckU9lsxvjpMxSVO1LsFEPF14eoXPYt1SXFWMy/as/uy6a0sUGj9cTDPwBt821oSzPwWz0T5xmrzyZnh4hWSXXzFor/8ho+r/2lzSxmV29HAvt5kJtezpEdhQy9K+iqxi4Iwo1JJNSblKmhmaM7CwEYcHvbU1dkWabkb3/HuHEjCmdnApZ/gF1oKE2NDeQePkT2Tz/y875t1NSogL5AHfDj2evt1Roc1RocXVxxUGtQ2dmhsrFBaWOLytYWheKX0Qb5l5uevTeAubmJpoZGmhobrF/19dRWVVJTWYGpvo7KokIqiwo5BdYYCupx+zEe3eCRhA6LIiByAA4REeiWLCEvIYGqNWtQeXnhOfPZNs874HYduenlpG/LZ9CdAShVYjREEIQrIxLqTero7iJMDWZ8Q13wCtS0eb/igw+oXLkSycYGv7ffplw2k7F0ESf27sBUX3+2nL2iCZ/gEHwHjkQbEISbTw9cvX2xsbe/qvGbGuqpLi2h/HQu+tN5lKRtpyDnNJWVRiq3bODwlg3YOzkTNmIkA++8G79//4v8Z2ei/89/UHl64vabc9fI+vdxO7
[... base64-encoded PNG output omitted: rendered figure of the bounded-support proposal kernel densities, also saved as images/kernels.pdf ...]",
 41 |       "text/plain": [
 42 |        "[figure repr omitted]"
" 43 | ] 44 | }, 45 | "metadata": { 46 | "needs_background": "light" 47 | }, 48 | "output_type": "display_data" 49 | } 50 | ], 51 | "source": [ 52 | "fig = plt.figure()\n", 53 | "ax = plt.subplot(111)\n", 54 | "\n", 55 | "x = np.linspace(-1, 1, 1000)\n", 56 | "i = 0\n", 57 | "for name in get_proposals():\n", 58 | " if name == 'gaussian':\n", 59 | " continue\n", 60 | " y = get_proposals()[name]().density(x)\n", 61 | " ax.plot(x, y, label=name.title(), linewidth=2)\n", 62 | " i += 1\n", 63 | "\n", 64 | "box = ax.get_position()\n", 65 | "ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])\n", 66 | "\n", 67 | "lgd = ax.legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=16)\n", 68 | "plt.savefig('images/kernels.pdf', bbox_extra_artists=(lgd,), bbox_inches='tight')\n", 69 | "plt.show()" 70 | ] 71 | }, 72 | { 73 | "cell_type": "code", 74 | "execution_count": 4, 75 | "metadata": {}, 76 | "outputs": [ 77 | { 78 | "data": { 79 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAagAAAEYCAYAAAAJeGK1AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8QZhcZAAAgAElEQVR4nO3deXxU1fn48c+d7GHJwo4QYNhkhySAu6iJqHUXRK3WahXUX22rtSBqtbUqDdalVr81aN2qbSmhLriACSCguIQEkH0LEBbDlgRCkplMZs7vjzuJWWbLMnNnJs/bVyS598zch2GSJ+ec556jKaUQQgghgo3J6ACEEEIIVyRBCSGECEqSoIQQQgQlSVBCCCGCkiQoIYQQQSky0Bfs3r27GjhwYKAvK4QQIkgVFBQcV0r1aHo84Alq4MCBrFu3LtCXFUIIEaQ0Tdvv6rgM8QkhhAhKkqCEEEIEJUlQQgghgpIkKCGEEEFJEpQQQoigJAlKCCFEUAp4mbkvTp48yfHjx6mpqTE6FNFEREQEXbp0ITk5mZiYGKPDEUKEMa8JStO0mUCiUmq+m/NZQCqQDExXShW1JSCLxcKRI0fo168fcXFxaJrWlqcT7Ugphc1m49SpUxQXF5OSkiJJSgjhNx6H+DRNywWyPZxPBVKVUpnA3Z7a+urYsWP06NGD+Ph4SU5BRtM0oqOj6d69O0lJSZSWlhodkhAijHnsQSmlMut6UG6aZAC5zraFmqaltzUgi8VC79692/o0ws+6du3Kvn376NOnj9GhdGi1ZWVULPuc6vXrqdm3j9qyMhzVVahqC8rhAKUafwAohWxTGuSabCSrGvy/8XGFm1NuH+PT5X187OHJZqa+8UmrruGLts5BdQPyvTVyJrmZACkpKR7b1tbWEhkZlFNjooGoqCjsdrvRYYSspZt/AOCy0a1P8OWLF3Pkz1k4KiqI7NmT6EGDiBszBlN8HFpcHFqE8/tI00DTe8DUj0o0/Fz4l6K61kJVbRVVtkqqbFVU2y1Ya61Y7RasdisWu7X+a5ujllpHbauuZNJMmLQITJqJCC2CCC0Ck8mEhoZJM6EBmmbChIamaWiafk5zft3wuMn5HjFh0t8u9e1A/wq6jhrXjq9Tc23NBCcAc4OvXfa0lFILgAUA6enpXtOyDO0FP/k3apu2JCaAsv/+l5LHnyB+8mR6zZlNzIgR8m9ioCpbFcUVxew/tZ/iU/qfByoOcKTqCEerjmJz2Jo9JiYihoSYBBJjEkmI6UNiTCJdo7vSOaoziVHxxEfGEx/l/Ij88c+4yDhiImKIjogm0hRJdEQ00aZookxRRJgiDPjb+09bE1QekAXMd85H5bU9JCHCX2mlXqGa3Cm6xY+tOXiII08/Q6dzzqF/9qtoUVHtHZ7woKSyhG0ntrG9bDvbT2xnR9kODp0+1KhNj7ge9O/Sn/E9x9Mzvie94nvRM76n/hHXk6TYJGIjYw36G4SOFicoTdPMQIFSKsk571ToLKYAmNW+4QkRnu59twCAhbPObvFjT7z2Gjgc9Hn6KUlOfqaUYt+pfRQcKaDgSAGFRwo5XHkY0Ae5BnQdwJjuY7h+6PUM6DqAAV0HkNIlhfioeIMjDw9eE5RzeK7h10VAUoOv5/ghLuFCTk4O8+bNo7CwkIyMDLKzszGbzR7bT58+vdnxmTNnkp3d5oJL0QZ3n+/+380TR3U1Jz/6iK5XX0WUFKj4RY29hnUl6/ji4BesPri6vneUHJtMWq80bht5G6O7j2ZY0jBJRH4m1QghIi8vj+nTpzNt2jRmzZpFdnY2gwcPpqysjMRE10WWRUVFJCYmMnfu3EbHU1NTAxGy8CBjZK9WPe70qtWo6moSrrq6nSPq2BzKQX5JPh/t+YjlxcuptFUSGxHLWX3O4s7RdzKx90QGdh0o83wBJgkqRGRlZZGRkcGiRYsAuPHGGxk0aBALFixg9uzZLh+zZ88e0tPT3Z4XxjlaYQGgZ5eWzUOc/uILIhITiZ/Y5js6BHC06igLdyzkoz0fUVJZQqeoTmQOyCQjJYNJfSYRFxlndIgdmiSoEFBeXk5eXl6jYbnExEQyMjJYuHCh2wRUVFTkcQhQGOf+f60HWj4HVbW+kLi0NLSI8KrWCrQtJ7bwz63/ZNneZdiVnXPPOJffpv2WKf2nSPFCEAmNBPXZw1CyydgYeo+By/9syKXrVmxIT2/8W/PEiRPJy3NfOFlUpK86NXjw4PpkNWfOHGbOnOm/YIVP7p0yuMWPqT1xAtv+YpJuvNEPEXUM20u388r6V/ji4Bd0iurETWfexC0jbqF/l/5GhyZcCI0EFcJycnKYM0evIyktLSU5ORnQ54Hqhuu8qUs0TeeaEhMTKS8v9/i40tJSsrKyMJvNLFq0iFmz9EJLSVLGmjK8Z4sfU71J/yUtbpx/b44MR4dOH+KFghdYtm8ZXaK7cP+E+7n5zJvpEt3F6NCEB6GRoAzqubRVXWLZs2cPAAsWLGiWGAoLC0lLS3P7HFlZWa0uali0aBEZGRn1iS0jI4PS0lLpRQWBw+XVAPRN9H2Oo8b5PooZOtQvMYUj
q93KW5vf4rVNr2HSTMwcO5PbR91O1+iuRocmfBAaCSpEJScnM23aNEDvSbmaD0pNTa1PYO6eo26Ir2lvqby83G0FH1B/7YZmzJhBTk6O18cK/3pg4QagZXNQ1qIiIrp3JyIhwV9hhZWNxzby6JePsv/UfjIHZDJ74mx6d5J1PkOJJCg/apgAFi5c6HZIz9dChqKioka9qfz8/Pohw6bKy8tZt24dGRkZXmMTgXf/xS3vBdXsKSJm0CA/RBNebA4br258ldc3vU7v+N5kZ2RzzhnnGB2WaAXZUTdA6ob7miosLHQuzuj6Y/78+Y0q9hrKy8urn1NqqrS0lMzMzGZFFAsXLpTKviBw3tDunDe0e4seY927l+jB8m/nSUllCbd/djsLvl/AleYrWXz1YklOIUx6UAFQWFjoNin4MsQHMGfOHDIzM+v/zMrKAhoXO8yfP5/c3Fxyc3Mxm81kZGQwffp05s6di9lsZuHCheTk5JCbm+vyWiJwik9UAZDSzbeVCOwVFThOniS6v1SbuZNfks9Dqx7Carfy3IXPcenAS40OSbSRJKgAyMvLIzMz0+15X3o0dTfpzps3j/nz55ORkUFBQUGjobr8/PxGPabc3FzmzJlDdnZ2/fBgbm6u22E/ETi/y9kI+D4HVVtSAkCk7JXm0sLtC/nzd3+mX5d+/PXiv2JOkJ5mONCUCuzWZenp6WrdunVuz2/bto0RI0YEMCLRWvJv1XrfFJ0A4CxzN5/an16zhgN3z2TAv94jXpaqqqeU4uUNL7Pg+wVc0O8C/nz+n6V0PARpmlaglGq2PIr0oIQwgK+JqY7tB32DwyjpQdWzO+w89e1T5OzM4boh1/H42Y8TaZIfaeFE/jWFMMCeY6cBGNyjs0/ta0uOgKYR2aOHP8MKGQ7l4Im1T/Dhng+5a8xd/GrCr2Qh1zAkCUoIAzzyP31VCF/noGwlJUT26CH7P6Enpye/fpIP93zIfePu497x9xodkvATSVBCGGD2ZcNb1L726FEie7Vui45wopTi2fxnWbxrMXePuZt7xt1jdEjCjyRBCWGAtAGub7B2x15aSkSPlt03FY7e2foO7257l1tH3Mr9E+6XYb0wJzfqCmGAHSUV7Cip8Ll9bXkZkUktS2rhJnd/Ls+te47MAZn8buLvJDl1ANKDEsIAj3+4GfB9DspeWkZEUpI/QwpqW05sYe6auYztMZZnznsGkya/W3cEkqCEMMAjV/h+/5ijqgplsRCR3DETVLmlnAdXPkhybDIvXfySbCjYgUiCEsIA4/r7vlivvawMgMgO2IOyO+w8vOZhjlUf453L3yE5tmMPcwYdpcCPQ62SoIQwwJbDJwEY1df71hm1pXqCinCzcn04y/4+m68Of8UTZz/B6O6jjQ5H2G1wMB92L4c9KyDlbLjsGb9dTgZyQ0hOTg5paWlomkZmZqbbFdJdmTVrVv3OvsJ4Ty7ZypNLtvrU1l7uTFAdrAe14egGsr/P5urBV3PD0BuMDqfjKtsH+f+A//wU5pvhzcvhy+chIgq6D/HrpaUHFSLy8vKYPn0606ZNY9asWWRnZzN48GDKysq87u1UVFTEggULmD17doCiFd48ftVIn9vanRtWdqQhvkpbJXPXzKVPpz7MnTRXKvYCyV4LB76FXctg5zI4tl0/ntAfRl0HQy6BQRdCnP/3lJMEFSKysrLqVzQHuPHGGxk0aJDHxFNYWMjdd99NYWFhIEMVPvBlaK+O/aQ+HBjRgTaZzPoui8OVh3lz6pt0jvZtOSjRBlWl+rDdzqWwOw8s5WCKhAHnQOrPYEgmdB/q1/kmVyRBhYDy8nLy8vLIzs6uP9ZwE0N3CSoxMZEZM2YwY8YMGd4LMhsPlAO+FUvYT+n3S5k6d4wf1KsPrub93e9z15i7SO0lK7f7zbGdsOMTvZd04FtQDojvDsOvgGFTYfBFEOv7L1L+IAkqBJQ6h3jS0xuvRj9x4sRmO+Y2ZDab65PXvHnz/BegaLFnPt0G+HYflKOiAlN8PFpk+H+7VtmqeOqbpzAnmLl3nKyx166Ugh82wrYl+sfxHfrx3mPh/N/CsMugbyqYgqc0ISTe8VnfZbG9dLuhMZyZfCZzJrW8F5KTk1PfeyktLa3fITc1NbV+uM6bumKIpnNNiYmJlJeXtzgmYbwnr/G9Is1+ugJTl46xx9HLG17mh8ofePuyt4mOiDY6nNDnsOu9o21LYNvHcLIYtAgYeC5MvAvO/AkknGF0lG6FRIIKVXWJpW5L9wULFjTaoh30eaK0tDS3z5GVlUWqbFAXdob39j3hOE5VYOoS/sN7W45v4b1t73HjsBtlaK8t7DbYu0pPSts/gcpjEBEDgy+GKXNg2OXQqWX7kRklJBJUa3ouwSA5OZlp06YBek/K1dbuqamp9QnM3XPUDfE17S2Vl5d7reATwalgv/5v6suisfbTFUR06ervkAzlUA6e/vZpkmOT+XXar40OJ/Q4HFC8FjYvhq0fQtUJiO4MQy+FEVfB0EyICb1eeEgkqFDVMHksXLjQ7ZCeq8TlSlFRUaPeVH5+fv2QoQgt85fq4/++zUGdDvtljj4u+phNxzfx9HlP0zU6vJNxu1EKDhXC5hzY8j5U/ABR8XqRw+jrYfAlEBXay0JJggoQdzfV+jLEN3v27PqKvboeGej3Rs2dO7fdYxX+98z1Y3xua684RXRKih+jMVaVrYoXC15kTPcxXGm+0uhwgptScGSL3lPavBjK90NEtN5TGn29XugQ3cnoKNuNJKgAKCwsdNtL8mWID2DOnDlkZmbW/5mVlQXQaE5r/vz55Obmkpub247RC3/wdat30HtQpq6hNzzjq9c3vc6x6mM8P+V5WaXcnYoS+P6/sPHfcHSrXuhgngJTHtYLHQwuB/cXSVABkJeXR2Zmptvzvgzx1d2kO2/ePObPn09GRgYFBQWNhhHz8/M9lp2L4PFN0QkAzjJ7nqxWSmGvqCAiTKv4SipLeHvL21wx6ArG9xxvdDjBxVatFzls/Le+7p1yQL+J8JPnYOS10Cn8N7CUBBUA7bXE0LRp0xoN8TXlqWy9zLkitggOL+TuBLzPQSmrFWw2TJ3DM0G9uvFVFIpfp0phBKAP4RV/DRv+pRc7WE/pSwyd9yCMu9nva98FG0lQQhjg2WnjfGrnqHCuIhGGZeb7T+3ng90fMGP4DPp27mt0OMY6eRDWvwcb3tPnlaI6wchrYPzNMOC8oLp5NpAkQQlhgJRu8T61c1RWAhARhsscvbLhFaIjorl77N1Gh2IMu01fZqjwbX39O+XQF2G96BG9NDyMih1aSxKUEAb4ctdxAM4b6nkewVFdDYAWF+f3mAJpZ9lOlu5dyp2j76R7XPjPpTRSuhfW/1PvMZ0ugc699SG81NsgaaDR0QUVrwlK07QsIBVIBqYrpZrVS2ualg3ULRQ3RyklM/VCePC3FbsAHxJUVRUApjjfelyh4uX1L9M5qjN3jL7D6FACo7YGtn+s95aKvgDNpK8QnvY8DJ0KEdJXcMXjq6JpWiqQqpTKdH6eDWQ2aZMBmJVSac42rwH
ub+wRQvDCDN8q1hxVeg/KFB8+CWpH6Q5WHljJfePvIyEmPMuj6508COvegIK3oeq4XvAw5RGYcGtQr4EXLLyl7QwgF0ApVahpWrqLNqUNPk8G1jVtoGnaTGAmQEoY33AohK/6Jvo2ZOeodvag4sNniO8fm/5BfGQ8t5x5i9Gh+IdSsHc1fLcAdnyqHxt2GaTfqa+HZ4owNr4Q4i1BdQPyPTVwJi40TdsDmIHpLtosABYApKenq1bGKkTY+GLHUQCmDO/psZ1yzkGZwmQOqvhUMcv2L+P2kbeHX+/JWgEb/wPfvaZvZRGXDOf8Sk9MSQOMji4keUtQJ9CTTp1mK5NqmjYbKHQOA5qBAiCn/UIUIvz8/Qt99RBvCapuDipciiTe3PImkVokt428zehQ2s+xHZD/Omz4N9RUQN8JcO3fYdT1Ib8WntG8Jag8IAuY75xfclX80A09kUHj4T4hhBt/u2WCT+1+nIMK/ZLjo1VH+XD3h1w75Fp6xPcwOpy2UQqKVsLXr+gl4hHRekKaNBP6yRR8e/GYoJzDd4WaptUt7jYLoK6npJRKAuYBizRNm+Fs02yITwjRWM8uvv1m7agf4gv938Tf3foudmXnjlEhXLlXa4VNi/TEdHQrdO4FFz0G6Xd0iKWHAs1rbaNSqtlmTM5S8yTn5+U0qewTQniWt/UIABkje3ls56iqQouJQYsI7Yn1Slsli3Yu4tIBl9K/a3+jw2m5yhN6Nd53C6DyKPQcpQ/jjb4BImOMji5sdcz1M0JUTk4OaWlpaJpGZmam2y08WvuYWbNm1W9PL/zrtTVFvLbG+7+fo7oqLErMP9rzEadtp7l15K1Gh9Iyx3fBxw/AC6Ng5VPQZxzc9gHc+xWMv0WSk59JggoReXl5TJ8+HbPZTHZ2NqWlpQwePLjZLrutfUxRURELFizw519BNPD3W9P4+63e5ypUVXXIV/A5lIN/bfsXY7qPYVwP39YgNNzBdfDvW+DldH3Fh7HT4b5v4NYcGHwRaJrREXYIkqBCRFZWVv2WGzNnzmT58uUkJiZ6TCq+PKZuw8TBgwcH4q8hnJI7RZPcKdprO0d1NVqI3wO19vBa9p3axy0jgvy+J6X0bS3euhJevwT2fwUXzIYHNsPVf4OeI4yOsMORBBUCysvL63tDdRITE+t32W3LYxITE5kxY0b9BogiMJZu/oGlm3/w2s5RVRXyFXzvbnuX7nHdmTpgqtGhuOaww5YPYMEU+Od1cGI3XPo0PLAFLn4UOnu+FUD4T0gsAFXyzDNYt203NIaYEWfS+5FHDLl2aalevZ+e3nghj4kTJ7rdoNDXx5jN5vr9qubNm9duMQvP3vxqHwCXje7jsZ2jOrSH+Pae3MtXh77ivvH3ERURZXQ4jdXWwPcL4au/woldkGyGq16CcTfJ3FKQCIkEFcpycnLqCw9KS0vrt3BPTU31uMFgQ3WFDQ13z6372t0cVGseIwLntdtdrRrWnKO6iqieniv9gtl/tv+HKFMU04cF0d0ntmooeAvW/g1OHYLeY2Dam/r+S7IMUVAJiQRlVM+lreqSxJ49+qoBCxYsYObMmY3a1M0BuZOVlUVqaqr/ghSG6BrrW29CVVWHbBVfdW01S/YsIXNAZnBsqVFTBQVvwpcv6qXiA87Ve0xDLpGihyAVEgkqVCUnJ9dv0Z6Tk4PZbG7WJjU1tT6BuXuOuuG6pj2f8vLyZj2kOnXXasljROAs2XgYgKvGed5J1lFVFbJFEp/v+5wKWwXThk0zNpCaKv0epq/+qiemQRfClLdhwDnGxiW8kgTlRw0TwcKFC90O6blKXK4UFRU16k3l5+fXDxk2VXe8JY8RgfPuN/sBHxJUdXXI7gWVszOHgV0Hkt7Lt+HMdldTCfn/gLUvQeUxSUwhSKr4AsTdDbKFhYU4V4N3+TF//ny3FXt5eXnMmjXL5fO25jEicN66YxJv3THJaztHdWgO8e0u282GYxuYNmwaWqCHz2oq9d7Si2Mh9/fQaxTcsRRu/0iSU4iRHlQAFBYWuu0l+TLEBzBnzhwyMzPr/6wrC284pzV//nxyc3PJzc31+THCGHHR3ifjVU0N1NaGZBXf4l2LiTRFctXgqwJ3UVu1vqr4ly/qmwOaL4IpD0PKWYGLQbQr6UEFQF5eHpmZ7pcrNJvNbj/qhgnrbrht+FwFBQWNhhHz8/MblZD78hhhjPfXH+T99Qc9tqnf7j3E5qAstRY+2vMRGSkZJMcGYDjZbtPnmF6aAJ8/plfl3fk5/OwDSU4hTnpQAVB3n1FbTZs2rb7owhVXc1zeHtNQWVlZq2MTLfOf7w4AcN2Efm7b1K9kHmJDfLn7czlVc8r/xREOO2xeDCufhrJ90H8y3PA6DDzPv9cVASMJSggDvHvXZK9t6hJUqG1W+L9d/yOlSwoTe0/0zwWU0rdSX/GUvuVFrzFwyyIYminl4mFGEpQQBoiK8D667gjB7d4PVhxk3ZF13D/hfkyaH2YQilbB8ifh0DroNsR5g+21YJLZinAkCUoIAyxapw/xTU93vzeSsloB0GJCZ9mdj4s+BuBK85Xt+8SHCiDvj7B3FXTtB1e/DONuhgj5ERbO5F9XCAPkFOgFEh4TlMUCgCk2NHbTVUqxZM8SJvaeSN/Onu/v8lnpXljxJ32uKb47XPZnSLsDokLjNRFtE5QJSikV+HsnRIsopYwOIaQtnHW21zYOi7MHFR0aPajvj39PcUUxd425q+1PVlUKq5+F716DiCh924tzfwUxXdr+3CJkBF2CioqKorq6mvgQq1zqaKqrq4kJoaGnUKSsdT2o0Hidl+xZQmxELJkD3N9S4ZWtGr7NhjXPQ00FTLgVpjwCXT2v+i7CU9AlqJ49e3Lo0CHOOOMM4uLipCcVRJRS1NbWUlFRwfHjx+nVK3RX2Tbav78rBuDmSSlu2zjq5qBCYIivxl7DZ3s/4+KUi+kc3bnlT+BwwKb/wvI/wamDMOwyyPiDbBLYwQVdguratSsAhw8fxmazGRyNaCoyMpLY2FhSUlKIDYEfnMHq4+/1xWI9JShlCZ0iidUHV3Oq5lTrVo7YswJyH4eSTdBnPFz3dxh0QfsHKUJO0CUo0JNUXaISIhy9d5f3FQ7qh/hCIEEt2bOE7nHdOatPC1ZuOLodPn8UdudBYgrc8A8Ydb2UjIt6QZmghBANiiSCvKdaZilj9aHV/PTMnxJp8uFHSlUpfDFPX2k8ujNc+hRMmim72IpmJEEJYYB/fr0PgNvOHui2jbJaQNPQoqMDElNrfb7vc2odtd6H9+w2PSl9MQ+sp/Ry8YsehU7dAhOoCDmSoIQwQN62o4DnBOWwWtFiYoK+UGjpvqWYE8wMSxrmvtGuPFj2CBzfAeYpMHUe9BoZqBBFiJIEJYQB3r7T+15QymIN+vmno1VHKThSwL3j7nWdSI/t1BPT7lxIHgw3/0ev0AvypCuCgyQoIYKUw2oJ+gq+z/d9jkIxddDUxieqSmFVlr4/U1QnuPRp5zxTcA9XiuAiCUoIA7zx5V4A7jxvkNs2ymIN+gKJpfuWMjxpOOYE54
ac9looeFPfAsNyEtJ+7pxn6m5onCI0SYISwgBr9xwHvCQoqyWoh/gOnz7MxmMb+XXqr/UD+7+GT38HRzbp9zFNnQe9RxsbpAhpkqCEMMDrt3vfK8lhDe4e1LJ9ywCY2iMN/jcTvl+orzR+4zsw4mqZZxJtJglKiCClLFa0mOCds1m69zNGx/Sg/xtXgd0K5z8E5z8I0Z2MDk2ECUlQQhhgweo9AMy8YLDbNg6rhYjOwbl6d/GWHLaWbuOhE2Uw4Gx9G4xu7v8uQrSGJCghDFC4v9xrG2WxonXvEYBoWuDkQfj8MZYeXA7JiUzNeBbG3CTDecIvJEEJYYBXb0vz2kZZgqhIotYKX7+i79GkHCw1D2NC4gB6j73Z6MhEGJNVGYUIUo6amuAoktidB/93Niz/Iwy+mKKfLWaXrZypgy43OjIR5iRBCWGA//tiN//3xW6PbZTFYmyRxKnD8N/b4d0b9K9/uhhueo/csi0AbduYUAgfyBCfEAbYeviU1zYOqxVTjAE9KHst5L8GK54Gew1c9Ji+3bpztfHlxcsZ32M8PeN7Bj420aF4TVCapmUBqUAyMF0pVeSizUxgOmB2tils70CFCCcv35Lq8bxSSu9BBXq794MF8PFvoOR7GHwJXPFso+q8AxUH2Fa6jYfSHwpsXKJD8pigNE1LBVKVUpnOz7OBzCZtzMAspVSa8/NFgPcZYCGEezYbOByYAjUHVV0Oy5+EdW9A514w/S0YeW2z6rwVxSsAuCTlksDEJTo0bz2oDCAXQClVqGlauos204CFzjZFmqbJO1cIL15avguAX10y1OV5h9W5WWG0n3tQSsGmHH3F8arjMHmWvnZerOsdrfP25zEieQT9uvTzb1xC4D1BdQPyfWhj1jQtF30YcA6Q17CBcwhwJkBKSkrrIhUijBQdO+3xvKpLUP4c4ju+Gz55EPaugr6p8NNF0He82+bHqo6x4dgGfjn+l/6LSYgGvCWoE+jzSnUS3TV0DgMmAnuBpCbnFgALANLT01XrQhUifLx40wSP5+u2e/dLkYTNAl8+D1++AJGxcMVfIP1OMEV4fNjy4uUAZAzIaP+YhHDBW5l5Hs45J+ccVJ6LNrlAaTvHJUSHpqwWwA89qN3L4e9n63s1jbwGfrkOJt3tNTkB5BXnMbDrwB+31hDCzzz2oJzzToXO4TuAWVBfGFGglEpSSuVpmpapaVoBeg/rbv+GLEToe/7zHQA8eOlwl+cdFj1BtVuRRMURWPowbPmfvrPtbR/A4It8fni5pZx1Jeu4c/SdQb8FvQgfXsvMlVJzXBwrosEwnqs2Qgj3Dp+0eDyv2qtIQilY/y58/ijYqmHKXDj3NxDVssT3xcEvsCs7lwyQGigROHKjrhAG+Mv0cR7P1yeotqwkcWIPLPk17FsDKefAVX+FHm+IHzkAACAASURBVMNa9VR5+/Po06kPI5NHtj4eIVpIEpQQQaiuzLxVQ3x2G6x9Cb7I0osgrnwRUm8HU+tWNqu0VbL28FpmDJ8hw3sioCRBCWGArKXbAZhz2ZkuzytrDQBaS6v4DhbAkl/Bkc0w4iq4/Fno2qdNsa45uAabwyZr74mAkwQlhAHKq2o8nlc1dWXmPg7xWU/Dyqfh21f1lSBmvAcjrmxrmADk7s+lW2w3xvXwPCwpRHuTBCWEAeZdP9bj+boqPs2X/aB25cLHD8LJYkj/BWQ8AbEJ7REmlloLaw6t4UrzlUT4UIouRHuSBCVEEPpxiM9Dgjp9DJbNhU2LoPswuGOpvv16O/r68NdU11aTkSI354rAkwQlhAGe/mQrAI/+xHVVnMcyc6Vg47/19fOsp+HCh+H8B+u3w2hPecV5dInuwsQ+E9v9uYXwRhKUEAaw2Bwez9fPQTVdSaK0CD5+AIq+gP6T4aqXoKfrQou2qnXUsurgKi7sdyFRpii/XEMITyRBCWGAP1072uN5h9UKERFokc5vUXstfPMKrJwHpkjn+nm/aHXpuC/WH13PSetJLk652G/XEMITSVBCBCFlsf44/3R4A3x0v76J4PCf6JsIJpzh9xhWFK8g2hTNuX3P9fu1hHBFEpQQBvjjki0APHHVKJfnVY0VU3QUfP4YfP0KdOoBN74DI65utomgPyilWFG8gnP6nkN8VLzfryeEK/4bHxBCtJrj2D4020lY+zeYcBv8v+/01ccDtJLDjrIdHK48LMN7wlDSgxLCAO56TlSegM8fRe1cjhbZCX7+CQw8L7DBoe/9ZNJMXNj/woBfW4g6kqCECAZK6fczLX0YLCdRSWmYomIMSU6gzz+N7zGe5NhkQ64vBMgQnxCG+P0Hm/n9B5v1L8r2w3vT4H93Q9IgmLUaR8IgtPbaC6qFDlQcYGfZThneE4aTHpQQBoiNMqEpu14AseIp0Exw+XyYeBeYIhpX8QXYyuKVAJKghOEkQQlhgEfT7Hrp+Lr1MHQq/OQ5SOxff15ZrZjijameW3FgBcOShtG/S3/vjYXwI0lQQgSSrRpWZcFXL0F8Mkx7A0Zd36w6z1FjJSIpyc2T+E+ppZT1R9czc+zMgF9biKYkQQkRKEWr4OPfQGkR+Uk/4bM+9/H4aNdFEMpaY8gQ36oDq3AoBxf3l+E9YTxJUEL4W1UpfP572PCuXgTxs49YsbMXntKPslp93wuqHa0oXkGfTn04M9k/6/sJ0RKSoITwF6Vgy//gszl6kjrvAbhwDkTFMcfs+aEOq8X1SuZ+VGWrYu3htUwfPl22dhdBQRKUEP5QfgA+fQh2LoW+E+C296H3GJ8frqw1AS8zX3t4LTWOGi5JuSSg1xXCHUlQQrQnhx2+ew1W/AmUA6Y+A5PvgSa70T60aCMAf5nueht1I4b4VhSvICEmgQk9JwT0ukK4IwlKiPZyZIteOn6oAIZkwE+eh6QBLpv2TXDfO1JKoazWgA7x2Rw2Vh1cxZT+U4g0yY8FERzknShEW9mqYdV8WPsSxCbCDf+A0Td4XNj1wUuHuz2nbDbAy3bv7azwSCGnak7JzbkiqEiCEqIt9q6GJb/Wd7od/1O49Cn9/qY2UBYLAFoAh/hWFK8gNiKWc/qeE7BrCuGNJCghWqOqFHJ/D+vrSsc/BPMUnx/+m/+sB+DFm5rP9yhr3XbvgSmSUEqx4oC+91NcZFxArimELyRBCdESSsHmxfqq41WlcO5v9NLx6JYtS2Tu0dntOYe1BiBgc1BbS7dSUlnCL8f/MiDXE8JXkqCE8FX5Afjkt7BrmV46fuv/oM/YVj3Vry4Z6vacqtF7UIEa4ltRvELf+6mf7P0kgoskKCG8cdjhuwWw/E/611PnweRZzUrH20v9EF+AiiRWFK8grVcaibGJAbmeEL6SBCWEJyWbYcmvnKXjmXDl85CY0uan/eW/CgF4+ZbUZucc9UUS/p+DKj5VzO7y3cyZOMfv1xKipSRBCeFK3arja//mc+l4S4zs29XtOVU3BxWAIb6VB/S9ny5Kucjv1xKipSRBCdFUg1XH26t0vKn7pgxxe65uDioQQ3wrilcwInkEZ3Q+w+/XEqKlZMt3IepUnoAP7oN3rtar9
X72IVz7f+2enLxxWOuKJPyboI5XH2f90fVc1F96TyI4SQ9KCKVgw3v6lhjWU3rp+JSHIcp/9wTd888CAF69La15OAEqM19RvAKFImNAhl+vI0RrSYISHdvR7fDJg7D/K+h/Flz5AvQa6ffLpg5wXzGnrHqRhCnWvwkqd38uA7sOZEii++FGIYwkCUp0TLZqWP2svvV6dCe46iWYcBuYAjPqPfOCwW7PBWKIr9xSTn5JPneMvkP2fhJBSxKU6Hh25+k33Jbtg7E36UUQnXsYHVW9H6v4/JegVh5YiV3ZZXhPBDWvCUrTtCwgFUgGpiulijy03QNkemojhGEqSmDpXH2X225D4GcfgdmY1RPuejsfgNdvn9jsXP2NutH+KzPPK87jjM5nMDLZ/8OZQrSWxwSlaVoqkKqUynR+ng1kumk7G/CykbUQBnDYYd0bsPxJqLXClEfgvN9AZGC3VG/onMHd3Z5TNVb9fquoKL9cu6Kmgq8Pf83NZ94sw3siqHnrQWUAuQBKqUJN09JdNdI0zYyeuArdnJ8JzARISWn7XfhC+OyH7/V7mg4VwKAL9SKIbu7nfwLlzvMGuT3nsFjRYmP9ljxWHVyFzWEjc4DL3zWFCBreZoS7Ab4M12UDs9ydVEotUEqlK6XSe/QInrF+Ecasp2HZo7BgCpQXw/Wv6fc1BUFy8kZZrf4d3tufR8+4nozt0bqFboUIFG89qBM0HrZrVhvr7B3lKqWKZLhAGE4pfY5p2WNQcRjS7oCMJyAuyejIGrn9je8AePvOSc3OqRqr3wokqmxVfHXoK64dci0mTe7TF8HNW4LKA7KA+c45qDwXbdIAs6ZpmejJbJGmaR6LKYTwi2M74dOHYO8q6D0Wbnwb+jdPAMEgY0RPt+ccVv8lqC8PfYnFbpHhPRESPCYo57xToaZpuc5Ds6B+zqlAKZWklKof2tM0rQAvlX5CtDvraf2epq9f0TcOvOIvkH6n37bDaA+3nT3Q7TllrfHbTbp5+/NIikkitVfzVdSFCDZey8yVUs3W4XcmoGZjJkqp5uu2COEvSsHWD/S5plOHYPytkPGHoLqnqTWUxeKXZY6sdiurDq7i8kGXE2mSWyBF8JN3qQhNx3fpw3lFX0DvMTDtTUiZbHRUPvvp698A8N5dZzU75/DTHNTaQ2upqq2Sm3NFyJAEJUJLTaU+nLf2ZYiKh8uf1YfzIkLrrXzl2L5uzylrjV/2glq6bykJMQlM7hM6iVx0bKH1XS06LqVg20ew9BE4dRDG3QKZf4TO7osNgtnNk9zfD6isViK6dGnX61XXVrPywEquGHQFUSb/3AAsRHuTBCWC39FtsPRhfTiv12i44XUYcLbRUfmNqtFv1G1Pqw+uprq2mssHXd6uzyuEP0mCEsGrqhS++DPkvw4xneHy+ZD+i5AbznNlRvbXACyc1TzROizWdh/iW7p3Kd1iu5Hey+ViMEIEpdD/Thfhx14LhW/BiqfBUq7fbHvRo9Cpm9GRtZtpaf3cnlNWa7tu93665jSrD65m2rBpRARx6b0QTUmCEsFl72r47GE4ugUGng+X/Rl6jzY6qnY3Pb2/23PKam3XMvOVB1ZS46iR4T0RciRBieBQtk/fcn3bR5CQAje+AyOu1lf1DkM2uwOAqIjmyw05amratcx86b6l9OnUR9beEyFHEpQwVk0lfPmCvrOtKQIuegzO+SVExRkdmV/d+vq3QPM5KKWUPsTXTitJnLSeZO2htdw28jZZe0+EHElQwhgOB2z6L+T9UV/UdcyN+ioQCWcYHVlA3DTJzRCfzQYOR7sN8eXtz6NW1XLZoMva5fmECCRJUCLw9q6Bzx+FHzZC3wkw/a2QWgWiPVw3wXWRhMNiAcAU1z5l5p/t+4yULimMSB7RLs8nRCBJghKBc2wn5D0BOz6Frv30PZpGTwNTxxt6qq6xAxAX3biqzlGtJygttu1DnEcqj/DdD98xc+xM2TlXhCRJUML/Ko/r9zOte0NfnuiSJ+Cse8N+nsmTn7+p7wfVbA7KUg2AKb7tr82nez9Fobhq8FVtfi4hjCAJSviPzQLf/h3WPK8XQ6TfARc+HPKrjbeHW88a4PK4o1pPUG1dSUIpxUd7PmJcj3EM6Or6WkIEO0lQov05HLB5MSz/I5w8AMMu19fN6zHc6MiCxlXjXC8WW5egTG0c4tteup3d5bv5/Vm/b9PzCGEkSVCife1ZqSemw+v1XW2veQXMFxodVdA5ZbEB0DW28cKtqp2KJD7a8xFRpiimDpzapucRwkiSoET7OFSoJ6aiLyChP1z7Koyd0SELIHxx99vrgOZzUO1RJFHrqOXTvZ9yYb8LSYhJaH2QQhhMEpRom+O7YMWfYOuHEN9NX5oo/U6I9M+W5eHijnMHujxeXyTRhh7U2sNrKbWUSnGECHmSoETrnDwEq7Jg/bt6Nd6FD8PZ/w9iuxodWUi4bHQfl8frelCmNhRJLNmzhMSYRM4/4/xWP4cQwUASlGiZqlJ9aaLvFoDDDpPuhvMfksq8FiqtrAEguVPjbTUczh6UFte6Ib5TNadYeWAl1w25jqgI2ZhQhDZJUMI31gr49lX46m9gPQXjboIpcyFJSphb4953CwAX90HVV/G1rgf1SdEnWO1Wrh16bdsCFCIISIISntVUwnevwVd/hepSvWT8kt9Dr1FGRxbS7j7f7PL4j0USLU9QSikW71zMiOQRjOom/z4i9EmCEq7VVMG6f8CXL0LVcRiSqfeY+qUZHVlYyBjZy+Vxh6UaLTYWrRXVj1tPbGVH2Q4em/xYW8MTIihIghKN2SxQ8Ka++kPlUTBfBBc9Av0nGR1ZWDlaofeUenZp3FNS1ZZWD+/l7MohNiKWK8xXtDk+IYKBJCihq7VC4Tuw5jmo+EHfzfbGt2HAOUZHFpbu/9d6wMV9UBZLqwokqmxVfFr0KZcOvJQu0V3aJUYhjCYJqqOzVeul4l++CKcOQsrZcP0CGHSB0ZGFtXunDHZ5XFmqW9WDWrZvGVW1VUwbNq2toQkRNCRBdVTW0/rq4mv/pg/l9Z8M17wM5ilhu816MJkyvKfL446qarRW3KSbsysHc4KZ8T3GtzU0IYKGJKiOproMvl2grzJeXaYnpAvehAHnSmIKoMPlejl538TGw3kOi6XFC8VuObGF7499z5yJc2TfJxFWJEF1FKePwTevwHevQ02FXi5+wUPQL93oyDqkBxZuAFzfB2Xq1KlFz/Wvbf8iLjKOa4Zc027xCREMJEGFu5OH9GG8greg1gKjroXzfwu9xxgdWYd2/8VDXR53WCxEdO/u8/OUWkpZuncp1w29ToojRNiRBBWujmzRE9OmRaCUvvLDeQ9Ad9c/GEVgnTfUdRJyWKoxtaCKb/HOxdQ4arjlzFvaKzQhgoYkqHCiFOxdDWtfgt15+vbq6b/QF3GVJYmCSvGJKgBSusU3Oq6qLT6vZF7rqGXhjoVM7jMZc6LrlSmECGWSoMKBvRa2fqAnph82QqcecPFjenKKTzY6OuHC73I2Am7ug/KxSGLlgZUcqTrCI5Mfaff4hAgGkqBC
[base64-encoded PNG output truncated: plot of the smoothed indicator relaxation for several temperatures, also saved as images/smoothed_indicator.pdf]\n", 80 | "text/plain": [ 81 | "
" 82 | ] 83 | }, 84 | "metadata": { 85 | "needs_background": "light" 86 | }, 87 | "output_type": "display_data" 88 | } 89 | ], 90 | "source": [ 91 | "x = np.linspace(-1, 1, 1000)\n", 92 | "plt.plot([0, 0], [0, 1], ':')\n", 93 | "for temp in [0.5, 0.1, 0.01]:\n", 94 | " plt.plot(x, np.exp(-smoothed_log_indicator(torch.tensor(x), temp)).numpy(), label=f'$\\\\tau$={temp}')\n", 95 | "plt.legend(fontsize=16)\n", 96 | "plt.tight_layout()\n", 97 | "plt.savefig('images/smoothed_indicator.pdf')" 98 | ] 99 | } 100 | ], 101 | "metadata": { 102 | "kernelspec": { 103 | "display_name": "Python 3", 104 | "language": "python", 105 | "name": "python3" 106 | }, 107 | "language_info": { 108 | "codemirror_mode": { 109 | "name": "ipython", 110 | "version": 3 111 | }, 112 | "file_extension": ".py", 113 | "mimetype": "text/x-python", 114 | "name": "python", 115 | "nbconvert_exporter": "python", 116 | "pygments_lexer": "ipython3", 117 | "version": "3.6.9" 118 | } 119 | }, 120 | "nbformat": 4, 121 | "nbformat_minor": 2 122 | } 123 | -------------------------------------------------------------------------------- /images/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/insilicomedicine/DD-VAE/13498d098bae2c8177abec61ab80d8b618d274f3/images/.DS_Store -------------------------------------------------------------------------------- /images/kernels.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/insilicomedicine/DD-VAE/13498d098bae2c8177abec61ab80d8b618d274f3/images/kernels.pdf -------------------------------------------------------------------------------- /images/mnist/latent_N_N.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/insilicomedicine/DD-VAE/13498d098bae2c8177abec61ab80d8b618d274f3/images/mnist/latent_N_N.png -------------------------------------------------------------------------------- /images/mnist/latent_U_U.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/insilicomedicine/DD-VAE/13498d098bae2c8177abec61ab80d8b618d274f3/images/mnist/latent_U_U.png -------------------------------------------------------------------------------- /images/moses_FCD.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/insilicomedicine/DD-VAE/13498d098bae2c8177abec61ab80d8b618d274f3/images/moses_FCD.pdf -------------------------------------------------------------------------------- /images/moses_SNN.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/insilicomedicine/DD-VAE/13498d098bae2c8177abec61ab80d8b618d274f3/images/moses_SNN.pdf -------------------------------------------------------------------------------- /images/smoothed_indicator.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/insilicomedicine/DD-VAE/13498d098bae2c8177abec61ab80d8b618d274f3/images/smoothed_indicator.pdf -------------------------------------------------------------------------------- /images/synthetic/N_N.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/insilicomedicine/DD-VAE/13498d098bae2c8177abec61ab80d8b618d274f3/images/synthetic/N_N.png 
-------------------------------------------------------------------------------- /images/synthetic/U_T.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/insilicomedicine/DD-VAE/13498d098bae2c8177abec61ab80d8b618d274f3/images/synthetic/U_T.png -------------------------------------------------------------------------------- /images/synthetic/U_U.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/insilicomedicine/DD-VAE/13498d098bae2c8177abec61ab80d8b618d274f3/images/synthetic/U_U.png -------------------------------------------------------------------------------- /images/zinc/DD_VAE_GAUSSIAN_molecule_0.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/insilicomedicine/DD-VAE/13498d098bae2c8177abec61ab80d8b618d274f3/images/zinc/DD_VAE_GAUSSIAN_molecule_0.pdf -------------------------------------------------------------------------------- /images/zinc/DD_VAE_GAUSSIAN_molecule_1.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/insilicomedicine/DD-VAE/13498d098bae2c8177abec61ab80d8b618d274f3/images/zinc/DD_VAE_GAUSSIAN_molecule_1.pdf -------------------------------------------------------------------------------- /images/zinc/DD_VAE_GAUSSIAN_molecule_2.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/insilicomedicine/DD-VAE/13498d098bae2c8177abec61ab80d8b618d274f3/images/zinc/DD_VAE_GAUSSIAN_molecule_2.pdf -------------------------------------------------------------------------------- /images/zinc/DD_VAE_GAUSSIAN_top50_molecules.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/insilicomedicine/DD-VAE/13498d098bae2c8177abec61ab80d8b618d274f3/images/zinc/DD_VAE_GAUSSIAN_top50_molecules.pdf -------------------------------------------------------------------------------- /images/zinc/DD_VAE_TRICUBE_molecule_0.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/insilicomedicine/DD-VAE/13498d098bae2c8177abec61ab80d8b618d274f3/images/zinc/DD_VAE_TRICUBE_molecule_0.pdf -------------------------------------------------------------------------------- /images/zinc/DD_VAE_TRICUBE_molecule_1.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/insilicomedicine/DD-VAE/13498d098bae2c8177abec61ab80d8b618d274f3/images/zinc/DD_VAE_TRICUBE_molecule_1.pdf -------------------------------------------------------------------------------- /images/zinc/DD_VAE_TRICUBE_molecule_2.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/insilicomedicine/DD-VAE/13498d098bae2c8177abec61ab80d8b618d274f3/images/zinc/DD_VAE_TRICUBE_molecule_2.pdf -------------------------------------------------------------------------------- /images/zinc/DD_VAE_TRICUBE_top50_molecules.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/insilicomedicine/DD-VAE/13498d098bae2c8177abec61ab80d8b618d274f3/images/zinc/DD_VAE_TRICUBE_top50_molecules.pdf -------------------------------------------------------------------------------- /images/zinc/VAE_GAUSSIAN_molecule_0.pdf: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/insilicomedicine/DD-VAE/13498d098bae2c8177abec61ab80d8b618d274f3/images/zinc/VAE_GAUSSIAN_molecule_0.pdf -------------------------------------------------------------------------------- /images/zinc/VAE_GAUSSIAN_molecule_1.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/insilicomedicine/DD-VAE/13498d098bae2c8177abec61ab80d8b618d274f3/images/zinc/VAE_GAUSSIAN_molecule_1.pdf -------------------------------------------------------------------------------- /images/zinc/VAE_GAUSSIAN_molecule_2.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/insilicomedicine/DD-VAE/13498d098bae2c8177abec61ab80d8b618d274f3/images/zinc/VAE_GAUSSIAN_molecule_2.pdf -------------------------------------------------------------------------------- /images/zinc/VAE_GAUSSIAN_top50_molecules.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/insilicomedicine/DD-VAE/13498d098bae2c8177abec61ab80d8b618d274f3/images/zinc/VAE_GAUSSIAN_top50_molecules.pdf -------------------------------------------------------------------------------- /images/zinc/VAE_TRICUBE_molecule_0.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/insilicomedicine/DD-VAE/13498d098bae2c8177abec61ab80d8b618d274f3/images/zinc/VAE_TRICUBE_molecule_0.pdf -------------------------------------------------------------------------------- /images/zinc/VAE_TRICUBE_molecule_1.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/insilicomedicine/DD-VAE/13498d098bae2c8177abec61ab80d8b618d274f3/images/zinc/VAE_TRICUBE_molecule_1.pdf -------------------------------------------------------------------------------- /images/zinc/VAE_TRICUBE_molecule_2.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/insilicomedicine/DD-VAE/13498d098bae2c8177abec61ab80d8b618d274f3/images/zinc/VAE_TRICUBE_molecule_2.pdf -------------------------------------------------------------------------------- /images/zinc/VAE_TRICUBE_top50_molecules.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/insilicomedicine/DD-VAE/13498d098bae2c8177abec61ab80d8b618d274f3/images/zinc/VAE_TRICUBE_top50_molecules.pdf -------------------------------------------------------------------------------- /moses_prepare_metrics.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Distribution learning on MOSES dataset: calculating metrics\n", 8 | "\n", 9 | "You can calculate metrics from checkpoints using this notebook. 
Note that training the models takes ~30h per model on Titan X (Pascal); computing MOSES metrics for checkpoints takes ~40h per model.\n", 10 | "\n", 11 | "To reproduce the models and statistics, run the following bash script:\n", 12 | "```{bash}\n", 13 | "for SEED in 1 2 3\n", 14 | "do\n", 15 | " for PROPOSAL in gaussian triweight\n", 16 | " do\n", 17 | " python train.py --config configs/moses/VAE_$PROPOSAL\\_seed$SEED.ini --device cuda:0\n", 18 | " python train.py --config configs/moses/DD-VAE_$PROPOSAL\\_seed$SEED.ini --device cuda:0\n", 19 | " done\n", 20 | "done\n", 21 | "```\n", 22 | "\n", 23 | "This script will save models into `models/moses` folder and tensorboard logs into `logs/moses` folder.\n", 24 | "\n", 25 | "The notebook below will create files with all MOSES metrics for each checkpoint. `moses_plots.ipynb` will use logs and MOSES metrics to build final plots." 26 | ] 27 | }, 28 | { 29 | "cell_type": "code", 30 | "execution_count": 1, 31 | "metadata": {}, 32 | "outputs": [ 33 | { 34 | "name": "stderr", 35 | "output_type": "stream", 36 | "text": [ 37 | "RDKit WARNING: [11:32:06] Enabling RDKit 2019.09.3 jupyter extensions\n" 38 | ] 39 | } 40 | ], 41 | "source": [ 42 | "import os\n", 43 | "import glob\n", 44 | "import pickle\n", 45 | "import gc\n", 46 | "from time import sleep\n", 47 | "\n", 48 | "import rdkit\n", 49 | "import pandas as pd\n", 50 | "from tqdm.auto import tqdm\n", 51 | "import numpy as np\n", 52 | "import torch\n", 53 | "from moses.metrics import get_all_metrics\n", 54 | "\n", 55 | "from dd_vae.vae_rnn import VAE_RNN\n", 56 | "from dd_vae.utils import prepare_seed\n", 57 | "\n", 58 | "rdkit.rdBase.DisableLog('rdApp.*')" 59 | ] 60 | }, 61 | { 62 | "cell_type": "code", 63 | "execution_count": 2, 64 | "metadata": {}, 65 | "outputs": [], 66 | "source": [ 67 | "DEVICE = 'cuda:0'\n", 68 | "N_JOBS = 32\n", 69 | "\n", 70 | "def load_csv(path):\n", 71 | " df = pd.read_csv(path, compression='gzip', dtype='str', header=None)\n", 72 | " return list(df[0].values)" 73 | ] 74 | }, 75 | { 76 | "cell_type": "code", 77 | "execution_count": 3, 78 | "metadata": {}, 79 | "outputs": [], 80 | "source": [ 81 | "test = load_csv('data/moses/test.csv.gz')\n", 82 | "test_scaffolds = load_csv('data/moses/test_scaffolds.csv.gz')\n", 83 | "train = load_csv('data/moses/train.csv.gz')\n", 84 | "\n", 85 | "test_stats = np.load('data/moses/test_stats.npz', allow_pickle=True)['stats'].item()\n", 86 | "test_scaffold_stats = np.load('data/moses/test_scaffolds_stats.npz', allow_pickle=True)['stats'].item()" 87 | ] 88 | }, 89 | { 90 | "cell_type": "code", 91 | "execution_count": 4, 92 | "metadata": {}, 93 | "outputs": [], 94 | "source": [ 95 | "def prepare_metrics(name, checkpoint_id, overwrite=False, device='cpu', n_jobs=1):\n", 96 | " path = f'models/moses/{name}/checkpoint_{checkpoint_id}.pt'\n", 97 | " output_path = f'metrics/{name}/{checkpoint_id}.pkl'\n", 98 | " os.makedirs(f'metrics/{name}/', exist_ok=True)\n", 99 | " if os.path.exists(output_path) and not overwrite:\n", 100 | " raise ValueError(f\"Metrics file {output_path} already exists\")\n", 101 | " model = VAE_RNN.load(path).to(device)\n", 102 | " prepare_seed(1)\n", 103 | " with torch.no_grad():\n", 104 | " smiles = sum([model.sample(100) for _ in tqdm(range(300))], [])\n", 105 | " model.to(device)\n", 106 | " del model\n", 107 | " torch.cuda.empty_cache()\n", 108 | " gc.collect()\n", 109 | " torch.cuda.empty_cache()\n", 110 | " if device == 'cpu':\n", 111 | " gpu = -1\n", 112 | " else:\n", 113 | " gpu = int(device.split(':')[1])\n", 114 
| "\n", 115 | " metrics = get_all_metrics(\n", 116 | " test=test, gen=smiles,\n", 117 | " test_scaffolds=test_scaffolds, gpu=gpu, n_jobs=n_jobs,\n", 118 | " ptest=test_stats,\n", 119 | " ptest_scaffolds=test_scaffold_stats,\n", 120 | " train=train)\n", 121 | "\n", 122 | " with open(output_path, 'wb') as f:\n", 123 | " pickle.dump(metrics, f)\n", 124 | " torch.cuda.empty_cache()\n", 125 | " gc.collect()\n", 126 | " torch.cuda.empty_cache()" 127 | ] 128 | }, 129 | { 130 | "cell_type": "code", 131 | "execution_count": 5, 132 | "metadata": {}, 133 | "outputs": [], 134 | "source": [ 135 | "def get_epoch_id(path):\n", 136 | " try:\n", 137 | " return int(path.split('_')[-1][:-3])\n", 138 | " except ValueError:\n", 139 | " return None" 140 | ] 141 | }, 142 | { 143 | "cell_type": "code", 144 | "execution_count": 6, 145 | "metadata": { 146 | "scrolled": false 147 | }, 148 | "outputs": [], 149 | "source": [ 150 | "checkpoints = glob.glob('models/moses/*/*.pt')\n", 151 | "checkpoints = [x for x in checkpoints if get_epoch_id(x) is not None]\n", 152 | "for checkpoint in checkpoints:\n", 153 | " try:\n", 154 | " epoch_id = int(checkpoint.split('_')[-1][:-3])\n", 155 | " except ValueError:\n", 156 | " continue\n", 157 | " config_id = checkpoint.split('/')[-2]\n", 158 | " print(f\"Processing {checkpoint}\")\n", 159 | " try:\n", 160 | " prepare_metrics(config_id, epoch_id, device=DEVICE, n_jobs=N_JOBS)\n", 161 | " except ValueError:\n", 162 | " pass" 163 | ] 164 | } 165 | ], 166 | "metadata": { 167 | "kernelspec": { 168 | "display_name": "Python 3", 169 | "language": "python", 170 | "name": "python3" 171 | }, 172 | "language_info": { 173 | "codemirror_mode": { 174 | "name": "ipython", 175 | "version": 3 176 | }, 177 | "file_extension": ".py", 178 | "mimetype": "text/x-python", 179 | "name": "python", 180 | "nbconvert_exporter": "python", 181 | "pygments_lexer": "ipython3", 182 | "version": "3.6.9" 183 | } 184 | }, 185 | "nbformat": 4, 186 | "nbformat_minor": 2 187 | } 188 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup, find_packages 2 | 3 | 4 | setup( 5 | name='dd_vae', 6 | packages=find_packages(), 7 | python_requires='>=3.5.0', 8 | version='0.1', 9 | install_requires=[ 10 | 'tqdm', 'numpy', 11 | 'pandas', 'scipy', 12 | 'torch', 'networkx', 13 | 'Theano' 14 | ], 15 | description=('DD-VAE'), 16 | ) 17 | -------------------------------------------------------------------------------- /train.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import ast 3 | import sys 4 | from functools import partial 5 | from configparser import ConfigParser 6 | from tqdm import tqdm 7 | import os 8 | import pandas as pd 9 | 10 | import torch 11 | from torch.utils.data import DataLoader 12 | from tensorboardX import SummaryWriter 13 | from torchvision.datasets import MNIST 14 | 15 | from dd_vae.vae_mnist import VAE_MNIST 16 | from dd_vae.vae_rnn import VAE_RNN 17 | from dd_vae.utils import CharVocab, collate, StringDataset, \ 18 | LinearGrowth, combine_loss, prepare_seed 19 | from torchvision import transforms 20 | from torch.optim.lr_scheduler import MultiStepLR, StepLR 21 | 22 | 23 | def parse_args(args): 24 | parser = argparse.ArgumentParser() 25 | parser.add_argument('--config', 26 | type=str, required=True, 27 | help='Path to configuration file') 28 | parser.add_argument('--device', 29 | type=str, 
required=True, default='cpu', 30 | help='Training device') 31 | parsed_args, errors = parser.parse_known_args(args[1:]) 32 | if len(errors) != 0: 33 | raise ValueError(f"Unknown arguments {errors}") 34 | return parsed_args 35 | 36 | 37 | def infer_config_types(parameters): 38 | return dict({ 39 | k: ast.literal_eval(v) 40 | for k, v in parameters.items() 41 | }) 42 | 43 | 44 | def parse_config(config_path): 45 | config = ConfigParser() 46 | paths = config.read(config_path) 47 | if len(paths) == 0: 48 | raise ValueError(f"Config file {config_path} does not exist") 49 | 50 | infered_config = { 51 | k: infer_config_types(v) 52 | for k, v in config.items() 53 | } 54 | return infered_config 55 | 56 | 57 | def add_dict(left, right): 58 | for key, value in right.items(): 59 | left[key] = left.get(key, 0) + value.item() 60 | 61 | 62 | def train_epoch(model, loss_weights, epoch, data_loader, 63 | backward, temperature, logger, 64 | optimizer, verbose=True, clamp=None, fine_tune=False): 65 | if backward: 66 | label = '/train' 67 | else: 68 | label = '/test' 69 | total_loss = {} 70 | iterations = 0 71 | for batch in tqdm( 72 | data_loader, postfix=f'Epoch {epoch} {label}', 73 | disable=not verbose): 74 | iterations += 1 75 | loss_components = model.get_loss_components(batch, temperature) 76 | loss = combine_loss(loss_components, loss_weights) 77 | loss_components['loss'] = loss 78 | add_dict(total_loss, loss_components) 79 | if backward: 80 | optimizer['encoder'].zero_grad() 81 | optimizer['decoder'].zero_grad() 82 | loss.backward() 83 | if clamp is not None: 84 | for param in model.parameters(): 85 | param.grad.clamp_(-clamp, clamp) 86 | if not fine_tune: 87 | optimizer['encoder'].step() 88 | optimizer['decoder'].step() 89 | 90 | for key, value in total_loss.items(): 91 | logger.add_scalar(key + label, 92 | value / iterations, 93 | global_step=epoch) 94 | 95 | 96 | def prepare_mnist(config): 97 | transform = transforms.Compose([ 98 | transforms.ToTensor(), 99 | transforms.Lambda(lambda x: (x > 0.3).float()) 100 | ]) 101 | train_dataset = MNIST('data/mnist/', train=True, 102 | download=True, transform=transform) 103 | test_dataset = MNIST('data/mnist/', train=False, 104 | transform=transform) 105 | batch_size = config['train']['batch_size'] 106 | train_loader = DataLoader( 107 | train_dataset, batch_size=batch_size, shuffle=True) 108 | test_loader = DataLoader( 109 | test_dataset, batch_size=batch_size, shuffle=False) 110 | if 'load' in config['model']: 111 | model = VAE_MNIST.load(config['model']['load']) 112 | else: 113 | model = VAE_MNIST(**config['model']) 114 | return train_loader, test_loader, model 115 | 116 | 117 | def load_csv(path): 118 | if path.endswith('.csv'): 119 | return [x.strip() for x in open(path)] 120 | if path.endswith('.csv.gz'): 121 | df = pd.read_csv(path, compression='gzip', 122 | dtype='str', header=None) 123 | return list(df[0].values) 124 | raise ValueError("Unknown format") 125 | 126 | 127 | def prepare_rnn(config): 128 | data_config = config['data'] 129 | train_config = config['train'] 130 | train_data = load_csv(data_config['train_path']) 131 | vocab = CharVocab.from_data(train_data) 132 | if 'load' in config['model']: 133 | print("LOADING") 134 | model = VAE_RNN.load(config['model']['load']) 135 | vocab = model.vocab 136 | else: 137 | model = VAE_RNN(vocab=vocab, **config['model']) 138 | collate_pad = partial(collate, pad=vocab.pad) 139 | train_dataset = StringDataset(vocab, train_data) 140 | train_loader = DataLoader( 141 | train_dataset, collate_fn=collate_pad, 142 
| batch_size=train_config['batch_size'], shuffle=True) 143 | if 'test_path' in data_config: 144 | test_data = load_csv(data_config['test_path']) 145 | test_dataset = StringDataset(vocab, test_data) 146 | test_loader = DataLoader( 147 | test_dataset, collate_fn=collate_pad, 148 | batch_size=train_config['batch_size']) 149 | else: 150 | test_loader = None 151 | return train_loader, test_loader, model 152 | 153 | 154 | def train(config_path, device): 155 | """ 156 | Trains a deterministic VAE model. 157 | 158 | Parameters: 159 | config_path: path to .ini file with model configuration 160 | device: device for training ('cpu' for CPU, 'cuda:n' for GPU #n) 161 | train_data: list of train dataset strings 162 | test_data: list of test dataset strings 163 | """ 164 | config = parse_config(config_path) 165 | prepare_seed(seed=config['train'].get('seed', 777)) 166 | 167 | data_config = config['data'] 168 | if data_config['title'].lower() == 'mnist': 169 | train_loader, test_loader, model = prepare_mnist(config) 170 | else: 171 | train_loader, test_loader, model = prepare_rnn(config) 172 | 173 | model = model.to(device) 174 | 175 | train_config = config['train'] 176 | save_config = config['save'] 177 | kl_config = config['kl'] 178 | temperature_config = config['temperature'] 179 | model_dir = save_config['model_dir'] 180 | os.makedirs(model_dir, exist_ok=True) 181 | os.makedirs(save_config['log_dir'], exist_ok=True) 182 | 183 | optimizer = { 184 | 'encoder': torch.optim.Adam(model.encoder_parameters(), 185 | lr=train_config['lr']), 186 | 'decoder': torch.optim.Adam(model.decoder_parameters(), 187 | lr=train_config['lr']) 188 | } 189 | scheduler_class = ( 190 | MultiStepLR 191 | if isinstance(train_config['lr_reduce_epochs'], (list, tuple)) 192 | else StepLR 193 | ) 194 | scheduler = { 195 | 'encoder': scheduler_class( 196 | optimizer['encoder'], 197 | train_config['lr_reduce_epochs'], 198 | train_config['lr_reduce_gamma']), 199 | 'decoder': scheduler_class( 200 | optimizer['decoder'], 201 | train_config['lr_reduce_epochs'], 202 | train_config['lr_reduce_gamma']) 203 | } 204 | 205 | logger = SummaryWriter(save_config['log_dir']) 206 | 207 | kl_weight = LinearGrowth(**kl_config) 208 | temperature = LinearGrowth(**temperature_config) 209 | epoch_verbose = train_config.get('verbose', None) == 'epoch' 210 | batch_verbose = not epoch_verbose 211 | 212 | pretrain = train_config.get('pretrain', 0) 213 | if pretrain != 0: 214 | pretrain_weight = LinearGrowth(0, 1, 0, pretrain) 215 | fine_tune = train_config.get('fune_tune', 0) 216 | for epoch in tqdm(range(train_config['epochs'] + pretrain + fine_tune), 217 | disable=not epoch_verbose): 218 | fine_tune = epoch >= train_config['epochs'] + pretrain 219 | current_temperature = temperature(epoch) 220 | if epoch < pretrain: 221 | w = pretrain_weight(epoch) 222 | loss_weights = {'argmax_nll': w, 223 | 'sample_nll': 1 - w} 224 | elif train_config['mode'] == 'argmax': 225 | loss_weights = {'argmax_nll': 1} 226 | logger.add_scalar('temperature', current_temperature, epoch) 227 | else: 228 | loss_weights = {'sample_nll': 1} 229 | loss_weights['kl_loss'] = kl_weight(epoch) 230 | logger.add_scalar('kl_weight', loss_weights['kl_loss'], epoch) 231 | 232 | scheduler['encoder'].step() 233 | scheduler['decoder'].step() 234 | 235 | train_epoch( 236 | model, loss_weights, epoch, train_loader, True, 237 | current_temperature, logger, 238 | optimizer, batch_verbose, 239 | clamp=train_config.get('clamp'), 240 | fine_tune=fine_tune 241 | ) 242 | 243 | if test_loader is not None: 244 
| with torch.no_grad(): 245 | train_epoch( 246 | model, loss_weights, epoch, test_loader, False, 247 | current_temperature, logger, 248 | optimizer, batch_verbose, 249 | clamp=train_config.get('clamp'), 250 | fine_tune=fine_tune 251 | ) 252 | if train_config.get("checkpoint", "epoch") == "epoch": 253 | path = f"{model_dir}/checkpoint_{epoch+1}.pt" 254 | else: 255 | path = f"{model_dir}/checkpoint.pt" 256 | model.save(path) 257 | 258 | model.save(f"{model_dir}/checkpoint.pt") 259 | logger.close() 260 | 261 | 262 | if __name__ == "__main__": 263 | parsed_args = parse_args(sys.argv) 264 | train(parsed_args.config, parsed_args.device) 265 | -------------------------------------------------------------------------------- /unit_test.py: -------------------------------------------------------------------------------- 1 | from unittest import TestCase 2 | import numpy as np 3 | import torch 4 | 5 | from dd_vae.proposals import get_proposals 6 | 7 | 8 | class TestUtils(TestCase): 9 | def test_kl(self): 10 | np.random.seed(0) 11 | batch_size = 100000 12 | m = np.random.randn(32, 1)/3 13 | s = np.exp(np.random.randn(32, 1))/3 14 | m_rep = torch.tensor(np.tile(m, (1, batch_size))) 15 | s_rep = torch.tensor(np.tile(s, (1, batch_size))) 16 | m_t = torch.tensor(m) 17 | s_t = torch.tensor(s) 18 | for name, proposal_class in get_proposals().items(): 19 | with self.subTest(name=name): 20 | proposal = proposal_class() 21 | samples = proposal.sample(m_rep, s_rep) 22 | density = proposal.density((samples-m_rep) / s_rep) / s_rep 23 | kl = proposal.kl(m_t, s_t) 24 | kl_mc = ( 25 | np.log(density * np.sqrt(2 * np.pi)) + 26 | samples**2 / 2 27 | ).mean(1) 28 | self.assertLess( 29 | torch.abs(kl - kl_mc).max().item(), 0.05, 30 | f"Failed proposal {name} for Gaussian prior" 31 | ) 32 | 33 | def test_kl_uniform(self): 34 | np.random.seed(0) 35 | batch_size = 100000 36 | m = np.random.randn(32, 1)/3 37 | s = np.exp(np.random.randn(32, 1))/3 38 | m_rep = torch.tensor(np.tile(m, (1, batch_size))) 39 | s_rep = torch.tensor(np.tile(s, (1, batch_size))) 40 | m_t = torch.tensor(m) 41 | s_t = torch.tensor(s) 42 | for name, proposal_class in get_proposals().items(): 43 | if name == 'gaussian': 44 | continue 45 | with self.subTest(name=name): 46 | proposal = proposal_class() 47 | samples = proposal.sample(m_rep, s_rep) 48 | density = proposal.density((samples-m_rep) / s_rep) / s_rep 49 | kl = proposal.kl_uniform(m_t, s_t) 50 | kl_mc = np.log(density * 2).mean(1) 51 | self.assertLess( 52 | torch.abs(kl - kl_mc).max().item(), 0.05, 53 | f"Failed proposal {name} for uniform prior" 54 | ) 55 | --------------------------------------------------------------------------------
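`moses_prepare_metrics.ipynb` above stores one pickle per checkpoint under `metrics/<name>/<checkpoint_id>.pkl`. The snippet below is a minimal sketch, not code from the repository, of how those files could be collected into a single `pandas` DataFrame as a starting point for `moses_plots.ipynb`-style figures; it assumes each pickle holds a flat dict of scalar metrics as returned by `moses.metrics.get_all_metrics`, and the `config`/`epoch` column names are chosen here purely for illustration.

```python
# Minimal sketch (not part of the repository): gather per-checkpoint MOSES metric
# pickles written by moses_prepare_metrics.ipynb into one table for plotting.
# Assumes each pickle contains a flat dict of scalar metrics; 'config' and 'epoch'
# are column names chosen here, not names used by the repository code.
import glob
import os
import pickle

import pandas as pd

rows = []
for path in glob.glob('metrics/*/*.pkl'):
    config = os.path.basename(os.path.dirname(path))           # e.g. DD-VAE_triweight_seed1
    epoch = int(os.path.splitext(os.path.basename(path))[0])   # checkpoint id from the file name
    with open(path, 'rb') as f:
        metrics = pickle.load(f)
    rows.append({'config': config, 'epoch': epoch, **metrics})

df = pd.DataFrame(rows).sort_values(['config', 'epoch'])
print(df.head())
```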