├── .gitignore ├── .travis.yml ├── LICENSE ├── README.md ├── examples ├── example_filter.py └── timeseries.ipynb ├── imgs └── particle_equations.png ├── pfilter ├── __init__.py └── pfilter.py ├── requirements.txt ├── setup.cfg ├── setup.py └── tests └── test_filter.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | env/ 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | downloads/ 16 | eggs/ 17 | .eggs/ 18 | lib/ 19 | lib64/ 20 | parts/ 21 | sdist/ 22 | var/ 23 | *.egg-info/ 24 | .installed.cfg 25 | *.egg 26 | 27 | # PyInstaller 28 | # Usually these files are written by a python script from a template 29 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 30 | *.manifest 31 | *.spec 32 | 33 | .vscode 34 | 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *,cover 49 | .hypothesis/ 50 | 51 | # Translations 52 | *.mo 53 | *.pot 54 | 55 | # Django stuff: 56 | *.log 57 | local_settings.py 58 | 59 | # Flask stuff: 60 | instance/ 61 | .webassets-cache 62 | 63 | # Scrapy stuff: 64 | .scrapy 65 | 66 | # Sphinx documentation 67 | docs/_build/ 68 | 69 | # PyBuilder 70 | target/ 71 | 72 | # IPython Notebook 73 | .ipynb_checkpoints 74 | 75 | # pyenv 76 | .python-version 77 | 78 | # celery beat schedule file 79 | celerybeat-schedule 80 | 81 | # dotenv 82 | .env 83 | 84 | # virtualenv 85 | venv/ 86 | ENV/ 87 | 88 | # Spyder project settings 89 | .spyderproject 90 | 91 | # Rope project settings 92 | .ropeproject 93 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: python 2 | 3 | # Setting sudo to false opts in to Travis-CI container-based builds. 4 | sudo: false 5 | 6 | python: 7 | - 3.6 8 | 9 | before_install: 10 | - wget http://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh -O miniconda.sh 11 | - chmod +x miniconda.sh 12 | - ./miniconda.sh -b -p $HOME/miniconda 13 | - export PATH=/home/travis/miniconda/bin:$PATH 14 | - conda update --yes conda 15 | 16 | install: 17 | - conda create --yes -n test python=$TRAVIS_PYTHON_VERSION 18 | - source activate test 19 | - pip install -U pip 20 | - pip install -r requirements.txt 21 | - python setup.py install 22 | 23 | script: pytest 24 | 25 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2017 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 | 
-------------------------------------------------------------------------------- /README.md: --------------------------------------------------------------------------------
1 | # pfilter
2 | Basic Python particle filter. Plain SIR filtering, with various resampling algorithms. Written to be simple and clear; not necessarily the most efficient or most flexible implementation. Depends on [NumPy](http://numpy.org) only.
3 | 
4 | ## Uses
5 | 
6 | This repo is useful for understanding how a particle filter works, or as a quick way to develop a custom filter of your own from a relatively simple codebase.
7 | ### Alternatives
8 | There are more mature and sophisticated packages for probabilistic filtering in Python (especially for Kalman filtering) if you want an off-the-shelf solution:
9 | 
10 | #### Particle filtering
11 | * [particles](https://github.com/nchopin/particles) Extensive particle filtering, including smoothing and quasi-SMC algorithms
12 | * [FilterPy](https://github.com/rlabbe/filterpy) Provides extensive Kalman filtering and basic particle filtering.
13 | * [pyfilter](https://github.com/tingiskhan/pyfilter) provides Unscented Kalman Filtering, Sequential Importance Resampling and Auxiliary Particle Filter models, and has a number of advanced algorithms implemented, with a PyTorch backend.
14 | 
15 | #### Kalman filtering
16 | * [pykalman](https://github.com/pykalman/pykalman) Easy-to-use Kalman Filter, Extended Kalman Filter and Unscented Kalman Filter implementations
17 | * [simdkalman](https://github.com/oseiskar/simdkalman) Fast implementations of plain Kalman filter banks.
18 | * [torch-kalman](https://github.com/strongio/torch-kalman) PyTorch implementation of Kalman filters, including Pandas dataframe support.
19 | 
20 | 
21 | ## Installation
22 | 
23 | Available via PyPI:
24 | 
25 |     pip install pfilter
26 | 
27 | Or install the git version:
28 | 
29 |     pip install git+https://github.com/johnhw/pfilter.git
30 | 
31 | ## Usage
32 | Create a `ParticleFilter` object, then call `update(observation)` with an observation array to update the state of the particle filter.
33 | 
34 | Calling `update()` without an observation will update the model without any data, i.e. perform a prediction step only.
35 | 
36 | ### Model
37 | 
38 | * Internal state space of `d` dimensions
39 | * Observation space of `h` dimensions
40 | * `n` particles estimating state in each time step
41 | 
42 | Particles are represented as an `(n,d)` matrix of states, one state per row. Observations are generated from this matrix into an `(n,h)` matrix of hypothesized observations via the observation function.
43 | 
44 | ### Functions
45 | You need to specify at the minimum:
46 | 
47 | * an **observation function** `observe_fn(state (n,d)) => observation matrix (n,h)` which will return a predicted observation for an internal state.
48 | * a function that samples from the **initial distribution** `prior_fn => state matrix (n,d)` for all of the internal state variables. These are usually distributions from `scipy.stats`. The utility function `independent_sample` makes it easy to concatenate sampling functions to sample the whole state vector.
49 | * a **weight function** `weight_fn(hyp_observed (n,h), real_observed (h,)) => weight vector (n,)` which specifies how well each of the `hyp_observed` arrays matches the real observation `real_observed`. This must produce a strictly positive weight value for each hypothesized observation, where larger means more similar. This is often an RBF kernel or similar.
50 | 
51 | 
52 | 
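As a minimal sketch of these three functions (illustrative only, not from the original README; it assumes a 1-D state observed directly through an identity observation model):

```python
import numpy as np
from pfilter import ParticleFilter, squared_error

pf = ParticleFilter(
    # prior: draw n particles from a standard normal, as an (n, 1) state matrix
    prior_fn=lambda n: np.random.normal(0, 1, (n, 1)),
    # observation model: the state is observed directly, so h = d = 1
    observe_fn=lambda x: x,
    # weight: RBF similarity between hypothesized and real observations
    weight_fn=lambda hyp, real: squared_error(hyp, real, sigma=0.5),
    n_particles=100,
)

pf.update(np.array([0.5]))  # one SIR step against the observation y = 0.5
print(pf.mean_state)        # weighted mean of the particles, shape (1,)
```

With no `dynamics_fn` or `noise_fn` given, both default to the identity, so this sketch only re-weights and resamples the particles at each step.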
53 | ---
54 | 
55 | Typically, you would also specify:
56 | * **dynamics** a function `dynamics_fn(state (n,d)) => predicted_state (n,d)` to update the state based on internal (forward prediction) dynamics, and
57 | * **diffusion** a function `noise_fn(predicted_state (n,d)) => noisy_state (n,d)` to add diffusion into the sampling process (though you could also merge this into the dynamics).
58 | 
59 | ---
60 | 
61 | You might also specify:
62 | 
63 | * **Internal weighting** a function `internal_weight_fn(state (n,d)) => weight vector (n,)` which provides a weighting to apply on top of the weight function based on the *internal* state. This is useful to impose penalties or to include learned inverse models in the inference.
64 | * **Post-processing transform function** a function `transform_fn(state (n,d), weights (n,)) => states (n,k)` which can apply a post-processing transform and store the result in `transformed_particles`.
65 | 
66 | #### Missing observations
67 | If you want to be able to deal with partial missing values in the observations, the weight function should support masked arrays. The `squared_error(a,b)` function in `pfilter.py` does this, for example.
68 | 
69 | ### Passing values to functions
70 | 
71 | Sometimes it is useful to pass inputs to callback functions like `dynamics_fn(x)` at each time step. You can do this by giving keyword arguments to `update()`.
72 | 
73 | If you call `pf.update(y, t=5)`, **all** of the functions `dynamics_fn, weight_fn, noise_fn, internal_weight_fn, observe_fn` will receive the keyword argument `t=5`. All `kwargs` are forwarded to these calls. You can simply ignore them if they are not used (e.g. define `dynamics_fn = lambda x, **kwargs: real_dynamics(x)`), but this can be useful for propagating inputs that are neither internal states nor observed states to the filter. If no `kwargs` are given to `update`, then no extra arguments are passed to any of the callbacks.
74 | 
75 | ## Attributes
76 | 
77 | The `ParticleFilter` object will have the following useful attributes after updating:
78 | 
79 | * `original_particles` the `(n,d)` collection of particles in the last update step
80 | * `mean_state` the `(d,)` expectation of the state
81 | * `mean_hypothesis` the `(h,)` expectation of the hypothesized observations
82 | * `cov_state` the `(d,d)` covariance matrix of the state
83 | * `map_state` the `(d,)` most likely state
84 | * `map_hypothesis` the `(h,)` most likely hypothesized observation
85 | * `weights` the `(n,)` normalised weights of each particle
86 | 
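For instance (an illustrative sketch, not from the original README), keyword forwarding and these attributes can be used together in a simple tracking loop. Note that when extra keyword arguments are passed to `update()`, every callback you supply should accept `**kwargs`:

```python
import numpy as np
from pfilter import ParticleFilter, squared_error

pf = ParticleFilter(
    prior_fn=lambda n: np.random.normal(0, 1, (n, 1)),
    observe_fn=lambda x, **kwargs: x,
    weight_fn=lambda hyp, real, **kwargs: squared_error(hyp, real, sigma=0.5),
    dynamics_fn=lambda x, **kwargs: x,  # static dynamics; kwargs are ignored
    noise_fn=lambda x, **kwargs: x + np.random.normal(0, 0.05, x.shape),
    n_particles=100,
)

observations = [np.array([0.1]), None, np.array([0.3])]  # None models a missing frame
for t, y in enumerate(observations):
    if y is None:
        pf.update()        # prediction-only step, no data
    else:
        pf.update(y, t=t)  # t is forwarded to every callback as a keyword argument
    print(t, pf.mean_state, pf.map_state)
```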
87 | ## In equations
88 | 
89 | 
90 | ![](imgs/particle_equations.png)
91 | 
92 | ### Example
93 | 
94 | For example, assume we observe 32x32 images and want to track a moving circle. The internal state we are estimating is the 5D vector (x, y, radius, dx, dy), estimated with 200 particles.
95 | 
96 | ```python
97 | import numpy as np
98 | from pfilter import ParticleFilter, gaussian_noise, squared_error, independent_sample
99 | from scipy.stats import norm, gamma, uniform
100 | columns = ["x", "y", "radius", "dx", "dy"]
101 | 
102 | # prior sampling function for each variable (assumes x and y are coordinates in the range 0-32)
103 | prior_fn = independent_sample([uniform(loc=0, scale=32).rvs,
104 |                                uniform(loc=0, scale=32).rvs,
105 |                                gamma(a=2, loc=0, scale=10).rvs,
106 |                                norm(loc=0, scale=0.5).rvs,
107 |                                norm(loc=0, scale=0.5).rvs])
108 | 
109 | # very simple linear dynamics: position += velocity
110 | def velocity(x):
111 |     xp = np.array(x)
112 |     xp[:, 0:2] += xp[:, 3:5]
113 |     return xp
114 | 
115 | # create the filter; `blob` is the observation function described below
116 | pf = ParticleFilter(
117 |     prior_fn=prior_fn,
118 |     observe_fn=blob,
119 |     n_particles=200,
120 |     dynamics_fn=velocity,
121 |     noise_fn=lambda x:
122 |         gaussian_noise(x, sigmas=[0.2, 0.2, 0.1, 0.05, 0.05]),
123 |     weight_fn=lambda x, y: squared_error(x, y, sigma=2),
124 |     resample_proportion=0.1,
125 |     column_names=columns)
126 | 
127 | # assuming `image` is an array of the same dimensions/type as blob's output
128 | pf.update(image)
129 | ```
130 | 
131 | 
132 | * `blob` (200, 5) -> (200, 1024), our observation function, which draws a blob on an image of size 32x32 (1024 pixels) for each internal state
133 | * `velocity` (200, 5) -> (200, 5), our dynamics function, which just applies a single Euler step integrating the velocity
134 | * `prior_fn` which generates a (200, 5) initial random state
135 | * `gaussian_noise` (200, 5) -> (200, 5), which adds noise to the internal state
136 | * `squared_error` ((200, 1024), (1024,)) -> (200,), the similarity measurement
137 | 
138 | 
139 | See the script [examples/example_filter.py](examples/example_filter.py) for a working example using `skimage` and `OpenCV` which tracks a moving white circle.
140 | 
141 | 
142 | 
143 | 
144 | 
145 | ---
146 | 
168 | 
-------------------------------------------------------------------------------- /examples/example_filter.py: --------------------------------------------------------------------------------
1 | # %%
2 | # press ESC to exit the demo!
3 | from pfilter import (
4 |     ParticleFilter,
5 |     gaussian_noise,
6 |     cauchy_noise,
7 |     t_noise,
8 |     squared_error,
9 |     independent_sample,
10 | )
11 | import numpy as np
12 | 
13 | # testing only
14 | from scipy.stats import norm, gamma, uniform
15 | import skimage.draw  # note: skimage.draw.circle was renamed to skimage.draw.disk in newer scikit-image releases
16 | import cv2
17 | 
18 | 
19 | img_size = 48
20 | 
21 | 
22 | def blob(x):
23 |     """Given a matrix of blob positions and sizes (the first three
24 |     columns are x, y, radius), create N img_size x img_size images,
25 |     each with a blob drawn on it as given by the corresponding row of x.
26 | 
27 |     One row of x = [x, y, radius, ...]."""
28 |     y = np.zeros((x.shape[0], img_size, img_size))
29 |     for i, particle in enumerate(x):
30 |         rr, cc = skimage.draw.circle(
31 |             particle[0], particle[1], max(particle[2], 1), shape=(img_size, img_size)
32 |         )
33 |         y[i, rr, cc] = 1
34 |     return y
35 | 
36 | 
37 | # %%
38 | 
39 | # names (this is just for reference for the moment!)
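# The state tracked below is 5-D: blob position (x, y), radius, and velocity (dx, dy).
# independent_sample concatenates one sampling function per column, so prior_fn
# draws the full (n, 5) state matrix in a single call.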
40 | columns = ["x", "y", "radius", "dx", "dy"] 41 | 42 | 43 | # prior sampling function for each variable 44 | # (assumes x and y are coordinates in the range 0-img_size) 45 | prior_fn = independent_sample( 46 | [ 47 | norm(loc=img_size / 2, scale=img_size / 2).rvs, 48 | norm(loc=img_size / 2, scale=img_size / 2).rvs, 49 | gamma(a=1, loc=0, scale=10).rvs, 50 | norm(loc=0, scale=0.5).rvs, 51 | norm(loc=0, scale=0.5).rvs, 52 | ] 53 | ) 54 | 55 | # very simple linear dynamics: x += dx 56 | def velocity(x): 57 | dt = 1.0 58 | xp = ( 59 | x 60 | @ np.array( 61 | [ 62 | [1, 0, 0, dt, 0], 63 | [0, 1, 0, 0, dt], 64 | [0, 0, 1, 0, 0], 65 | [0, 0, 0, 1, 0], 66 | [0, 0, 0, 0, 1], 67 | ] 68 | ).T 69 | ) 70 | 71 | return xp 72 | 73 | 74 | def example_filter(): 75 | # create the filter 76 | pf = ParticleFilter( 77 | prior_fn=prior_fn, 78 | observe_fn=blob, 79 | n_particles=100, 80 | dynamics_fn=velocity, 81 | noise_fn=lambda x: t_noise(x, sigmas=[0.15, 0.15, 0.05, 0.05, 0.15], df=100.0), 82 | weight_fn=lambda x, y: squared_error(x, y, sigma=2), 83 | resample_proportion=0.05, 84 | column_names=columns, 85 | ) 86 | 87 | # np.random.seed(2018) 88 | # start in centre, random radius 89 | s = np.random.uniform(2, 8) 90 | 91 | # random movement direction 92 | dx = np.random.uniform(-0.25, 0.25) 93 | dy = np.random.uniform(-0.25, 0.25) 94 | 95 | # appear at centre 96 | x = img_size // 2 97 | y = img_size // 2 98 | scale_factor = 20 99 | 100 | # create window 101 | cv2.namedWindow("samples", cv2.WINDOW_NORMAL) 102 | cv2.resizeWindow("samples", scale_factor * img_size, scale_factor * img_size) 103 | 104 | for i in range(1000): 105 | # generate the actual image 106 | low_res_img = blob(np.array([[x, y, s]])) 107 | pf.update(low_res_img) 108 | 109 | # resize for drawing onto 110 | img = cv2.resize( 111 | np.squeeze(low_res_img), (0, 0), fx=scale_factor, fy=scale_factor 112 | ) 113 | 114 | cv2.putText( 115 | img, 116 | "ESC to exit", 117 | (50, 50), 118 | cv2.FONT_HERSHEY_SIMPLEX, 119 | 1, 120 | (255, 255, 255), 121 | 2, 122 | cv2.LINE_AA, 123 | ) 124 | 125 | color = cv2.cvtColor(img.astype(np.float32), cv2.COLOR_GRAY2RGB) 126 | 127 | x_hat, y_hat, s_hat, dx_hat, dy_hat = pf.mean_state 128 | 129 | # draw individual particles 130 | for particle in pf.original_particles: 131 | 132 | xa, ya, sa, _, _ = particle 133 | sa = np.clip(sa, 1, 100) 134 | cv2.circle( 135 | color, 136 | (int(ya * scale_factor), int(xa * scale_factor)), 137 | max(int(sa * scale_factor), 1), 138 | (1, 0, 0), 139 | 1, 140 | ) 141 | 142 | # x,y exchange because of ordering between skimage and opencv 143 | cv2.circle( 144 | color, 145 | (int(y_hat * scale_factor), int(x_hat * scale_factor)), 146 | max(int(sa * scale_factor), 1), 147 | (0, 1, 0), 148 | 1, 149 | lineType=cv2.LINE_AA, 150 | ) 151 | 152 | cv2.line( 153 | color, 154 | (int(y_hat * scale_factor), int(x_hat * scale_factor)), 155 | ( 156 | int(y_hat * scale_factor + 5 * dy_hat * scale_factor), 157 | int(x_hat * scale_factor + 5 * dx_hat * scale_factor), 158 | ), 159 | (0, 0, 1), 160 | lineType=cv2.LINE_AA, 161 | ) 162 | 163 | cv2.imshow("samples", color) 164 | result = cv2.waitKey(20) 165 | # break on escape 166 | if result == 27: 167 | break 168 | x += dx 169 | y += dy 170 | 171 | cv2.destroyAllWindows() 172 | 173 | 174 | if __name__ == "__main__": 175 | example_filter() 176 | 177 | # %% 178 | -------------------------------------------------------------------------------- /imgs/particle_equations.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/johnhw/pfilter/c428e5106b46ad0ced734e02702179d35788cbdb/imgs/particle_equations.png -------------------------------------------------------------------------------- /pfilter/__init__.py: -------------------------------------------------------------------------------- 1 | from .pfilter import * 2 | -------------------------------------------------------------------------------- /pfilter/pfilter.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import numpy.ma as ma 3 | 4 | # return a new function that has the heat kernel (given by delta) applied. 5 | def make_heat_adjusted(sigma): 6 | def heat_distance(d): 7 | return np.exp(-(d**2) / (2.0 * sigma**2)) 8 | 9 | return heat_distance 10 | 11 | 12 | ## Resampling based on the examples at: https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python/blob/master/12-Particle-Filters.ipynb 13 | ## originally by Roger Labbe, under an MIT License 14 | def systematic_resample(weights): 15 | n = len(weights) 16 | positions = (np.arange(n) + np.random.uniform(0, 1)) / n 17 | return create_indices(positions, weights) 18 | 19 | 20 | def stratified_resample(weights): 21 | n = len(weights) 22 | positions = (np.random.uniform(0, 1, n) + np.arange(n)) / n 23 | return create_indices(positions, weights) 24 | 25 | 26 | def residual_resample(weights): 27 | n = len(weights) 28 | indices = np.zeros(n, np.uint32) 29 | # take int(N*w) copies of each weight 30 | num_copies = (n * weights).astype(np.uint32) 31 | k = 0 32 | for i in range(n): 33 | for _ in range(num_copies[i]): # make n copies 34 | indices[k] = i 35 | k += 1 36 | # use multinormial resample on the residual to fill up the rest. 37 | residual = weights - num_copies # get fractional part 38 | residual /= np.sum(residual) 39 | cumsum = np.cumsum(residual) 40 | cumsum[-1] = 1 41 | indices[k:n] = np.searchsorted(cumsum, np.random.uniform(0, 1, n - k)) 42 | return indices 43 | 44 | 45 | def create_indices(positions, weights): 46 | n = len(weights) 47 | indices = np.zeros(n, np.uint32) 48 | cumsum = np.cumsum(weights) 49 | i, j = 0, 0 50 | while i < n: 51 | if positions[i] < cumsum[j]: 52 | indices[i] = j 53 | i += 1 54 | else: 55 | j += 1 56 | 57 | return indices 58 | 59 | 60 | ### end rlabbe's resampling functions 61 | 62 | 63 | def multinomial_resample(weights): 64 | return np.random.choice(np.arange(len(weights)), p=weights, size=len(weights)) 65 | 66 | 67 | # resample function from http://scipy-cookbook.readthedocs.io/items/ParticleFilter.html 68 | def resample(weights): 69 | n = len(weights) 70 | indices = [] 71 | C = [0.0] + [np.sum(weights[: i + 1]) for i in range(n)] 72 | u0, j = np.random.random(), 0 73 | for u in [(u0 + i) / n for i in range(n)]: 74 | while u > C[j]: 75 | j += 1 76 | indices.append(j - 1) 77 | return indices 78 | 79 | 80 | # identity function for clearer naming 81 | identity = lambda x: x 82 | 83 | 84 | def squared_error(x, y, sigma=1): 85 | """ 86 | RBF kernel, supporting masked values in the observation 87 | Parameters: 88 | ----------- 89 | x : array (N,D) array of values 90 | y : array (N,D) array of values 91 | 92 | Returns: 93 | ------- 94 | 95 | distance : scalar 96 | Total similarity, using equation: 97 | 98 | d(x,y) = e^((-1 * (x - y) ** 2) / (2 * sigma ** 2)) 99 | 100 | summed over all samples. Supports masked arrays. 
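    Example (illustrative, not part of the original docstring):

        squared_error(np.zeros((3, 2)), np.ones((1, 2)), sigma=1.0)
        # -> three per-row similarities, each exp(-1) ~ 0.368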
101 | """ 102 | dx = (x - y) ** 2 103 | d = np.ma.sum(dx, axis=1) 104 | return np.exp(-d / (2.0 * sigma**2)) 105 | 106 | 107 | def gaussian_noise(x, sigmas): 108 | """Apply diagonal covaraiance normally-distributed noise to the N,D array x. 109 | Parameters: 110 | ----------- 111 | x : array 112 | (N,D) array of values 113 | sigmas : array 114 | D-element vector of std. dev. for each column of x 115 | """ 116 | n = np.random.normal(np.zeros(len(sigmas)), sigmas, size=(x.shape[0], len(sigmas))) 117 | return x + n 118 | 119 | 120 | def t_noise(x, sigmas, df=1.0): 121 | """Apply diagonal covaraiance t-distributed noise to the N,D array x. 122 | Parameters: 123 | ----------- 124 | x : array 125 | (N,D) array of values 126 | sigmas : array 127 | D-element vector of std. dev. for each column of x 128 | df : degrees of freedom (shape of the t distribution) 129 | Must be a scalar 130 | """ 131 | n = np.random.standard_t(df, size=(x.shape[0], len(sigmas))) * sigmas 132 | return x + n 133 | 134 | 135 | def cauchy_noise(x, sigmas): 136 | """Apply diagonal covaraiance Cauchy-distributed noise to the N,D array x. 137 | Parameters: 138 | ----------- 139 | x : array 140 | (N,D) array of values 141 | sigmas : array 142 | D-element vector of std. dev. for each column of x 143 | """ 144 | n = np.random.standard_cauchy(size=(x.shape[0], len(sigmas))) * np.array(sigmas) 145 | return x + n 146 | 147 | 148 | def independent_sample(fn_list): 149 | """Take a list of functions that each draw n samples from a distribution 150 | and concatenate the result into an n, d matrix 151 | Parameters: 152 | ----------- 153 | fn_list: list of functions 154 | A list of functions of the form `sample(n)` that will take n samples 155 | from a distribution. 156 | Returns: 157 | ------- 158 | sample_fn: a function that will sample from all of the functions and concatenate 159 | them 160 | """ 161 | 162 | def sample_fn(n): 163 | return np.stack([fn(n) for fn in fn_list]).T 164 | 165 | return sample_fn 166 | 167 | 168 | class ParticleFilter(object): 169 | """A particle filter object which maintains the internal state of a population of particles, and can 170 | be updated given observations. 171 | 172 | Attributes: 173 | ----------- 174 | 175 | n_particles : int 176 | number of particles used (N) 177 | d : int 178 | dimension of the internal state 179 | resample_proportion : float 180 | fraction of particles resampled from prior at each step 181 | particles : array 182 | (N,D) array of particle states 183 | original_particles : array 184 | (N,D) array of particle states *before* any random resampling replenishment 185 | This should be used for any computation on the previous time step (e.g. computing 186 | expected values, etc.) 187 | mean_hypothesis : array 188 | The current mean hypothesized observation 189 | mean_state : array 190 | The current mean hypothesized internal state D 191 | map_hypothesis: 192 | The current most likely hypothesized observation 193 | map_state: 194 | The current most likely hypothesized state 195 | n_eff: 196 | Normalized effective sample size, in range 0.0 -> 1.0 197 | weight_informational_energy: 198 | Informational energy of the distribution (Onicescu's) 199 | weight_entropy: 200 | Entropy of the weight distribution (in nats) 201 | hypotheses : array 202 | The (N,...) array of hypotheses for each particle 203 | weights : array 204 | N-element vector of normalized weights for each particle. 
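    Example (illustrative sketch, mirroring tests/test_filter.py):

        pf = ParticleFilter(
            prior_fn=lambda n: np.random.normal(0, 1, (n, 1)), n_particles=100
        )
        pf.update(np.array([0.5]))   # one predict/weight/resample step
        pf.mean_state, pf.map_state  # posterior summaries after the update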
205 | """ 206 | 207 | def __init__( 208 | self, 209 | prior_fn, 210 | observe_fn=None, 211 | resample_fn=None, 212 | n_particles=200, 213 | dynamics_fn=None, 214 | noise_fn=None, 215 | weight_fn=None, 216 | resample_proportion=None, 217 | column_names=None, 218 | internal_weight_fn=None, 219 | transform_fn=None, 220 | n_eff_threshold=1.0, 221 | ): 222 | """ 223 | 224 | Parameters: 225 | ----------- 226 | 227 | prior_fn : function(n) = > states 228 | a function that generates N samples from the prior over internal states, as 229 | an (N,D) particle array 230 | observe_fn : function(states) => observations 231 | transformation function from the internal state to the sensor state. Takes an (N,D) array of states 232 | and returns the expected sensor output as an array (e.g. a (N,W,H) tensor if generating W,H dimension images). 233 | resample_fn: A resampling function weights (N,) => indices (N,) 234 | n_particles : int 235 | number of particles in the filter 236 | dynamics_fn : function(states) => states 237 | dynamics function, which takes an (N,D) state array and returns a new one with the dynamics applied. 238 | noise_fn : function(states) => states 239 | noise function, takes a state vector and returns a new one with noise added. 240 | weight_fn : function(hypothesized, real) => weights 241 | computes the distance from the real sensed variable and that returned by observe_fn. Takes 242 | a an array of N hypothesised sensor outputs (e.g. array of dimension (N,W,H)) and the observed output (e.g. array of dimension (W,H)) and 243 | returns a strictly positive weight for the each hypothesis as an N-element vector. 244 | This should be a *similarity* measure, with higher values meaning more similar, for example from an RBF kernel. 245 | internal_weight_fn : function(states, observed) => weights 246 | Reweights the particles based on their *internal* state. This is function which takes 247 | an (N,D) array of internal states and the observation and 248 | returns a strictly positive weight for the each state as an N-element vector. 249 | Typically used to force particles inside of bounds, etc. 250 | transform_fn: function(states, weights) => transformed_states 251 | Applied at the very end of the update step, if specified. Updates the attribute 252 | `transformed_particles`. Useful when the particle state needs to be projected 253 | into a different space. 254 | resample_proportion : float 255 | proportion of samples to draw from the initial on each iteration. 256 | n_eff_threshold=1.0: float 257 | effective sample size at which resampling will be performed (0.0->1.0). Values 258 | <1.0 will allow samples to propagate without the resampling step until 259 | the effective sample size (n_eff) drops below the specified threshold. 
260 | column_names : list of strings 261 | names of each the columns of the state vector 262 | 263 | """ 264 | self.resample_fn = resample_fn or resample 265 | self.column_names = column_names 266 | self.prior_fn = prior_fn 267 | self.n_particles = n_particles 268 | self.init_filter() 269 | self.n_eff_threshold = n_eff_threshold 270 | self.d = self.particles.shape[1] 271 | self.observe_fn = observe_fn or identity 272 | self.dynamics_fn = dynamics_fn or identity 273 | self.noise_fn = noise_fn or identity 274 | self.weight_fn = weight_fn or squared_error 275 | self.weights = np.ones(self.n_particles) / self.n_particles 276 | self.transform_fn = transform_fn 277 | self.transformed_particles = None 278 | self.resample_proportion = resample_proportion or 0.0 279 | self.internal_weight_fn = internal_weight_fn 280 | self.original_particles = np.array(self.particles) 281 | self.original_weights = np.array(self.weights) 282 | 283 | def copy(self): 284 | """Copy this filter at its current state. Returns 285 | an exact copy, that can be run forward indepedently of the first. 286 | Beware that if your passed in functions (e.g. dynamics) are stateful, behaviour 287 | might not be independent! (tip: write stateless functions!) 288 | 289 | Returns: 290 | --------- 291 | A new, independent copy of this filter. 292 | """ 293 | # construct the filter 294 | new_copy = ParticleFilter( 295 | observe_fn=self.observe_fn, 296 | resample_fn=self.resample_fn, 297 | n_particles=self.n_particles, 298 | prior_fn=self.prior_fn, 299 | dynamics_fn=self.dynamics_fn, 300 | weight_fn=self.weight_fn, 301 | resample_proportion=self.resample_proportion, 302 | column_names=self.column_names, 303 | internal_weight_fn=self.internal_weight_fn, 304 | transform_fn=self.transform_fn, 305 | n_eff_threshold=self.n_eff_threshold, 306 | ) 307 | 308 | # copy particle state 309 | for array in ["particles", "original_particles", "original_weights", "weights"]: 310 | setattr(new_copy, array, np.array(getattr(self, array))) 311 | 312 | # copy any attributes 313 | for array in [ 314 | "mean_hypothesis", 315 | "mean_state", 316 | "map_state", 317 | "map_hypothesis", 318 | "hypotheses", 319 | "n_eff", 320 | "weight_informational_energy", 321 | "weight_entropy", 322 | ]: 323 | if hasattr(self, array): 324 | setattr(new_copy, array, getattr(self, array).copy()) 325 | 326 | return new_copy 327 | 328 | def predictor(self, n=None, observed=None): 329 | """Return an generator that runs a copy of the filter forward for prediction. 330 | Yields the copied filter object at each step. Useful for making predictions 331 | without inference. 332 | 333 | By default, the filter will run without observations. Pass observed to set the initial observation. 334 | Use send() to send new observations to the filter. If no send() is used on any given iteration, the filter 335 | will revert to prediction without observation. 336 | 337 | If n is specified, runs for n steps; otherwise, runs forever. 338 | 339 | Parameters: 340 | ---------- 341 | 342 | n: integer 343 | Number of steps to run for. If None, run forever. 344 | 345 | observed: array 346 | The initial observed output, in the same format as observe_fn() will produce. This is typically the 347 | input from the sensor observing the process (e.g. a camera image in optical tracking). 
348 | If None, then the observation step is skipped 349 | 350 | """ 351 | copy = self.copy() 352 | observed = None 353 | if n is not None: 354 | for i in range(n): 355 | copy.update(observed) 356 | observed = yield copy 357 | else: 358 | while True: 359 | copy.update(observed) 360 | observed = yield copy 361 | 362 | 363 | def init_filter(self, mask=None): 364 | """Initialise the filter by drawing samples from the prior. 365 | 366 | Parameters: 367 | ----------- 368 | mask : array, optional 369 | boolean mask specifying the elements of the particle array to draw from the prior. None (default) 370 | implies all particles will be resampled (i.e. a complete reset) 371 | """ 372 | new_sample = self.prior_fn(self.n_particles) 373 | 374 | # resample from the prior 375 | if mask is None: 376 | self.particles = new_sample 377 | else: 378 | self.particles[mask, :] = new_sample[mask, :] 379 | 380 | def update(self, observed=None, **kwargs): 381 | """Update the state of the particle filter given an observation. 382 | 383 | Parameters: 384 | ---------- 385 | 386 | observed: array 387 | The observed output, in the same format as observe_fn() will produce. This is typically the 388 | input from the sensor observing the process (e.g. a camera image in optical tracking). 389 | If None, then the observation step is skipped, and the filter will run one step in prediction-only mode. 390 | 391 | kwargs: any keyword arguments specified will be passed on to: 392 | observe_fn(y, **kwargs) 393 | weight_fn(x, **kwargs) 394 | dynamics_fn(x, **kwargs) 395 | noise_fn(x, **kwargs) 396 | internal_weight_function(x, y, **kwargs) 397 | transform_fn(x, **kwargs) 398 | """ 399 | 400 | # apply dynamics and noise 401 | self.particles = self.noise_fn( 402 | self.dynamics_fn(self.particles, **kwargs), **kwargs 403 | ) 404 | 405 | # hypothesise observations 406 | self.hypotheses = self.observe_fn(self.particles, **kwargs) 407 | 408 | if observed is not None: 409 | # compute similarity to observations 410 | # force to be positive 411 | if type(observed)==list or type(observed)==tuple or type(observed)==float or type(observed)==int: 412 | observed = np.array(observed, dtype=np.float64) 413 | 414 | weights = np.clip( 415 | self.weights 416 | * np.array( 417 | self.weight_fn( 418 | self.hypotheses.reshape(self.n_particles, -1), 419 | observed.reshape(1, -1), 420 | **kwargs 421 | ) 422 | ), 423 | 0, 424 | np.inf, 425 | ) 426 | else: 427 | # we have no observation, so all particles weighted the same 428 | weights = self.weights * np.ones((self.n_particles,)) 429 | 430 | # apply weighting based on the internal state 431 | # most filters don't use this, but can be a useful way of combining 432 | # forward and inverse models 433 | if self.internal_weight_fn is not None: 434 | internal_weights = self.internal_weight_fn( 435 | self.particles, observed, **kwargs 436 | ) 437 | internal_weights = np.clip(internal_weights, 0, np.inf) 438 | internal_weights = internal_weights / np.sum(internal_weights) 439 | weights *= internal_weights 440 | 441 | # normalise weights to resampling probabilities 442 | self.weight_normalisation = np.sum(weights) 443 | self.weights = weights / self.weight_normalisation 444 | 445 | # Compute effective sample size and entropy of weighting vector. 446 | # These are useful statistics for adaptive particle filtering. 
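        # n_eff = (1 / sum_i w_i^2) / N: 1.0 when the weights are perfectly uniform,
        # approaching 1/N when a single particle carries all the weight. Resampling
        # is triggered further below when n_eff falls under n_eff_threshold.
        # Note: weight_entropy below is sum(w * log w), i.e. the negative Shannon entropy.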
447 | self.n_eff = (1.0 / np.sum(self.weights**2)) / self.n_particles 448 | self.weight_informational_energy = np.sum(self.weights**2) 449 | self.weight_entropy = np.sum(self.weights * np.log(self.weights)) 450 | 451 | # preserve current sample set before any replenishment 452 | self.original_particles = np.array(self.particles) 453 | 454 | # store mean (expected) hypothesis 455 | self.mean_hypothesis = np.sum(self.hypotheses.T * self.weights, axis=-1).T 456 | self.mean_state = np.sum(self.particles.T * self.weights, axis=-1).T 457 | self.cov_state = np.cov(self.particles, rowvar=False, aweights=self.weights) 458 | 459 | # store MAP estimate 460 | argmax_weight = np.argmax(self.weights) 461 | self.map_state = self.particles[argmax_weight] 462 | self.map_hypothesis = self.hypotheses[argmax_weight] 463 | self.original_weights = np.array(self.weights) # before any resampling 464 | 465 | # apply any post-processing 466 | if self.transform_fn: 467 | self.transformed_particles = self.transform_fn( 468 | self.original_particles, self.weights, **kwargs 469 | ) 470 | else: 471 | self.transformed_particles = self.original_particles 472 | 473 | # resampling (systematic resampling) step 474 | if self.n_eff < self.n_eff_threshold: 475 | indices = self.resample_fn(self.weights) 476 | self.particles = self.particles[indices, :] 477 | self.weights = np.ones(self.n_particles) / self.n_particles 478 | 479 | # randomly resample some particles from the prior 480 | if self.resample_proportion > 0: 481 | random_mask = ( 482 | np.random.random(size=(self.n_particles,)) < self.resample_proportion 483 | ) 484 | self.resampled_particles = random_mask 485 | self.init_filter(mask=random_mask) 486 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | numpy 2 | pytest 3 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [metadata] 2 | description-file = README.md -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup 2 | 3 | with open("README.md") as f: 4 | readme = f.read() 5 | 6 | 7 | setup( 8 | name="pfilter", # This is the name of your PyPI-package. 
9 | version="0.2.5", # Update the version number for new releases 10 | install_requires=['numpy'], 11 | packages=[ 12 | "pfilter" 13 | ], # The name of your scipt, and also the command you'll be using for calling it 14 | description="A basic particle filter", 15 | author="John H Williamson", 16 | long_description_content_type="text/markdown", 17 | long_description=readme, 18 | author_email="johnhw@gmail.com", 19 | url="https://github.com/johnhw/pfilter", # use the URL to the github repo 20 | download_url="https://github.com/johnhw/pfilter/tarball/0.2.5", 21 | keywords=["particle", "probabilistic", "stochastic", "filter", "filtering"], 22 | ) 23 | 24 | -------------------------------------------------------------------------------- /tests/test_filter.py: -------------------------------------------------------------------------------- 1 | from pfilter import ( 2 | ParticleFilter, 3 | squared_error, 4 | t_noise, 5 | gaussian_noise, 6 | cauchy_noise, 7 | make_heat_adjusted, 8 | systematic_resample, 9 | stratified_resample, 10 | multinomial_resample, 11 | residual_resample, 12 | ) 13 | import numpy as np 14 | 15 | 16 | def test_init(): 17 | # silly initialisation, but uses all parameters 18 | pf = ParticleFilter( 19 | prior_fn=lambda n: np.random.normal(0, 1, (n, 1)), 20 | observe_fn=lambda x: x, 21 | n_particles=100, 22 | dynamics_fn=lambda x: x, 23 | noise_fn=lambda x: x, 24 | weight_fn=lambda x, y: np.ones(len(x)), 25 | resample_proportion=0.2, 26 | column_names=["test"], 27 | internal_weight_fn=lambda x, y: np.ones(len(x)), 28 | n_eff_threshold=1.0, 29 | ) 30 | pf.update(np.array([1])) 31 | 32 | 33 | # pure, basic multinomial sampling 34 | def basic_resample(weights): 35 | return np.random.choice(np.arange(len(weights)), p=weights, size=len(weights)) 36 | 37 | def test_copy(): 38 | pf = ParticleFilter( 39 | prior_fn=lambda n: np.random.normal(0, 1, (n, 1)), 40 | observe_fn=lambda x: x, 41 | n_particles=100, 42 | dynamics_fn=lambda x: x, 43 | noise_fn=lambda x: x, 44 | weight_fn=lambda x, y: np.ones(len(x)), 45 | resample_proportion=0.2, 46 | column_names=["test"], 47 | internal_weight_fn=lambda x, y: np.ones(len(x)), 48 | n_eff_threshold=1.0, 49 | ) 50 | 51 | original = np.array(pf.particles) 52 | original_weights = np.array(pf.weights) 53 | 54 | a_copy = pf.copy() 55 | 56 | pf.update() 57 | 58 | b_copy = pf.copy() 59 | 60 | pf.update(np.array([1])) 61 | 62 | c_copy = pf.copy() 63 | 64 | assert np.allclose(a_copy.particles, original) 65 | assert np.allclose(a_copy.weights, original_weights) 66 | assert np.allclose(c_copy.mean_hypothesis, pf.mean_hypothesis) 67 | assert not c_copy.mean_hypothesis is pf.mean_hypothesis 68 | 69 | 70 | 71 | 72 | def test_predict(): 73 | # silly initialisation, but uses all parameters 74 | pf = ParticleFilter( 75 | prior_fn=lambda n: np.random.normal(0, 1, (n, 1)), 76 | observe_fn=lambda x: x, 77 | n_particles=100, 78 | dynamics_fn=lambda x: x + 1, 79 | noise_fn=lambda x: x + np.random.normal(0, 1, x.shape), 80 | weight_fn=lambda x, y: np.sum(y**2, axis=1), 81 | resample_proportion=0.2, 82 | column_names=["test"], 83 | internal_weight_fn=lambda x, y: np.ones(len(x)), 84 | n_eff_threshold=1.0, 85 | ) 86 | old_weights = np.array(pf.original_weights) 87 | old_particles = np.array(pf.original_particles) 88 | states = [] 89 | for i in pf.predictor(10): 90 | states.append([pf.weights, pf.particles]) 91 | 92 | assert len(states)==10 93 | # make sure the state hasn't changed 94 | assert np.allclose(old_weights, pf.original_weights) 95 | assert np.allclose(old_particles, 
pf.original_particles) 96 | pf.update(np.array([1])) 97 | #assert not np.allclose(old_weights, pf.original_weights) 98 | assert not np.allclose(old_particles, pf.original_particles) 99 | 100 | 101 | 102 | 103 | 104 | def test_resampler(): 105 | pf = ParticleFilter( 106 | prior_fn=lambda n: np.random.normal(0, 1, (n, 1)), 107 | n_particles=100, 108 | resample_fn=None, # should use default 109 | ) 110 | for i in range(10): 111 | pf.update(np.array([1])) 112 | 113 | pf = ParticleFilter( 114 | prior_fn=lambda n: np.random.normal(0, 1, (n, 1)), 115 | n_particles=100, 116 | resample_fn=basic_resample, 117 | ) 118 | for i in range(10): 119 | pf.update(np.array([1])) 120 | 121 | for sampler in [ 122 | stratified_resample, 123 | systematic_resample, 124 | residual_resample, 125 | multinomial_resample, 126 | ]: 127 | pf = ParticleFilter( 128 | prior_fn=lambda n: np.random.normal(0, 1, (n, 1)), 129 | n_particles=100, 130 | resample_fn=sampler, 131 | ) 132 | for i in range(10): 133 | pf.update(np.array([1])) 134 | 135 | 136 | def test_weights(): 137 | # verify weights sum to 1.0 138 | pf = ParticleFilter( 139 | prior_fn=lambda n: np.random.normal(0, 1, (n, 1)), n_particles=100 140 | ) 141 | for i in range(10): 142 | pf.update(np.array([1])) 143 | assert len(pf.weights) == len(pf.particles) == 100 144 | assert pf.particles.shape == (100, 1) 145 | assert np.allclose(np.sum(pf.weights), 1.0) 146 | 147 | 148 | def test_no_observe(): 149 | # check that 150 | pf = ParticleFilter( 151 | prior_fn=lambda n: np.random.normal(0, 1, (n, 1)), n_particles=10 152 | ) 153 | for i in range(10): 154 | pf.update(None) 155 | assert len(pf.weights) == len(pf.particles) == 10 156 | assert pf.particles.shape == (10, 1) 157 | assert np.allclose(np.sum(pf.weights), 1.0) 158 | 159 | 160 | import numpy.ma as ma 161 | 162 | 163 | def test_partial_missing(): 164 | # check that 165 | pf = ParticleFilter( 166 | prior_fn=lambda n: np.random.normal(0, 1, (n, 4)), n_particles=100 167 | ) 168 | for i in range(10): 169 | masked_input = ma.masked_equal(np.array([1, 999, 0, 999]), 999) 170 | pf.update(masked_input) 171 | pf.update(np.array([1, 1, 1, 1])) 172 | assert np.allclose(np.sum(pf.weights), 1.0) 173 | assert len(pf.weights) == len(pf.particles) == 100 174 | 175 | 176 | def test_transform_fn(): 177 | # silly initialisation, but uses all parameters 178 | pf = ParticleFilter( 179 | prior_fn=lambda n: np.random.normal(0, 1, (n, 1)), 180 | observe_fn=lambda x: x, 181 | n_particles=100, 182 | dynamics_fn=lambda x: x, 183 | transform_fn=lambda x, w: 2 * x, 184 | noise_fn=lambda x: x, 185 | weight_fn=lambda x, y: np.ones(len(x)), 186 | resample_proportion=0.2, 187 | column_names=["test"], 188 | internal_weight_fn=lambda x, y: np.ones(len(x)), 189 | n_eff_threshold=1.0, 190 | ) 191 | for i in range(10): 192 | pf.update(np.array([1])) 193 | assert np.allclose(pf.original_particles * 2.0, pf.transformed_particles) 194 | 195 | 196 | def test_kwargs(): 197 | def check_kwargs(x, **kwargs): 198 | assert "test_1" in kwargs 199 | assert "t" in kwargs 200 | assert kwargs["test_1"] == "ok" 201 | assert kwargs["t"] == 1.0 202 | return x 203 | 204 | # silly initialisation, but uses all parameters 205 | pf = ParticleFilter( 206 | prior_fn=lambda n: np.random.normal(0, 1, (n, 1)), 207 | observe_fn=lambda x, **kwargs: check_kwargs(x, **kwargs), 208 | n_particles=100, 209 | dynamics_fn=lambda x, **kwargs: check_kwargs(x, **kwargs), 210 | transform_fn=lambda x, w, **kwargs: check_kwargs(x, **kwargs), 211 | noise_fn=lambda x, **kwargs: check_kwargs(x, 
**kwargs), 212 | weight_fn=lambda x, y, **kwargs: check_kwargs(np.ones(len(x)), **kwargs), 213 | resample_proportion=0.2, 214 | column_names=["test"], 215 | internal_weight_fn=lambda x, y, **kwargs: check_kwargs( 216 | np.ones(len(x)), **kwargs 217 | ), 218 | n_eff_threshold=1.0, 219 | ) 220 | pf.update(np.array([[1]]), test_1="ok", t=1.0) 221 | 222 | 223 | def test_gaussian_noise(): 224 | np.random.seed(2012) 225 | for shape in [10, 10], [100, 1000], [500, 50]: 226 | val = np.random.normal(0, 10) 227 | x = np.full(shape, val) 228 | noisy = gaussian_noise(x, np.ones(shape[1])) 229 | assert (np.mean(noisy) - np.mean(x)) ** 2 < 1.0 230 | assert (np.std(noisy) - 1.0) ** 2 < 0.1 231 | noisy = gaussian_noise(x, np.full(shape[1], 10.0)) 232 | assert (np.std(noisy) - 10.0) ** 2 < 0.1 233 | 234 | 235 | def test_cauchy_noise(): 236 | np.random.seed(2012) 237 | for shape in [10, 10], [100, 1000], [500, 50]: 238 | val = np.random.normal(0, 10) 239 | x = np.full(shape, val) 240 | noisy = cauchy_noise(x, np.ones(shape[1])) 241 | 242 | def test_t_noise(): 243 | np.random.seed(2012) 244 | for shape in [10, 10], [100, 1000], [500, 50]: 245 | val = np.random.normal(0, 10) 246 | x = np.full(shape, val) 247 | noisy = t_noise(x, sigmas=np.ones(shape[1]), df=1.0) 248 | noisy = t_noise(x, sigmas=np.ones(shape[1]), df=10.0) 249 | noisy = t_noise(x, sigmas=np.ones(shape[1]), df=0.1) 250 | 251 | 252 | def test_squared_error(): 253 | for shape in [1, 1], [1, 10], [10, 1], [10, 10], [200, 10], [10, 200]: 254 | x = np.random.normal(0, 1, shape) 255 | y = np.random.normal(0, 1, shape) 256 | assert np.allclose(squared_error(x, y, sigma=1), squared_error(x, y)) 257 | assert np.all(squared_error(x, y, sigma=0.5) < squared_error(x, y)) 258 | assert np.all(squared_error(x, y, sigma=2.0) > squared_error(x, y)) 259 | 260 | 261 | def test_heat_kernel(): 262 | kernel = make_heat_adjusted(1.0) 263 | assert kernel(0) == 1.0 264 | assert kernel(1) < 1.0 265 | assert kernel(1000) < 1e-4 266 | assert np.allclose(kernel(3), np.exp(-3 ** 2 / 2.0)) 267 | assert kernel(-1) == kernel(1) 268 | assert kernel(2) < kernel(1) 269 | a = np.zeros((10, 10)) 270 | b = np.ones((10, 10)) 271 | assert np.all(kernel(a) == 1.0) 272 | assert np.all(kernel(b) < 1.0) 273 | kernel_small = make_heat_adjusted(0.5) 274 | kernel_large = make_heat_adjusted(2.0) 275 | for k in -10, -5, -1, -0.5, 0.5, 1, 5, 10: 276 | assert kernel_small(k) < kernel(k) < kernel_large(k) 277 | 278 | --------------------------------------------------------------------------------