├── .gitignore
├── hawkesbook
│   ├── __init__.py
│   └── hawkes.py
├── setup.py
├── LICENSE
├── tests
│   ├── earthquake_example.py
│   └── basic.py
└── README.md

/.gitignore:
--------------------------------------------------------------------------------
1 | build/*
2 | dist/*
3 | hawkesbook.egg-info/*
4 | *__pycache__*
--------------------------------------------------------------------------------
/hawkesbook/__init__.py:
--------------------------------------------------------------------------------
1 | __version__ = "0.1.0"
2 |
3 | from .hawkes import *
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | import pathlib
2 | from setuptools import setup
3 |
4 | HERE = pathlib.Path(__file__).parent
5 | README = (HERE / "README.md").read_text(encoding="utf-8")
6 |
7 | setup(
8 |     name="hawkesbook",
9 |     version="0.1.0",
10 |     description="Hawkes process methods for inference, simulation, and related calculations",
11 |     long_description=README,
12 |     long_description_content_type="text/markdown",
13 |     url="https://github.com/Pat-Laub/hawkesbook",
14 |     author="Patrick Laub",
15 |     author_email="patrick.laub@gmail.com",
16 |     license="MIT",
17 |     classifiers=[
18 |         "License :: OSI Approved :: MIT License",
19 |         "Programming Language :: Python :: 3",
20 |         "Programming Language :: Python :: 3.7",
21 |         "Intended Audience :: Science/Research",
22 |     ],
23 |     packages=["hawkesbook"],
24 |     include_package_data=True,
25 |     install_requires=["numba", "numpy", "scipy", "tqdm"],
26 | )
27 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2021 Pat Laub
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/tests/earthquake_example.py:
--------------------------------------------------------------------------------
1 | import hawkesbook as hawkes
2 |
3 | import numpy as np
4 | import pandas as pd
5 | import scipy.stats as stats
6 | import matplotlib.pyplot as plt
7 | from statsmodels.graphics.gofplots import qqplot
8 |
9 | # Load data to fit
10 | quakes = pd.read_csv("japanese-earthquakes.csv")
11 | quakes.index = pd.to_datetime(quakes.Day.astype(str) + "/" + quakes.Month.astype(str) + "/" + quakes.Year.astype(str) + " " + quakes.Time, dayfirst=True)
12 | quakes.sort_index(inplace=True)
13 |
14 | # Calculate each arrival as a (fractional) number of days since the
15 | # beginning of the observation period
16 | timeToQuake = quakes.index - pd.Timestamp("1/1/1973")
17 | ts = np.array(timeToQuake.total_seconds() / 60 / 60 / 24)
18 |
19 | # Calculate the length of the observation period
20 | obsPeriod = pd.Timestamp("31/12/2020") - pd.Timestamp("1/1/1973")
21 | T = obsPeriod.days
22 |
23 | # Calculate the maximum likelihood estimate for the Hawkes process
24 | # with an exponentially decaying intensity
25 | 𝛉_exp_mle = hawkes.exp_mle(ts, T)
26 | print("Exp Hawkes MLE fit: ", 𝛉_exp_mle)
27 |
28 | # Calculate the EM estimate for the same type of Hawkes process
29 | 𝛉_exp_em = hawkes.exp_em(ts, T, iters=100)
30 | print("Exp Hawkes EM fit: ", 𝛉_exp_em)
31 |
32 | # Get the likelihoods of each fit to find the better one
33 | ll_mle = hawkes.exp_log_likelihood(ts, T, 𝛉_exp_mle)
34 | ll_em = hawkes.exp_log_likelihood(ts, T, 𝛉_exp_em)
35 |
36 | if ll_mle > ll_em:
37 |     print("MLE was a better fit than EM in this case")
38 |     𝛉_exp = 𝛉_exp_mle
39 |     ll_exp = ll_mle
40 | else:
41 |     print("EM was a better fit than MLE in this case")
42 |     𝛉_exp = 𝛉_exp_em
43 |     ll_exp = ll_em
44 |
45 | # Fit instead the Hawkes with a power-law decay
46 | 𝛉_pl = hawkes.power_mle(ts, T)
47 | ll_pl = hawkes.power_log_likelihood(ts, T, 𝛉_pl)
48 |
49 | # Compare the BICs
50 | BIC_exp = 3 * np.log(len(ts)) - 2 * ll_exp
51 | BIC_pl = 4 * np.log(len(ts)) - 2 * ll_pl
52 | if BIC_exp < BIC_pl:
53 |     print(f"The exponentially-decaying Hawkes was the better fit with BIC={BIC_exp:.2f}.")
54 |     print(f"The power-law Hawkes had BIC={BIC_pl:.2f}.")
55 | else:
56 |     print(f"The power-law Hawkes was the better fit with BIC={BIC_pl:.2f}.")
57 |     print(f"The exponentially-decaying Hawkes had BIC={BIC_exp:.2f}.")
58 |
59 | # Create a Q-Q plot for the exponential-decay fit by
60 | # first transforming the points to a unit-rate Poisson
61 | # process as outlined by the random time change theorem
62 | tsShifted = hawkes.exp_hawkes_compensators(ts, 𝛉_exp)
63 | iat = np.diff(np.insert(tsShifted, 0, 0))
64 | qqplot(iat, dist=stats.expon, fit=False, line="45")
65 | plt.show()
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # _hawkesbook_ Python package for Hawkes process inference, simulation, etc.
2 |
3 | This package implements inference, simulation, and other related methods for Hawkes processes, including some mutually exciting (multivariate) Hawkes processes.
4 | It is meant to accompany [_The Elements of Hawkes Processes_](https://link.springer.com/book/10.1007/978-3-030-84639-8) written by Patrick J. Laub, Young Lee, and Thomas Taimre.
5 | To install, simply run `pip install hawkesbook`.
6 |
7 | The main design goals for this package were simplicity and readability.
8 | Unicode characters are used to match the mathematics to the code as much as possible.
9 | For example, the Hawkes process conditional intensity is
10 |
11 | $$\lambda^\ast(t) = \lambda + \sum_{t_i \in \mathcal{H}_t} \mu(t - t_i)$$
12 |
13 | and this is rendered in Python as
14 | ```python
15 | def hawkes_intensity(t, ℋ_t, 𝛉):
16 |     λ, μ, _ = 𝛉
17 |     λˣ = λ
18 |     for t_i in ℋ_t:
19 |         λˣ += μ(t - t_i)
20 |     return λˣ
21 | ```
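For instance, here is a minimal sketch of calling this function directly, taking an exponential decay for `μ` and leaving the unused third parameter slot as `None` (mirroring a call from this package's test suite):

```python
import numpy as np
import hawkesbook as hawkes

# Intensity at t = 2 given baseline λ = 1 and one past arrival at t = 1,
# with decay μ(s) = exp(-s); this evaluates to 1 + exp(-1).
λˣ = hawkes.hawkes_intensity(2, [1], (1, lambda s: np.exp(-s), None))
```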
22 |
23 | Some functions are JIT-compiled to machine code and parallelised with `numba`, so computational performance is not completely neglected.
24 | Everything that can be `numpy`-vectorised has been.
25 |
26 | Our main dependencies are `numba`, `numpy`, and `scipy` (for its `minimize` function).
27 |
28 | As an example, in the book we have a case study which fits various Hawkes processes to the arrival times of earthquakes.
29 | The code for fitting and analysing that data looks like:
30 |
31 | ```python
32 | import hawkesbook as hawkes
33 |
34 | import numpy as np
35 | import pandas as pd
36 | import scipy.stats as stats
37 | import matplotlib.pyplot as plt
38 | from statsmodels.graphics.gofplots import qqplot
39 |
40 | # Load data to fit
41 | quakes = pd.read_csv("japanese-earthquakes.csv")
42 | quakes.index = pd.to_datetime(quakes.Day.astype(str) + "/" + quakes.Month.astype(str) + "/" + quakes.Year.astype(str) + " " + quakes.Time, dayfirst=True)
43 | quakes.sort_index(inplace=True)
44 |
45 | # Calculate each arrival as a (fractional) number of days since the
46 | # beginning of the observation period
47 | timeToQuake = quakes.index - pd.Timestamp("1/1/1973")
48 | ts = np.array(timeToQuake.total_seconds() / 60 / 60 / 24)
49 |
50 | # Calculate the length of the observation period
51 | obsPeriod = pd.Timestamp("31/12/2020") - pd.Timestamp("1/1/1973")
52 | T = obsPeriod.days
53 |
54 | # Calculate the maximum likelihood estimate for the Hawkes process
55 | # with an exponentially decaying intensity
56 | 𝛉_exp_mle = hawkes.exp_mle(ts, T)
57 | print("Exp Hawkes MLE fit: ", 𝛉_exp_mle)
58 |
59 | # Calculate the EM estimate for the same type of Hawkes process
60 | 𝛉_exp_em = hawkes.exp_em(ts, T, iters=100)
61 | print("Exp Hawkes EM fit: ", 𝛉_exp_em)
62 |
63 | # Get the likelihoods of each fit to find the better one
64 | ll_mle = hawkes.exp_log_likelihood(ts, T, 𝛉_exp_mle)
65 | ll_em = hawkes.exp_log_likelihood(ts, T, 𝛉_exp_em)
66 |
67 | if ll_mle > ll_em:
68 |     print("MLE was a better fit than EM in this case")
69 |     𝛉_exp = 𝛉_exp_mle
70 |     ll_exp = ll_mle
71 | else:
72 |     print("EM was a better fit than MLE in this case")
73 |     𝛉_exp = 𝛉_exp_em
74 |     ll_exp = ll_em
75 |
76 | # Fit instead the Hawkes with a power-law decay
77 | 𝛉_pl = hawkes.power_mle(ts, T)
78 | ll_pl = hawkes.power_log_likelihood(ts, T, 𝛉_pl)
79 |
80 | # Compare the BICs
81 | BIC_exp = 3 * np.log(len(ts)) - 2 * ll_exp
82 | BIC_pl = 4 * np.log(len(ts)) - 2 * ll_pl
83 | if BIC_exp < BIC_pl:
84 |     print(f"The exponentially-decaying Hawkes was the better fit with BIC={BIC_exp:.2f}.")
85 |     print(f"The power-law Hawkes had BIC={BIC_pl:.2f}.")
86 | else:
87 |     print(f"The power-law Hawkes was the better fit with BIC={BIC_pl:.2f}.")
88 |     print(f"The exponentially-decaying Hawkes had BIC={BIC_exp:.2f}.")
89 |
90 | # Create a Q-Q plot for the exponential-decay fit by
91 | # first transforming the points to a unit-rate Poisson
92 | # process as outlined by the random time change theorem
93 | tsShifted = hawkes.exp_hawkes_compensators(ts, 𝛉_exp)
94 | iat = np.diff(np.insert(tsShifted, 0, 0))
95 | qqplot(iat, dist=stats.expon,
fit=False, line="45") 96 | plt.show() 97 | ``` 98 | -------------------------------------------------------------------------------- /tests/basic.py: -------------------------------------------------------------------------------- 1 | import hawkesbook as hawkes 2 | 3 | import numpy as np 4 | import numpy.random as rnd 5 | from tqdm import tqdm 6 | 7 | from numpy.testing import assert_allclose 8 | 9 | empMean, empVar, empAutoCov = hawkes.empirical_moments([1, 2, 2.1, 2.3, 4.5, 9.9], T=10, τ=2, lag=1) 10 | assert min(empMean, empVar) > 0 11 | 12 | assert hawkes.hawkes_intensity(1, [], [1, None, None]) == 1 13 | assert hawkes.hawkes_intensity(2, [1], [1, lambda x: np.exp(-x), None]) == 1 + np.exp(-1) 14 | 15 | assert hawkes.exp_hawkes_intensity(1, [0.5], [1.0, 2.0, 3.0]) == hawkes.hawkes_intensity(1, [0.5], (1, lambda t: 2*np.exp(-3*t), None)) 16 | 17 | testα = 3 18 | testβ = 4 19 | testμ = lambda x: testα*np.exp(-testβ * x) 20 | testM = lambda t: (testα/testβ) * (1 - np.exp(-testβ*t)) 21 | 22 | testα = testβ = 1 23 | testμ = lambda x: testα*np.exp(-testβ * x) 24 | testM = lambda t: (testα/testβ) * (1 - np.exp(-testβ*t)) 25 | 26 | rnd.seed(1) 27 | simTimes = hawkes.simulate_inverse_compensator([1, testμ, testM], hawkes.hawkes_compensator, 10) 28 | print(simTimes) 29 | 30 | print("Testing log likelihoods") 31 | testObs = np.array([0.5, 0.75]) 32 | testT = 1.0 33 | test𝛉 = np.array([1.0, 2.0, 3.0]) 34 | assert hawkes.exp_log_likelihood(testObs, testT, test𝛉) == hawkes.log_likelihood(testObs, testT, test𝛉, 35 | hawkes.exp_hawkes_intensity, hawkes.exp_hawkes_compensator) 36 | 37 | assert_allclose(hawkes.exp_log_likelihood(testObs, testT, test𝛉), 38 | hawkes.log_likelihood(testObs, testT, test𝛉, hawkes.exp_hawkes_intensity, hawkes.exp_hawkes_compensator), 39 | 0.1) 40 | 41 | print(f"Passed! 
Exp version = {hawkes.exp_log_likelihood(testObs, testT, test𝛉)} == general version = {hawkes.log_likelihood(testObs, testT, test𝛉, hawkes.exp_hawkes_intensity, hawkes.exp_hawkes_compensator)}") 42 | 43 | # Test simulation methods for exponential case 44 | sim𝛉 = np.array([1.0, 2.0, 3.0]) 45 | testT = 100 46 | 47 | hawkes.numba_seed(1) 48 | N_T = [] 49 | max_t = [] 50 | for r in tqdm(range(10_000)): 51 | times = hawkes.exp_simulate_by_composition(sim𝛉, 1_000) 52 | N_T.append(len(times[times < testT])) 53 | max_t.append(times[-1]) 54 | print(f"Over [0, 100] we had {np.mean(N_T)} arrivals on average by composition method") 55 | 56 | hawkes.numba_seed(1) 57 | N_T = [] 58 | for r in tqdm(range(10_000)): 59 | N_T.append(len(hawkes.exp_simulate_by_composition_alt(sim𝛉, testT))) 60 | print(f"Over [0, 100] we had {np.mean(N_T)} arrivals on average by alternative composition method") 61 | 62 | 63 | hawkes.numba_seed(1) 64 | N_T = [] 65 | for r in tqdm(range(10_000)): 66 | N_T.append(len(hawkes.exp_simulate_by_thinning(sim𝛉, testT))) 67 | print(f"Over [0, 100] we had {np.mean(N_T)} arrivals on average by thinning method") 68 | 69 | testλ, testα, testβ = sim𝛉 70 | testμ = lambda x: testα*np.exp(-testβ * x) 71 | testM = lambda t: (testα/testβ) * (1 - np.exp(-testβ*t)) 72 | 73 | rnd.seed(1) 74 | N_T = [] 75 | max_t = [] 76 | for r in tqdm(range(10)): 77 | times = hawkes.simulate_inverse_compensator([testλ, testμ, testM], hawkes.hawkes_compensator, 500) 78 | N_T.append(len(times[times < testT])) 79 | max_t.append(times[-1]) 80 | print(f"Over [0, 100] we had {np.mean(N_T)} arrivals on average by inverse compensator method") 81 | 82 | hawkes.numba_seed(1) 83 | 84 | sim𝛉 = np.array([1.0, 2.0, 3.1]) 85 | 86 | testT = 1_000 87 | testObs = hawkes.exp_simulate_by_thinning(sim𝛉, testT) 88 | print(f"Testing log likelihoods on larger sample (of size {len(testObs)})") 89 | 90 | print(f"Exp version = {hawkes.exp_log_likelihood(testObs, testT, test𝛉)} == general version = {hawkes.log_likelihood(testObs, testT, test𝛉, hawkes.exp_hawkes_intensity, hawkes.exp_hawkes_compensator)}") 91 | 92 | assert_allclose(hawkes.exp_log_likelihood(testObs, testT, test𝛉), 93 | hawkes.log_likelihood(testObs, testT, test𝛉, hawkes.exp_hawkes_intensity, hawkes.exp_hawkes_compensator), 94 | 0.1) 95 | print("Passed!") 96 | 97 | testMean, testVar, testAutoCov = testMoments = hawkes.exp_moments([1, 2, 3], τ=2, lag=1) 98 | assert min(testMean, testVar) > 0 99 | 100 | assert hawkes.exp_gmm_loss(np.array([1.0, 2.0, 3.0]), 2, 1, testMoments + 2, np.eye(3)) == 12.0 101 | 102 | print(hawkes.exp_gmm(np.array([1.0, 1.1, 1.2, 5.0]), 10, 2, 1)) 103 | 104 | print("Testing EM algorithm") 105 | print(hawkes.exp_em(np.array([1.0, 1.1, 1.2, 5.0]), 6.0, np.array([1.0, 2.0, 3.0]), 10, 1, True)) 106 | 107 | fit = hawkes.exp_mle(np.array([1.0, 1.1, 1.2, 5.0]), 10) -------------------------------------------------------------------------------- /hawkesbook/hawkes.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import numpy as np 3 | import numpy.random as rnd 4 | from scipy.optimize import fsolve, minimize 5 | 6 | from tqdm import tqdm 7 | from numba import njit, prange 8 | 9 | 10 | @njit() 11 | def numba_seed(seed): 12 | rnd.seed(seed) 13 | 14 | 15 | # Intensities and compensators 16 | 17 | 18 | def hawkes_intensity(t, ℋ_t, 𝛉): 19 | λ, μ, _ = 𝛉 20 | λˣ = λ 21 | for t_i in ℋ_t: 22 | λˣ += μ(t - t_i) 23 | return λˣ 24 | 25 | 26 | def hawkes_compensator(t, ℋ_t, 𝛉): 27 | if t <= 0: return 0 28 | 
λ, _, M = 𝛉 29 | 30 | Λ = λ * t 31 | for t_i in ℋ_t: 32 | Λ += M(t - t_i) 33 | return Λ 34 | 35 | 36 | def exp_hawkes_intensity(t, ℋ_t, 𝛉): 37 | λ, α, β = 𝛉 38 | λˣ = λ 39 | for t_i in ℋ_t: 40 | λˣ += α * np.exp(-β * (t - t_i)) 41 | return λˣ 42 | 43 | 44 | def exp_hawkes_compensator(t, ℋ_t, 𝛉): 45 | if t <= 0: return 0 46 | λ, α, β = 𝛉 47 | Λ = λ * t 48 | for t_i in ℋ_t: 49 | Λ += (α/β) * (1 - np.exp(-β*(t - t_i))) 50 | return Λ 51 | 52 | 53 | @njit(nogil=True) 54 | def exp_hawkes_compensators(ℋ_t, 𝛉): 55 | λ, α, β = 𝛉 56 | 57 | Λ = 0 58 | λˣ_prev = λ 59 | t_prev = 0 60 | 61 | Λs = np.empty(len(ℋ_t), dtype=np.float64) 62 | for i, t_i in enumerate(ℋ_t): 63 | Λ += λ * (t_i - t_prev) + ( 64 | (λˣ_prev - λ)/β * 65 | (1 - np.exp(-β*(t_i - t_prev)))) 66 | Λs[i] = Λ 67 | 68 | λˣ_prev = λ + (λˣ_prev - λ) * ( 69 | np.exp(-β * (t_i - t_prev))) + α 70 | t_prev = t_i 71 | return Λs 72 | 73 | 74 | @njit(nogil=True) 75 | def power_hawkes_intensity(t, ℋ_t, 𝛉): 76 | λ, k, c, p = 𝛉 77 | λˣ = λ 78 | for t_i in ℋ_t: 79 | λˣ += k / (c + (t-t_i))**p 80 | return λˣ 81 | 82 | 83 | @njit(nogil=True) 84 | def power_hawkes_compensator(t, ℋ_t, 𝛉): 85 | λ, k, c, p = 𝛉 86 | Λ = λ * t 87 | for t_i in ℋ_t: 88 | Λ += ((k * (c * (c + (t-t_i)))**-p * 89 | (-c**p * (c + (t-t_i)) + c * (c + (t-t_i))**p)) / 90 | (p - 1)) 91 | return Λ 92 | 93 | 94 | @njit(nogil=True, parallel=True) 95 | def power_hawkes_compensators(ℋ_t, 𝛉): 96 | Λs = np.empty(len(ℋ_t), dtype=np.float64) 97 | for i in prange(len(ℋ_t)): 98 | t_i = ℋ_t[i] 99 | ℋ_i = ℋ_t[:i] 100 | Λs[i] = power_hawkes_compensator(t_i, ℋ_i, 𝛉) 101 | return Λs 102 | 103 | 104 | # Likelihood 105 | 106 | def log_likelihood(ℋ_T, T, 𝛉, λˣ, Λ): 107 | ℓ = 0.0 108 | for i, t_i in enumerate(ℋ_T): 109 | ℋ_i = ℋ_T[:i] 110 | λˣ_i = λˣ(t_i, ℋ_i, 𝛉) 111 | ℓ += np.log(λˣ_i) 112 | ℓ -= Λ(T, ℋ_T, 𝛉) 113 | return ℓ 114 | 115 | 116 | @njit(nogil=True, parallel=True) 117 | def power_log_likelihood(ℋ_T, T, 𝛉): 118 | ℓ = 0.0 119 | for i in prange(len(ℋ_T)): 120 | t_i = ℋ_T[i] 121 | ℋ_i = ℋ_T[:i] 122 | λˣ_i = power_hawkes_intensity(t_i, ℋ_i, 𝛉) 123 | ℓ += np.log(λˣ_i) 124 | ℓ -= power_hawkes_compensator(T, ℋ_T, 𝛉) 125 | return ℓ 126 | 127 | 128 | @njit() 129 | def exp_log_likelihood(ℋ_T, T, 𝛉): 130 | λ, α, β = 𝛉 131 | 𝐭 = ℋ_T 132 | N_T = len(𝐭) 133 | 134 | A = np.empty(N_T, dtype=np.float64) 135 | A[0] = 0 136 | for i in range(1, N_T): 137 | A[i] = np.exp(-β*(𝐭[i] - 𝐭[i-1])) * (1 + A[i-1]) 138 | 139 | ℓ = -λ*T 140 | for i, t_i in enumerate(ℋ_T): 141 | ℓ += np.log(λ + α * A[i]) - \ 142 | (α/β) * (1 - np.exp(-β*(T-t_i))) 143 | return ℓ 144 | 145 | 146 | def exp_mle(𝐭, T, 𝛉_start=np.array([1.0, 2.0, 3.0])): 147 | eps = 1e-5 148 | 𝛉_bounds = ((eps, None), (eps, None), (eps, None)) 149 | loss = lambda 𝛉: -exp_log_likelihood(𝐭, T, 𝛉) 150 | 𝛉_mle = minimize(loss, 𝛉_start, bounds=𝛉_bounds).x 151 | return np.array(𝛉_mle) 152 | 153 | 154 | def power_mle(𝐭, T, 𝛉_start=np.array([1.0, 1.0, 2.0, 3.0])): 155 | eps = 1e-5 156 | 𝛉_bounds = ((eps, None), (eps, None), (eps, None), 157 | (1+eps, 100)) 158 | loss = lambda 𝛉: -power_log_likelihood(𝐭, T, 𝛉) 159 | 𝛉_mle = minimize(loss, 𝛉_start, bounds=𝛉_bounds).x 160 | return np.array(𝛉_mle) 161 | 162 | 163 | # Simulation 164 | 165 | 166 | def simulate_inverse_compensator(𝛉, Λ, N): 167 | ℋ = np.empty(N, dtype=np.float64) 168 | 169 | tˣ_1 = -np.log(rnd.rand()) 170 | exp_1 = lambda t_1: Λ(t_1, ℋ[:0], 𝛉) - tˣ_1 171 | 172 | t_1_guess = 1.0 173 | t_1 = fsolve(exp_1, t_1_guess)[0] 174 | 175 | ℋ[0] = t_1 176 | t_prev = t_1 177 | for i in range(1, N): 178 | Δtˣ_i = 
-np.log(rnd.rand())
179 |
180 |         Λ_i = Λ(t_prev, ℋ[:i], 𝛉)  # compensator given only the i arrivals so far
181 |         exp_i = lambda t_next: Λ(t_next, ℋ[:i], 𝛉) - Λ_i - Δtˣ_i
182 |
183 |         t_next_guess = t_prev + 1.0
184 |         t_next = fsolve(exp_i, t_next_guess)[0]
185 |
186 |         ℋ[i] = t_next
187 |         t_prev = t_next
188 |     return ℋ
189 |
190 | @njit(nogil=True)
191 | def exp_simulate_by_composition(𝛉, N):
192 |     λ, α, β = 𝛉
193 |     λˣ_k = λ
194 |     t_k = 0
195 |
196 |     ℋ = np.empty(N, dtype=np.float64)
197 |     for k in range(N):
198 |         U_1 = rnd.rand()
199 |         U_2 = rnd.rand()
200 |
201 |         # Technically the following works, but without @njit
202 |         # it will print out "RuntimeWarning: invalid value encountered in log".
203 |         # This is because 1 + β/(λˣ_k + α - λ)*np.log(U_2) can be negative
204 |         # so T_2 can be np.NaN. The Dassios & Zhao (2013) algorithm checks if this
205 |         # expression is negative and handles it separately, though the lines
206 |         # below have the same behaviour as t_k = min(T_1, np.NaN) will be T_1.
207 |         T_1 = t_k - np.log(U_1) / λ
208 |         T_2 = t_k - np.log(1 + β/(λˣ_k + α - λ)*np.log(U_2))/β
209 |
210 |         t_prev = t_k
211 |         t_k = min(T_1, T_2)
212 |         ℋ[k] = t_k
213 |
214 |         if k > 0:
215 |             λˣ_k = λ + (λˣ_k + α - λ) * (
216 |                 np.exp(-β * (t_k - t_prev)))
217 |         else:
218 |             λˣ_k = λ
219 |
220 |     return ℋ
221 |
222 |
223 | @njit(nogil=True)
224 | def exp_simulate_by_thinning(𝛉, T):
225 |     λ, α, β = 𝛉
226 |
227 |     λˣ = λ
228 |     times = []
229 |
230 |     t = 0
231 |
232 |     while True:
233 |         M = λˣ
234 |         Δt = rnd.exponential() / M
235 |         t += Δt
236 |         if t > T:
237 |             break
238 |
239 |         λˣ = λ + (λˣ - λ) * np.exp(-β * Δt)
240 |
241 |         u = M * rnd.rand()
242 |         if u > λˣ:
243 |             continue  # This potential arrival is 'thinned' out
244 |
245 |         times.append(t)
246 |         λˣ += α
247 |
248 |     return np.array(times)
249 |
250 |
251 | @njit(nogil=True)
252 | def power_simulate_by_thinning(𝛉, T):
253 |     λ, k, c, p = 𝛉
254 |
255 |     λˣ = λ
256 |     times = []
257 |
258 |     t = 0
259 |
260 |     while True:
261 |         M = λˣ
262 |         Δt = rnd.exponential() / M
263 |         t += Δt
264 |         if t > T:
265 |             break
266 |
267 |         λˣ = power_hawkes_intensity(t, np.array(times), 𝛉)
268 |
269 |         u = M * rnd.rand()
270 |         if u > λˣ:
271 |             continue  # This potential arrival is 'thinned' out
272 |
273 |         times.append(t)
274 |         λˣ += k / (c ** p)
275 |
276 |     return np.array(times)
277 |
278 |
279 | # Moment matching
280 |
281 |
282 | def empirical_moments(𝐭, T, τ, lag):
283 |     bins = np.arange(0, T, τ)
284 |     N = len(bins) - 1
285 |     count = np.zeros(N)
286 |
287 |     for i in range(N):
288 |         count[i] = np.sum((bins[i] <= 𝐭) & (𝐭 < bins[i+1]))
289 |
290 |     empMean = np.mean(count)
291 |     empVar = np.std(count)**2
292 |     empAutoCov = np.mean((count[:-lag] - empMean) \
293 |         * (count[lag:] - empMean))
294 |
295 |     return np.array([empMean, empVar, empAutoCov]).reshape(3,1)
296 |
297 |
298 |
299 | def exp_moments(𝛉, τ, lag):
300 |     """
301 |     Consider an exponential Hawkes process with parameter 𝛉.
302 |     Look at intervals of length τ, i.e. N(t+τ) - N(t).
303 |     Calculate the limiting (t->∞) mean and variance.
304 |     Also, get the limiting autocovariance:
305 |     E[ (N(t + τ) - N(t)) (N(t + lag*τ + τ) - N(t + lag*τ)) ].
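These closed forms use the stationary mean intensity λβ/(β - α), and so implicitly assume the stability condition α < β (κ = β - α below).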
306 | """ 307 | λ, α, β = 𝛉 308 | κ = β - α 309 | δ = lag*τ 310 | 311 | mean = (λ*β/κ)*τ 312 | var = (λ*β/κ)*(τ*(β/κ) + (1 - β/κ)*((1 - np.exp(-κ*τ))/κ)) 313 | autoCov = (λ*β*α*(2*β-α)*(np.exp(-κ*τ) - 1)**2/(2*κ**4)) \ 314 | *np.exp(-κ*δ) 315 | 316 | return np.array([mean, var, autoCov]).reshape(3,1) 317 | 318 | 319 | def exp_gmm_loss(𝛉, τ, lag, empMoments, W): 320 | moments = exp_moments(𝛉, τ, lag) 321 | 𝐠 = empMoments - moments 322 | return (𝐠.T).dot(W).dot(𝐠)[0,0] 323 | 324 | def exp_gmm(𝐭, T, τ=5, lag=5, iters=2, 𝛉_start=np.array([1.0, 2.0, 3.0])): 325 | empMoments = empirical_moments(𝐭, T, τ, lag) 326 | 327 | W = np.eye(3) 328 | bounds = ((0, None), (0, None), (0, None)) 329 | 330 | 𝛉 = minimize(exp_gmm_loss, x0=𝛉_start, 331 | args=(τ, lag, empMoments, W), 332 | bounds=bounds).x 333 | 334 | for i in range(iters): 335 | moments = exp_moments(𝛉, τ, lag) 336 | 337 | 𝐠 = empMoments - moments 338 | S = 𝐠.dot(𝐠.T) 339 | 340 | W = np.linalg.inv(S) 341 | W /= np.max(W) # Avoid overflow of the loss function 342 | 343 | 𝛉 = minimize(exp_gmm_loss, x0=𝛉, 344 | args=(τ, lag, empMoments, W), 345 | bounds=bounds).x 346 | 347 | return 𝛉 348 | 349 | 350 | # Fit EM 351 | 352 | 353 | @njit(nogil=True, parallel=True) 354 | def em_responsibilities(𝐭, 𝛉): 355 | λ, α, β = 𝛉 356 | 357 | N = len(𝐭) 358 | resp = np.empty((N,N), dtype=np.float64) 359 | 360 | for i in prange(0,N): 361 | if i == 0: 362 | resp[i, 0] = 1.0 363 | for j in range(1, N): 364 | resp[i, j] = 0.0 365 | else: 366 | resp[i, 0] = λ 367 | rowSum = λ 368 | 369 | for j in range(1, i+1): 370 | resp[i, j] = α*np.exp(-β*(𝐭[i] - 𝐭[j-1])) 371 | rowSum += resp[i, j] 372 | 373 | for j in range(0, i+1): 374 | resp[i, j] /= rowSum 375 | 376 | for j in range(i+1, N): 377 | resp[i, j] = 0.0 378 | return resp 379 | 380 | 381 | def exp_em(𝐭, T, 𝛉_start=np.array([1.0, 2.0, 3.0]), iters=100, verbosity=None, calcLikelihoods=False): 382 | """ 383 | Run an EM fit on the '𝐭' arrival times up until final time 'T'. 
384 | """ 385 | 𝛉 = 𝛉_start.copy() 386 | 387 | llIterations = np.zeros(iters) 388 | iters = tqdm(range(iters)) if verbosity else range(iters) 389 | 390 | for i in iters: 391 | 𝛉, ll = exp_em_iter(𝐭, T, 𝛉, calcLikelihoods) 392 | llIterations[i] = ll 393 | 394 | if verbosity and i % verbosity == 0: 395 | print(𝛉[0], 𝛉[1], 𝛉[2]) 396 | 397 | if calcLikelihoods: 398 | return 𝛉, llIterations 399 | else: 400 | return 𝛉 401 | 402 | 403 | @njit(nogil=True, parallel=True) 404 | def exp_em_iter(𝐭, T, 𝛉, calcLikelihoods): 405 | λ, α, β = 𝛉 406 | N = len(𝐭) 407 | 408 | # E step 409 | resp = em_responsibilities(𝐭, 𝛉) 410 | 411 | # M step: Update λ 412 | λ = np.sum(resp[:,0])/T 413 | 414 | # M step: Update α 415 | numer = np.sum(resp[:,1:]) 416 | denom = np.sum(1 - np.exp(-β*(T - 𝐭))) 417 | α = β*numer/denom 418 | 419 | # M step: Update β 420 | numer = np.sum(1 - np.exp(-β*(T - 𝐭)))/β - np.sum((T - 𝐭)*np.exp(-β*(T - 𝐭))) 421 | 422 | denom = 0 423 | for j in prange(1, N): 424 | denom += np.sum((𝐭[j] - 𝐭[:j])*resp[j,1:j+1]) 425 | 426 | β = α*numer/denom 427 | 428 | if calcLikelihoods: 429 | ll = exp_log_likelihood(𝐭, T, 𝛉) 430 | else: 431 | ll = 0.0 432 | 433 | 𝛉[0] = λ 434 | 𝛉[1] = α 435 | 𝛉[2] = β 436 | 437 | return 𝛉, ll 438 | 439 | 440 | ## Mutually exciting Hawkes with exponential decay 441 | @njit() 442 | def mutual_hawkes_intensity(t, ℋ_t, 𝛉): 443 | """ 444 | Each μ[i] is an m-vector-valued function, which takes as argument 445 | the time passed since an arrival to process i, and returns the 446 | lasting effect on each of the m processes 447 | """ 448 | λ, μ = 𝛉 449 | 450 | λˣ = λ 451 | for (t_i, d_i) in ℋ_t: 452 | λˣ += μ[d_i](t - t_i) 453 | return λˣ 454 | 455 | 456 | @njit(nogil=True) 457 | def mutual_exp_hawkes_intensity(t, times, ids, 𝛉): 458 | """ 459 | The λ is an m-vector which shows the starting intensity for 460 | each process. 461 | 462 | Each α[i] is an m-vector which shows the jump in intensity 463 | for each of the processes when an arrival comes to process i. 464 | 465 | The β is an m-vector which shows the intensity decay rates for 466 | each processes intensity. 467 | """ 468 | λ, α, β = 𝛉 469 | 470 | λˣ = λ.copy() 471 | for (t_i, d_i) in zip(times, ids): 472 | λˣ += α[d_i] * np.exp(-β * (t - t_i)) 473 | 474 | return λˣ 475 | 476 | 477 | @njit(nogil=True) 478 | def mutual_exp_hawkes_compensator(t, times, ids, 𝛉): 479 | """ 480 | The λ is an m-vector which shows the starting intensity for 481 | each process. 482 | 483 | Each α[i] is an m-vector which shows the jump in intensity 484 | for each of the processes when an arrival comes to process i. 485 | 486 | The β is an m-vector which shows the intensity decay rates for 487 | each processes intensity. 488 | """ 489 | # if t <= 0: return np.zeros(m) 490 | 491 | λ, α, β = 𝛉 492 | 493 | Λ = λ * t 494 | 495 | for (t_i, d_i) in zip(times, ids): 496 | # Λ += M(t - t_i, d_i) 497 | Λ += (α[d_i]/β) * (1 - np.exp(-β*(t - t_i))) 498 | return Λ 499 | 500 | 501 | @njit(nogil=True) 502 | def mutual_exp_hawkes_compensators(times, ids, 𝛉): 503 | """ 504 | The λ is an m-vector which shows the starting intensity for 505 | each process. 506 | 507 | Each α[i] is an m-vector which shows the jump in intensity 508 | for each of the processes when an arrival comes to process i. 509 | 510 | The β is an m-vector which shows the intensity decay rates for 511 | each processes intensity. 
512 | """ 513 | 514 | λ, α, β = 𝛉 515 | m = len(λ) 516 | 517 | Λ = np.zeros(m) 518 | λˣ_prev = λ 519 | t_prev = 0 520 | 521 | Λs = np.zeros((len(times), m), dtype=np.float64) 522 | 523 | for i in range(len(times)): 524 | t_i = times[i] 525 | d_i = ids[i] 526 | 527 | Λ += λ * (t_i - t_prev) + (λˣ_prev - λ)/β * (1 - np.exp(-β*(t_i - t_prev))) 528 | Λs[i,:] = Λ 529 | 530 | λˣ_prev = λ + (λˣ_prev - λ) * np.exp(-β * (t_i - t_prev)) + α[d_i,:] 531 | t_prev = t_i 532 | 533 | return Λs 534 | 535 | 536 | @njit(nogil=True) 537 | def mutual_log_likelihood(ℋ_T, T, 𝛉, λˣ, Λ): 538 | m = len(𝛉) 539 | ℓ = 0 540 | for (t_i, d_i) in ℋ_T: 541 | if t_i > T: 542 | raise RuntimeError("T is too small for this data") 543 | 544 | # Get the history of arrivals before time t_i 545 | ℋ_i = [(t_s, d_s) for (t_s, d_s) in ℋ_T if t_s < t_i] 546 | λˣ_i = λˣ(t_i, ℋ_i, 𝛉) 547 | ℓ += np.log(λˣ_i[d_i]) 548 | 549 | ℓ -= np.sum(Λ(T, ℋ_T, 𝛉)) 550 | return ℓ 551 | 552 | 553 | @njit(nogil=True) 554 | def mutual_exp_log_likelihood(times, ids, T, 𝛉): 555 | if np.max(times) > T: 556 | raise RuntimeError("T is too small for this data") 557 | 558 | λ, α, β = 𝛉 559 | 560 | if np.min(λ) <= 0 or np.min(α) < 0 or np.min(β) <= 0: return -np.inf 561 | 562 | ℓ = 0 563 | λˣ = 𝛉[0] 564 | 565 | t_prev = 0 566 | for t_i, d_i in zip(times, ids): 567 | λˣ = λ + (λˣ - λ) * np.exp(-β * (t_i - t_prev)) 568 | ℓ += np.log(λˣ[d_i]) 569 | 570 | λˣ += α[d_i,:] 571 | t_prev = t_i 572 | 573 | ℓ -= np.sum(mutual_exp_hawkes_compensator(T, times, ids, 𝛉)) 574 | 575 | return ℓ 576 | 577 | 578 | def mutual_exp_simulate_by_thinning(𝛉, T): 579 | 580 | """ 581 | The λ is an m-vector which shows the starting intensity for 582 | each process. 583 | 584 | Each α[i] is an m-vector which shows the jump in intensity 585 | for each of the processes when an arrival comes to process i. 586 | 587 | The β is an m-vector which shows the intensity decay rates for 588 | each processes intensity. 
589 | """ 590 | λ, α, β = 𝛉 591 | m = len(λ) 592 | 593 | λˣ = λ 594 | times = [] 595 | 596 | t = 0 597 | 598 | while True: 599 | M = np.sum(λˣ) 600 | Δt = rnd.exponential() / M 601 | t += Δt 602 | if t > T: 603 | break 604 | 605 | λˣ = λ + (λˣ - λ) * np.exp(-β * Δt) 606 | 607 | u = M * rnd.rand() 608 | if u > np.sum(λˣ): 609 | continue # No arrivals (they are 'thinned' out) 610 | 611 | cumulativeλˣ = 0 612 | 613 | for i in range(m): 614 | cumulativeλˣ += λˣ[i] 615 | if u < cumulativeλˣ: 616 | times.append((t, i)) 617 | λˣ += α[i] 618 | break 619 | 620 | return times 621 | 622 | 623 | def flatten_theta(𝛉): 624 | return np.hstack([𝛉[0], np.hstack(𝛉[1]), 𝛉[2]]) 625 | 626 | 627 | def unflatten_theta(𝛉_flat, m): 628 | λ = 𝛉_flat[:m] 629 | α = 𝛉_flat[m:(m + m**2)].reshape((m,m)) 630 | β = 𝛉_flat[(m + m**2):] 631 | 632 | return (λ, α, β) 633 | 634 | 635 | def mutual_exp_mle(𝐭, ids, T, 𝛉_start): 636 | 637 | m = len(𝛉_start[0]) 638 | 𝛉_start_flat = flatten_theta(𝛉_start) 639 | 640 | def loss(𝛉_flat): 641 | return -mutual_exp_log_likelihood(𝐭, ids, T, unflatten_theta(𝛉_flat, m)) 642 | 643 | def print_progress(𝛉_i, itCount = []): 644 | itCount.append(None) 645 | i = len(itCount) 646 | 647 | if i % 100 == 0: 648 | ll = -loss(𝛉_i) 649 | print(f"Iteration {i} loglikelihood {ll:.2f}") 650 | 651 | res = minimize(loss, 𝛉_start_flat, options={"disp": True, "maxiter": 100_000}, 652 | callback = print_progress, method = 'Nelder-Mead') 653 | 654 | 𝛉_mle = unflatten_theta(res.x, m) 655 | logLike = -res.fun 656 | 657 | return 𝛉_mle, logLike 658 | 659 | 660 | # More advanced MLE methods for the exponential case 661 | 662 | 663 | @njit() 664 | def ozaki_recursion(𝐭, 𝛉, n): 665 | """ 666 | Calculate sum_{j=1}^{i-1} t_j^n * exp(-β * (t_i - t_j)) recursively 667 | """ 668 | λ, α, β = 𝛉 669 | N_T = len(𝐭) 670 | 671 | A_n = np.empty(N_T, dtype=np.float64) 672 | A_n[0] = 0 673 | for i in range(1, N_T): 674 | A_n[i] = np.exp(-β*(𝐭[i] - 𝐭[i-1])) * (𝐭[i-1]**n + A_n[i-1]) 675 | 676 | return A_n 677 | 678 | 679 | @njit() 680 | def deriv_exp_log_likelihood(ℋ_T, T, 𝛉): 681 | λ, α, β = 𝛉 682 | 683 | 𝐭 = ℋ_T 684 | N_T = len(𝐭) 685 | 686 | A = ozaki_recursion(𝐭, 𝛉, 0) 687 | A_1 = ozaki_recursion(𝐭, 𝛉, 1) 688 | 689 | B = np.empty(N_T, dtype=np.float64) 690 | B[0] = 0 691 | 692 | for i in range(1, N_T): 693 | B[i] = 𝐭[i] * A[i] - A_1[i] 694 | 695 | dℓdλ = -T 696 | dℓdα = 0 697 | dℓdβ = 0 698 | 699 | for i, t_i in enumerate(ℋ_T): 700 | dℓdα += (1/β) * (np.exp(-β*(T-t_i)) - 1) + A[i] / (λ + α * A[i]) 701 | dℓdβ += -α * ( (1/β) * (T - t_i) * np.exp(-β*(T-t_i)) \ 702 | + (1/β**2) * (np.exp(-β*(T-t_i))-1) ) \ 703 | - (α * B[i] / (λ + α * A[i])) 704 | dℓdλ += 1 / (λ + α * A[i]) 705 | 706 | d = np.empty(3, dtype=np.float64) 707 | d[0] = dℓdλ 708 | d[1] = dℓdα 709 | d[2] = dℓdβ 710 | return d 711 | 712 | 713 | @njit() 714 | def hess_exp_log_likelihood(ℋ_T, T, 𝛉): 715 | λ, α, β = 𝛉 716 | 717 | 𝐭 = ℋ_T 718 | N_T = len(𝐭) 719 | 720 | A = ozaki_recursion(𝐭, 𝛉, 0) 721 | A_1 = ozaki_recursion(𝐭, 𝛉, 1) 722 | A_2 = ozaki_recursion(𝐭, 𝛉, 2) 723 | 724 | # B is sum (t_i - t_j) * exp(- ...) 725 | # C is sum (t_i - t_j)**2 * exp(- ...) 
726 | B = np.empty(N_T, dtype=np.float64) 727 | C = np.empty(N_T, dtype=np.float64) 728 | B[0] = 0 729 | C[0] = 0 730 | 731 | for i in range(1, N_T): 732 | B[i] = 𝐭[i] * A[i] - A_1[i] 733 | C[i] = 𝐭[i]**2 * A[i] - 2*𝐭[i]*A_1[i] + A_2[i] 734 | 735 | d2ℓdα2 = 0 736 | d2ℓdαdβ = 0 737 | d2ℓdβ2 = 0 738 | 739 | d2ℓdλ2 = 0 740 | d2ℓdαdλ = 0 741 | d2ℓdβdλ = 0 742 | 743 | for i, t_i in enumerate(ℋ_T): 744 | d2ℓdα2 += - ( A[i] / (λ + α * A[i]) )**2 745 | d2ℓdαdβ += - ( (1/β) * (T - t_i) * np.exp(-β*(T-t_i)) \ 746 | + (1/β**2) * (np.exp(-β*(T-t_i))-1) ) \ 747 | + ( -B[i]/(λ + α * A[i]) + (α * A[i] * B[i]) / (λ + α * A[i])**2 ) 748 | 749 | d2ℓdβ2 += α * ( (1/β) * (T - t_i)**2 * np.exp(-β*(T-t_i)) + \ 750 | (2/β**2) * (T - t_i) * np.exp(-β*(T-t_i)) + \ 751 | (2/β**3) * (np.exp(-β*(T-t_i)) - 1) ) + \ 752 | ( α*C[i] / (λ + α * A[i]) - (α*B[i] / (λ + α * A[i]))**2 ) 753 | 754 | 755 | d2ℓdλ2 += -1 / (λ + α * A[i])**2 756 | d2ℓdαdλ += -A[i] / (λ + α * A[i])**2 757 | d2ℓdβdλ += α * B[i] / (λ + α * A[i])**2 758 | 759 | H = np.empty((3,3), dtype=np.float64) 760 | H[0,0] = d2ℓdλ2 761 | H[1,1] = d2ℓdα2 762 | H[2,2] = d2ℓdβ2 763 | H[0,1] = H[1,0] = d2ℓdαdλ 764 | H[0,2] = H[2,0] = d2ℓdβdλ 765 | H[1,2] = H[2,1] = d2ℓdαdβ 766 | return H 767 | 768 | 769 | def exp_mle_with_grad(𝐭, T, 𝛉_start=np.array([1.0, 2.0, 3.0])): 770 | eps = 1e-5 771 | 𝛉_bounds = ((eps, None), (eps, None), (eps, None)) 772 | loss = lambda 𝛉: -exp_log_likelihood(𝐭, T, 𝛉) 773 | grad = lambda 𝛉: -deriv_exp_log_likelihood(𝐭, T, 𝛉) 774 | 𝛉_mle = minimize(loss, 𝛉_start, bounds=𝛉_bounds, jac=grad).x 775 | 776 | return 𝛉_mle 777 | 778 | 779 | def exp_mle_with_hess(𝐭, T, 𝛉_start=np.array([1.0, 2.0, 3.0])): 780 | eps = 1e-5 781 | 𝛉_bounds = ((eps, None), (eps, None), (eps, None)) 782 | loss = lambda 𝛉: -exp_log_likelihood(𝐭, T, 𝛉) 783 | grad = lambda 𝛉: -deriv_exp_log_likelihood(𝐭, T, 𝛉) 784 | hess = lambda 𝛉: -hess_exp_log_likelihood(𝐭, T, 𝛉) 785 | 𝛉_mle = minimize(loss, 𝛉_start, bounds=𝛉_bounds, jac=grad, hess=hess, 786 | method="trust-constr").x 787 | 788 | return 𝛉_mle 789 | 790 | 791 | # Alternative simulation method 792 | 793 | 794 | @njit(nogil=True) 795 | def exp_simulate_by_composition_alt(𝛉, T): 796 | """ 797 | This is simply an alternative to 'exp_simulate_by_composition' 798 | where the simulation stops after time T rather than stopping after 799 | observing N arrivals. 800 | """ 801 | λ, α, β = 𝛉 802 | λˣ_k = λ 803 | t_k = 0 804 | 805 | ℋ = [] 806 | while t_k < T: 807 | U_1 = rnd.rand() 808 | U_2 = rnd.rand() 809 | 810 | # Technically the following works, but without @njit 811 | # it will print out "RuntimeWarning: invalid value encountered in log". 812 | # This is because 1 + β/(λˣ_k + α - λ)*np.log(U_2) can be negative 813 | # so T_2 can be np.NaN. The Dassios & Zhao (2013) algorithm checks if this 814 | # expression is negative and handles it separately, though the lines 815 | # below have the same behaviour as t_k = min(T_1, np.NaN) will be T_1. 816 | T_1 = t_k - np.log(U_1) / λ 817 | T_2 = t_k - np.log(1 + β/(λˣ_k + α - λ)*np.log(U_2))/β 818 | 819 | t_prev = t_k 820 | t_k = min(T_1, T_2) 821 | ℋ.append(t_k) 822 | 823 | if len(ℋ) > 1: 824 | λˣ_k = λ + (λˣ_k + α - λ) * ( 825 | np.exp(-β * (t_k - t_prev))) 826 | else: 827 | λˣ_k = λ 828 | 829 | return np.array(ℋ[:-1]) 830 | --------------------------------------------------------------------------------