New Research: Supply Chain Attack on Axios Pulls Malicious Dependency from npm. Details →
Socket
Book a Demo | Sign in
Socket

filterpy

Package Overview
Dependencies
Maintainers
1
Versions
47
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

filterpy - pypi Package Compare versions

Comparing version
1.2.4
to
1.3
+5
-1
filterpy.egg-info/PKG-INFO
Metadata-Version: 1.1
Name: filterpy
Version: 1.2.4
Version: 1.3
Summary: Kalman filtering and optimal estimation library

@@ -129,2 +129,6 @@ Home-page: https://github.com/rlabbe/filterpy

Full documentation is at
https://filterpy.readthedocs.io/en/latest/
First, import the filters and helper functions.

@@ -131,0 +135,0 @@

+0
-1

@@ -21,3 +21,2 @@ LICENSE

filterpy/examples/GetRadar.py
filterpy/examples/RadarEKF.py
filterpy/examples/RadarUKF.py

@@ -24,0 +23,0 @@ filterpy/examples/__init__.py

@@ -17,2 +17,2 @@ # -*- coding: utf-8 -*-

__version__ = "1.2.4"
__version__ = "1.3"

@@ -0,1 +1,20 @@

Version 1.3
=============
* #113 added plotting of 3D covariance ellipsoid with plot_3d_covariance
* Fixed bug where multivariate_gaussian accepted negative covariances
* #108 used pylint symbolic names
* Got code in compliance with pylint
* Fixed #105 - this was just test code, and it turns out the code as it was was correct, so I deleted the second, unhit return
* #88 fixed HInfinity.batch_filter to not use R
Version 1.2.5
=============
#102 - Bug: UKF was using slow code path when using np.subtract.
Version 1.2.4

@@ -2,0 +21,0 @@ =============

@@ -15,10 +15,11 @@ """Copyright 2015 Roger R Labbe Jr.

"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
__all__=["helpers", "discretization", "kinematic"]
#pylint:disable=wildcard-import
from __future__ import absolute_import
__all__ = ["helpers", "discretization", "kinematic"]
from .helpers import *
from .discretization import *
from .kinematic import *

@@ -17,2 +17,3 @@ # -*- coding: utf-8 -*-

#pylint:disable=invalid-name, bad-whitespace

@@ -24,4 +25,5 @@

from numpy import array, zeros, vstack, eye
from scipy.linalg import expm, inv, block_diag
from numpy import zeros, vstack, eye
from numpy.linalg import inv
from scipy.linalg import expm, block_diag

@@ -76,3 +78,5 @@

assert dim == 2 or dim == 3 or dim == 4
if not (dim == 2 or dim == 3 or dim == 4):
assert ValueError("dim must be between 2 and 4")
if dim == 2:

@@ -134,3 +138,5 @@ Q = [[.25*dt**4, .5*dt**3],

assert dim == 2 or dim == 3 or dim == 4
if not (dim == 2 or dim == 3 or dim == 4):
assert ValueError("dim must be between 2 and 4")
if dim == 2:

@@ -154,4 +160,4 @@ Q = [[(dt**3)/3., (dt**2)/2.],

def van_loan_discretization(F, G, dt):
""" Discretizes a linear differential equation which includes white noise
"""
Discretizes a linear differential equation which includes white noise
according to the method of C. F. van Loan [1]. Given the continuous

@@ -169,23 +175,23 @@ model

Given y'' + y = 2u(t), we create the continuous state model of
Given y'' + y = 2u(t), we create the continuous state model of
x' = [ 0 1] * x + [0]*u(t)
[-1 0] [2]
x' = [ 0 1] * x + [0]*u(t)
[-1 0] [2]
and a time step of 0.1:
and a time step of 0.1:
>>> F = np.array([[0,1],[-1,0]], dtype=float)
>>> G = np.array([[0.],[2.]])
>>> phi, Q = van_loan_discretization(F, G, 0.1)
>>> F = np.array([[0,1],[-1,0]], dtype=float)
>>> G = np.array([[0.],[2.]])
>>> phi, Q = van_loan_discretization(F, G, 0.1)
>>> phi
array([[ 0.99500417, 0.09983342],
[-0.09983342, 0.99500417]])
>>> phi
array([[ 0.99500417, 0.09983342],
[-0.09983342, 0.99500417]])
>>> Q
array([[ 0.00133067, 0.01993342],
[ 0.01993342, 0.39866933]])
>>> Q
array([[ 0.00133067, 0.01993342],
[ 0.01993342, 0.39866933]])
(example taken from Brown[2])
(example taken from Brown[2])

@@ -224,3 +230,3 @@

def linear_ode_discretation(F, L=None, Q=None, dt=1):
def linear_ode_discretation(F, L=None, Q=None, dt=1.):
n = F.shape[0]

@@ -232,5 +238,5 @@

if Q is None:
Q = zeros((n,n))
Q = zeros((n, n))
A = expm(F*dt)
A = expm(F * dt)

@@ -243,17 +249,10 @@ phi = zeros((2*n, 2*n))

zo = vstack((zeros((n,n)), eye(n)))
zo = vstack((zeros((n, n)), eye(n)))
CD = expm(phi*dt).dot(zo)
C = CD[0:n,:]
D = CD[n:2*n,:]
C = CD[0:n, :]
D = CD[n:2*n, :]
q = C.dot(inv(D))
return (A, q)
# -*- coding: utf-8 -*-
#pylint: disable=invalid-name

@@ -19,3 +20,2 @@ """Copyright 2015 Roger R Labbe Jr.

def runge_kutta4(y, x, dx, f):

@@ -47,1 +47,70 @@ """computes 4th order Runge-Kutta for dy/dx.

def pretty_str(label, arr):
"""
Generates a pretty printed NumPy array with an assignment. Optionally
transposes column vectors so they are drawn on one line. Strictly speaking
arr can be any type convertible by `str(arr)`, but the output may not
be what you want if the type of the variable is not a scalar or an
ndarray.
Examples
--------
>>> pprint('cov', np.array([[4., .1], [.1, 5]]))
cov = [[4. 0.1]
[0.1 5. ]]
>>> print(pretty_str('x', np.array([[1], [2], [3]])))
x = [[1 2 3]].T
"""
def is_col(a):
""" return true if a is a column vector"""
try:
return a.shape[0] > 1 and a.shape[1] == 1
except (AttributeError, IndexError):
return False
transposed = False
if is_col(arr):
arr = arr.T
transposed = True
rows = str(arr).split('\n')
if not rows:
return ''
if label is None:
label = ''
if label:
label += ' = '
s = label + rows[0]
if transposed:
s += '.T'
return s
pad = ' ' * len(label)
for line in rows[1:]:
s = s + '\n' + pad + line
return s
def pprint(label, arr, **kwargs):
""" pretty prints an NumPy array using the function pretty_str. Keyword
arguments are passed to the print() function.
See Also
--------
pretty_str
Examples
--------
>>> pprint('cov', np.array([[4., .1], [.1, 5]]))
cov = [[4. 0.1]
[0.1 5. ]]
"""
print(pretty_str(label, arr), **kwargs)
# -*- coding: utf-8 -*-
#pylint: disable=invalid-name

@@ -17,15 +18,20 @@ """Copyright 2018 Roger R Labbe Jr.

"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import math
import numpy as np
import scipy as sp
from filterpy.kalman import KalmanFilter
from scipy.linalg import block_diag
import filterpy
def kinematic_state_transition(order, dt):
""" create a state transition matrix of a given order for a given time
"""
create a state transition matrix of a given order for a given time
step `dt`.
"""
assert order >= 0 and int(order) == order, "order must be an int >= 0"
if not(order >= 0 and int(order) == order):
raise ValueError("order must be an int >= 0")
# hard code common cases for computational efficiency

@@ -40,3 +46,3 @@ if order == 0:

[0., 1., dt],
[0., 1., dt]])
[0., 0., 1.]])

@@ -58,10 +64,25 @@ # grind it out computationally....

def kinematic_kf(dim, order, dt=1., order_by_dim=True):
""" Returns a KalmanFilter using newtonian kinematics for an arbitrary
number of dimensions and order. So, for example, a constant velocity
filter in 3D space would be created with
def kinematic_kf(dim, order, dt=1., dim_z=1, order_by_dim=True):
"""
Returns a KalmanFilter using newtonian kinematics of arbitrary order
for any number of dimensions. For example, a constant velocity filter
in 3D space would have order 1 dimension 3.
kinematic_kf(3, 1)
Examples
--------
A constant velocity filter in 3D space with delta time = .2 seconds
would be created with
>>> kf = kinematic_kf(dim=3, order=1, dt=.2)
>>> kf.F
>>> array([[1. , 0.2, 0. , 0. , 0. , 0. ],
[0. , 1. , 0. , 0. , 0. , 0. ],
[0. , 0. , 1. , 0.2, 0. , 0. ],
[0. , 0. , 0. , 1. , 0. , 0. ],
[0. , 0. , 0. , 0. , 1. , 0.2],
[0. , 0. , 0. , 0. , 0. , 1. ]])
which will set the state `x` to be interpreted as

@@ -71,3 +92,3 @@

If you set `order_by_dim` to False, then `x` is assumed to be
If you set `order_by_dim` to False, then `x` is ordered as

@@ -78,3 +99,3 @@ [x y z x' y' z'].T

kinematic_kf(2, 3)
>> kinematic_kf(2, 3)

@@ -89,10 +110,19 @@

>>> kf = kinematic_kf(2, 1, dt=3.0)
>>> kf.F
array([[1., 3., 0., 0.],
[0., 1., 0., 0.],
[0., 0., 1., 3.],
[0., 0., 0., 1.]])
Parameters
----------
dim : int
number of dimensions
dim : int, >= 1
number of dimensions (2D space would be dim=2)
order : int, >= 1
order of the filter. 2 would be a const acceleration model.
order : int, >= 0
order of the filter. 2 would be a const acceleration model with
a stat

@@ -105,12 +135,27 @@ dim_z : int, default 1

order_by_dim : bool, default=True
Defines ordering of variables in the state vector. `True` orders
by keeping all derivatives of each dimensions)
[x x' x'' y y' y'']
whereas `False` interleaves the dimensions
[x y z x' y' z' x'' y'' z'']
"""
if dim < 1:
raise ValueError("dim must be >= 1")
if order < 0:
raise ValueError("order must be >= 0")
if dim_z < 1:
raise ValueError("dim_z must be >= 1")
dim_x = order + 1
kf = KalmanFilter(dim_x=dim * dim_x, dim_z=dim)
kf = filterpy.kalman.KalmanFilter(dim_x=dim * dim_x, dim_z=dim)
F = kinematic_state_transition(order, dt)
if order_by_dim:
diag = [F] * dim
kf.F = sp.linalg.block_diag(*diag)
kf.F = block_diag(*diag)

@@ -122,3 +167,3 @@ else:

ix, iy = (i // dim_x) * dim, (i % dim_x) * dim
ix, iy = (i // dim_x) * dim, (i % dim_x) * dim
kf.F[ix:ix+dim, iy:iy+dim] = f

@@ -135,11 +180,8 @@

if __name__ == "__main__":
kf = kinematic_kf(2, 1, dt = 3, order_by_dim=False)
print(kf.F)
_kf = kinematic_kf(2, 1, dt=3., order_by_dim=False)
print(_kf.F)
print('\n\n')
kf = kinematic_kf(3, 1, dt = 3, order_by_dim=False)
print(kf.F)
_kf = kinematic_kf(3, 1, dt=3., order_by_dim=False)
print(_kf.F)

@@ -17,5 +17,8 @@ # -*- coding: utf-8 -*-

#pylint: disable=wildcard-import
from __future__ import (absolute_import, division, print_function,
unicode_literals)
__all__=["discrete_bayes"]
__all__ = ["discrete_bayes"]
from .discrete_bayes import *

@@ -126,4 +126,4 @@ # -*- coding: utf-8 -*-

return convolve(np.roll(pdf, offset), kernel, mode='wrap')
else:
return convolve(shift(pdf, offset, cval=cval), kernel,
cval=cval, mode='constant')
return convolve(shift(pdf, offset, cval=cval), kernel,
cval=cval, mode='constant')
# -*- coding: utf-8 -*-
#pylint: disable=wildcard-import
""" Contains various example, mostly very outdated now."""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
__all__=["radar_sim"]
__all__ = ["radar_sim"]
from .radar_sim import *

@@ -16,3 +16,6 @@ # -*- coding: utf-8 -*-

"""
#pylint: skip-file
import math

@@ -34,3 +37,6 @@ from numpy import array, asarray

This is very old code; it no longer runs due to changes in the UKF
"""
dt = 0.1

@@ -37,0 +43,0 @@ y = 20

@@ -17,2 +17,3 @@

#pylint: disable=invalid-name
from __future__ import (absolute_import, division, print_function,

@@ -26,3 +27,3 @@ unicode_literals)

def GetRadar(dt):
def get_radar(dt):
""" Simulate radar range to object at 1K altidue and moving at 100m/s.

@@ -33,12 +34,12 @@ Adds about 5% measurement noise. Returns slant range to the object.

if not hasattr (GetRadar, "posp"):
GetRadar.posp = 0
if not hasattr(get_radar, "posp"):
get_radar.posp = 0
vel = 100 + .5 * randn()
alt = 1000 + 10 * randn()
pos = GetRadar.posp + vel*dt
pos = get_radar.posp + vel*dt
v = 0 + pos* 0.05*randn()
slant_range = math.sqrt (pos**2 + alt**2) + v
GetRadar.posp = pos
slant_range = math.sqrt(pos**2 + alt**2) + v
get_radar.posp = pos

@@ -49,3 +50,3 @@ return slant_range

if __name__ == "__main__":
for i in range (100):
print(GetRadar (0.1))
for i in range(100):
print(get_radar(0.1))

@@ -18,8 +18,11 @@ # -*- coding: utf-8 -*-

#pylint: skip-file
from numpy.random import randn
from math import radians, atan2
class RadarSim(object):
""" Simulates the radar signal returns from an object flying
"""
Simulates the radar signal returns from an object flying
at a constant altitude and velocity in 1D. Assumes random

@@ -38,8 +41,9 @@ process noise - altitude and velocity varies a bit for each call.

def get_range(self, process_err_pct=0.05):
""" Returns slant range to the object. Call once for each
"""
Returns slant range to the object. Call once for each
new measurement at dt time from last call.
"""
vel = self.vel + 5*randn()
alt = self.alt + 10*randn()
vel = self.vel + 5 * randn()
alt = self.alt + 10 * randn()

@@ -52,10 +56,1 @@ self.pos += vel*self.dt

return slant_range
def get_range_bearing(self, angle_sigma_deg=1.):
r = self.get_range()
a = atan2(self.alt, self.x)
a += randn() * radians(angle_sigma_deg)
return r, a

@@ -17,10 +17,15 @@ # -*- coding: utf-8 -*-

#pylint: skip-file
""" This is very old code, and no longer runs due to reorgization of the
UKF code"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
import scipy.linalg as linalg
import matplotlib.pyplot as plt
from GetRadar import GetRadar
from GetRadar import get_radar
from filterpy.kalman import UnscentedKalmanFilter as UKF

@@ -27,0 +32,0 @@ from filterpy.common import Q_discrete_white_noise

@@ -17,8 +17,8 @@ # -*- coding: utf-8 -*-

from __future__ import (absolute_import, division, print_function,
unicode_literals)
#pylint: disable=wildcard-import
__all__=["gh_filter"]
from __future__ import absolute_import
__all__ = ["gh_filter"]
from .gh_filter import *
# -*- coding: utf-8 -*-
# pylint: disable=C0103, R0913, R0902, C0326, R0903, W1401
# disable snake_case warning, too many arguments, too many attributes,
# one space before assignment, too few public methods, anomalous backslash
# in string
"""Copyright 2015 Roger R Labbe Jr.

@@ -23,2 +28,3 @@

from numpy import dot
from filterpy.common import pretty_str

@@ -87,10 +93,9 @@

self.residual : np.array
self.y : np.array
difference between the measurement and the prediction
"""
if order < 0 or order > 2:
raise ValueError('order must be between 0 and 2')
assert order >= 0
assert order <= 2
if np.isscalar(x0):

@@ -108,2 +113,3 @@ self.x = np.zeros(order+1)

self.k = k
self.y = 0.

@@ -119,4 +125,4 @@

g = self.g
self.residual = z - self.x[0]
self.x += dot(g, self.residual)
self.y = z - self.x[0]
self.x += dot(g, self.y)

@@ -132,5 +138,5 @@ elif self.order == 1:

self.residual = z - (x + dxdt)
self.x[0] = x + dxdt + g*self.residual
self.x[1] = dx + h*self.residual / self.dt
self.y = z - (x + dxdt)
self.x[0] = x + dxdt + g*self.y
self.x[1] = dx + h*self.y / self.dt

@@ -151,7 +157,7 @@ else: # order == 2

self.residual = z -(x + dxdt +0.5*ddx*T2)
self.y = z -(x + dxdt +0.5*ddx*T2)
self.x[0] = x + dxdt + 0.5*ddx*T2 + g*self.residual
self.x[1] = dx + ddx*self.dt + h*self.residual / self.dt
self.x[2] = ddx + 2*k*self.residual / (self.dt**2)
self.x[0] = x + dxdt + 0.5*ddx*T2 + g*self.y
self.x[1] = dx + ddx*self.dt + h*self.y / self.dt
self.x[2] = ddx + 2*k*self.y / (self.dt**2)

@@ -212,2 +218,3 @@

def __init__(self, x, dx, dt, g, h):

@@ -243,6 +250,2 @@ """ Creates a g-h filter.

assert np.isscalar(dt)
assert np.isscalar(g)
assert np.isscalar(h)
self.x = x

@@ -253,5 +256,9 @@ self.dx = dx

self.h = h
self.dx_prediction = self.dx
self.x_prediction = self.x
self.y = 0. # residual
def update (self, z, g=None, h=None):
def update(self, z, g=None, h=None):
"""performs the g-h filter predict and update step on the

@@ -306,5 +313,5 @@ measurement z. Modifies the member variables listed below,

# update step
self.residual = z - self.x_prediction
self.dx = self.dx_prediction + h * self.residual / self.dt
self.x = self.x_prediction + g * self.residual
self.y = z - self.x_prediction
self.dx = self.dx_prediction + h * self.y / self.dt
self.x = self.x_prediction + g * self.y

@@ -314,3 +321,3 @@ return (self.x, self.dx)

def batch_filter (self, data, save_predictions=False):
def batch_filter(self, data, save_predictions=False):
"""

@@ -356,4 +363,4 @@ Given a sequenced list of data, performs g-h filter

results = np.zeros((n+1, 2))
results[0,0] = x
results[0,1] = dx
results[0, 0] = x
results[0, 1] = dx

@@ -366,5 +373,5 @@ if save_predictions:

for i,z in enumerate(data):
for i, z in enumerate(data):
#prediction step
x_est = x + (dx*self.dt)
x_est = x + (dx * self.dt)

@@ -376,4 +383,4 @@ # update step

results[i+1,0] = x
results[i+1,1] = dx
results[i+1, 0] = x
results[i+1, 1] = dx
if save_predictions:

@@ -384,6 +391,6 @@ predictions[i] = x_est

return results, predictions
else:
return results
return results
def VRF_prediction(self):

@@ -440,2 +447,14 @@ """ Returns the Variance Reduction Factor of the prediction

def __repr__(self):
return '\n'.join([
'GHFilter object',
pretty_str('dt', self.dt),
pretty_str('g', self.g),
pretty_str('h', self.h),
pretty_str('x', self.x),
pretty_str('dx', self.dx),
pretty_str('x_prediction', self.x_prediction),
pretty_str('dx_prediction', self.dx_prediction),
pretty_str('y', self.y)
])

@@ -493,9 +512,9 @@ class GHKFilter(object):

assert np.isscalar(dt)
assert np.isscalar(g)
assert np.isscalar(h)
self.x = x
self.dx = dx
self.ddx = ddx
self.x_prediction = self.x
self.dx_prediction = self.dx
self.ddx_prediction = self.ddx
self.dt = dt

@@ -505,9 +524,10 @@ self.g = g

self.k = k
self.y = 0. # residual
def update (self, z, g=None, h=None, k=None):
def update(self, z, g=None, h=None, k=None):
"""performs the g-h filter predict and update step on the
measurement z.
On return, self.x, self.dx, self.residual, and self.x_prediction
On return, self.x, self.dx, self.y, and self.x_prediction
will have been updated with the results of the computation. For

@@ -547,11 +567,11 @@ convenience, self.x and self.dx are returned in a tuple.

self.ddx_prediction = self.ddx
self.dx_prediction = self.dx + self.ddx*dt
self.x_prediction = self.x + self.dx*dt + .5*self.ddx*(dt_sqr)
self.dx_prediction = self.dx + self.ddx*dt
self.x_prediction = self.x + self.dx*dt + .5*self.ddx*(dt_sqr)
# update step
self.residual = z - self.x_prediction
self.y = z - self.x_prediction
self.ddx = self.ddx_prediction + 2*k*self.residual / dt_sqr
self.dx = self.dx_prediction + h * self.residual / dt
self.x = self.x_prediction + g * self.residual
self.ddx = self.ddx_prediction + 2*k*self.y / dt_sqr
self.dx = self.dx_prediction + h * self.y / dt
self.x = self.x_prediction + g * self.y

@@ -561,3 +581,3 @@ return (self.x, self.dx)

def batch_filter (self, data, save_predictions=False):
def batch_filter(self, data, save_predictions=False):
"""

@@ -601,4 +621,4 @@ Performs g-h filter with a fixed g and h.

results = np.zeros((n+1, 2))
results[0,0] = x
results[0,1] = dx
results[0, 0] = x
results[0, 1] = dx

@@ -611,3 +631,3 @@ if save_predictions:

for i,z in enumerate(data):
for i, z in enumerate(data):
#prediction step

@@ -621,4 +641,4 @@ x_est = x + (dx*self.dt)

results[i+1,0] = x
results[i+1,1] = dx
results[i+1, 0] = x
results[i+1, 1] = dx
if save_predictions:

@@ -629,6 +649,6 @@ predictions[i] = x_est

return results, predictions
else:
return results
return results
def VRF_prediction(self):

@@ -719,2 +739,19 @@ """ Returns the Variance Reduction Factor for x of the prediction

def __repr__(self):
return '\n'.join([
'GHFilter object',
pretty_str('dt', self.dt),
pretty_str('g', self.g),
pretty_str('h', self.h),
pretty_str('k', self.k),
pretty_str('x', self.x),
pretty_str('dx', self.dx),
pretty_str('ddx', self.ddx),
pretty_str('x_prediction', self.x_prediction),
pretty_str('dx_prediction', self.dx_prediction),
pretty_str('ddx_prediction', self.dx_prediction),
pretty_str('y', self.y)
])
def optimal_noise_smoothing(g):

@@ -761,3 +798,3 @@ """ provides g,h,k parameters for optimal smoothing of noise for a given

return (g,h,k)
return (g, h, k)

@@ -806,3 +843,3 @@

h = 6 / den
return (g,h)
return (g, h)

@@ -883,4 +920,4 @@

"""
assert theta >= 0
assert theta <= 1
if theta < 0 or theta > 1:
raise ValueError('theta must be between 0 and 1')

@@ -893,3 +930,3 @@ if order == 2:

raise Exception('bad order specified: {}'.format(order))
raise ValueError('bad order specified: {}'.format(order))

@@ -946,3 +983,3 @@

return (g, 0.8 * (2. - g_sqr - 2*(1-g_sqr)**.5) / g_sqr)
else:
return (g, g_sqr / (2.-g))
return (g, g_sqr / (2.-g))

@@ -16,8 +16,10 @@ # -*- coding: utf-8 -*-

"""
#pylint: disable=wildcard-import
from __future__ import (absolute_import, division, print_function,
unicode_literals)
__all__=["hinfinity_filter"]
__all__ = ["hinfinity_filter"]
from .hinfinity_filter import *
# -*- coding: utf-8 -*-
# pylint: disable=C0103, R0913, R0902, C0326, R0914
# disable snake_case warning, too many arguments, too many attributes,
# one space before assignment, too many local variables
"""Copyright 2015 Roger R Labbe Jr.

@@ -18,6 +22,7 @@

from __future__ import absolute_import, division
import warnings
import numpy as np
from numpy import dot, zeros, eye
import scipy.linalg as linalg
import warnings
from filterpy.common import pretty_str

@@ -27,7 +32,2 @@

"""
.. warning::
I do not believe this code is correct. DO NOT USE THIS.
In particular, note that predict does not update the covariance
matrix.
H-Infinity filter. You are responsible for setting the

@@ -37,3 +37,2 @@ various state variables to reasonable values; the defaults below will

Parameters

@@ -55,13 +54,14 @@ ----------

Number of control inputs for the Gu part of the prediction step.
gamma : float
.. warning::
I do not believe this code is correct. DO NOT USE THIS.
In particular, note that predict does not update the covariance
matrix.
"""
def __init__(self, dim_x, dim_z, dim_u, gamma):
warnings.warn("This code is likely incorrect. DO NOT USE.", DeprecationWarning)
warnings.warn("This code is likely incorrect. DO NOT USE.",
DeprecationWarning)
self.dim_x = dim_x

@@ -74,9 +74,10 @@ self.dim_z = dim_z

self.B = 0 # control transition matrix
self.F = 0 # state transition matrix
self.H = 0 # Measurement function
self.P = eye(dim_x) # Uncertainty covariance.
self.B = 0 # control transition matrix
self.F = eye(dim_x) # state transition matrix
self.H = zeros((dim_z, dim_x)) # Measurement function
self.P = eye(dim_x) # Uncertainty covariance.
self.Q = eye(dim_x)
self._V_inv = zeros((dim_z, dim_z)) # inverse measurement noise
self._V = zeros((dim_z, dim_z)) # measurement noise
self.W = zeros((dim_x, dim_x)) # process uncertainty

@@ -89,3 +90,3 @@

self.K = 0 # H-infinity gain
self.residual = zeros((dim_z, 1))
self.y = zeros((dim_z, 1))

@@ -98,8 +99,7 @@ # identity matrix. Do not alter this.

"""
Add a new measurement `z` to the H-Infinity filter. If `z` is None, nothing
is changed.
Add a new measurement `z` to the H-Infinity filter. If `z` is None,
nothing is changed.
Parameters
----------
z : ndarray

@@ -133,7 +133,7 @@ measurement for this update.

self.residual = z - dot(H, x)
self.y = z - dot(H, x)
# x = x + Ky
# predict new x with residual scaled by the H-Infinity gain
self.x = self.x + dot(K, self.residual)
self.x = self.x + dot(K, self.y)
self.P = dot(F, PL).dot(F.T) + W

@@ -146,7 +146,7 @@

def predict(self, u=0):
""" Predict next position.
"""
Predict next position.
Parameters
----------
u : ndarray

@@ -161,3 +161,3 @@ Optional control vector. If non-zero, it is multiplied by `B`

def batch_filter(self, Zs, Rs=None, update_first=False):
def batch_filter(self, Zs,update_first=False):
""" Batch processes a sequences of measurements.

@@ -167,3 +167,2 @@

----------
Zs : list-like

@@ -173,14 +172,8 @@ list of measurements at each time step `self.dt` Missing

Rs : list-like, optional
optional list of values to use for the measurement error
covariance; a value of None in any position will cause the filter
to use `self.R` for that time step.
update_first : bool, optional,
update_first : bool, default=False, optional,
controls whether the order of operations is update followed by
predict, or predict followed by update. Default is predict->update.
predict, or predict followed by update.
Returns
-------
means: ndarray ((n, dim_x, 1))

@@ -196,5 +189,4 @@ array of the state for each time step. Each entry is an np.array.

n = np.size(Zs, 0)
if Rs is None:
Rs = [None] * n
# mean estimates from H-Infinity Filter

@@ -207,4 +199,4 @@ means = zeros((n, self.dim_x, 1))

if update_first:
for i, (z, r) in enumerate(zip(Zs, Rs)):
self.update(z,r)
for i, z in enumerate(Zs):
self.update(z)
means[i, :] = self.x

@@ -214,5 +206,5 @@ covariances[i, :, :] = self.P

else:
for i, (z, r) in enumerate(zip(Zs, Rs)):
for i, z in enumerate(Zs):
self.predict()
self.update(z, r)
self.update(z)

@@ -231,3 +223,2 @@ means[i, :] = self.x

----------
u : ndarray

@@ -238,11 +229,8 @@ optional control input

-------
x : ndarray
State vector of the prediction.
"""
return dot(self.F, self.x) + dot(self.B, u)
x = dot(self.F, self.x) + dot(self.B, u)
return x
def residual_of(self, z):

@@ -260,3 +248,2 @@ """ returns the residual for the given measurement (z). Does not alter

----------
x : ndarray

@@ -267,3 +254,2 @@ H-Infinity state vector

-------
z : ndarray

@@ -277,2 +263,3 @@ measurement corresponding to the given state

def V(self):
""" measurement noise matrix"""
return self._V

@@ -283,2 +270,4 @@

def V(self, value):
""" measurement noise matrix"""
if np.isscalar(value):

@@ -289,1 +278,19 @@ self._V = np.array([[value]], dtype=float)

self._V_inv = linalg.inv(self._V)
def __repr__(self):
return '\n'.join([
'HInfinityFilter object',
pretty_str('dim_x', self.dim_x),
pretty_str('dim_z', self.dim_z),
pretty_str('dim_u', self.dim_u),
pretty_str('gamma', self.dim_u),
pretty_str('x', self.x),
pretty_str('P', self.P),
pretty_str('F', self.F),
pretty_str('Q', self.Q),
pretty_str('V', self.V),
pretty_str('W', self.W),
pretty_str('K', self.K),
pretty_str('y', self.y),
])
# -*- coding: utf-8 -*-
#pylint: disable=wildcard-import
"""Copyright 2015 Roger R Labbe Jr.

@@ -3,0 +5,0 @@

# -*- coding: utf-8 -*-
# pylint: disable=invalid-name, too-many-arguments
"""Copyright 2016 Roger R Labbe Jr.

@@ -21,7 +24,8 @@

from math import sqrt
import sys
import numpy as np
from numpy import eye, zeros, dot, isscalar, outer
from scipy.linalg import inv, cholesky
import sys
from filterpy.stats import logpdf
from filterpy.common import pretty_str

@@ -55,3 +59,3 @@

for k in range(n):
sigmas[k] = x + U[k]
sigmas[k] = x + U[k]
sigmas[n+k] = x - U[k]

@@ -63,3 +67,24 @@

def ckf_transform(Xs, Q):
"""
Compute mean and covariance of array of cubature points.
Parameters
----------
Xs : ndarray
Cubature points
Q : ndarray
Noise covariance
Returns
-------
mean : ndarray
mean of the cubature points
variance: ndarray
covariance matrix of the cubature points
"""
m, n = Xs.shape

@@ -210,2 +235,3 @@

self.P = eye(dim_x)
self.K = 0
self._dim_x = dim_x

@@ -219,2 +245,3 @@ self._dim_z = dim_z

self.z_mean = z_mean_fn
self.y = 0

@@ -241,3 +268,3 @@

def predict(self, dt=None, fx_args=()):
def predict(self, dt=None, fx_args=()):
r""" Performs the predict step of the CKF. On return, self.x and

@@ -321,3 +348,3 @@ self.P contain the predicted state (x) and covariance (P).

dx = self.sigmas_f[k] - xf
dz = self.sigmas_h[k] - zpf
dz = self.sigmas_h[k] - zpf
Pxz += outer(dx, dz)

@@ -337,2 +364,17 @@

def __repr__(self):
return '\n'.join([
'CubatureKalmanFilter object',
pretty_str('dim_x', self._dim_x),
pretty_str('dim_z', self._dim_z),
pretty_str('dt', self._dt),
pretty_str('x', self.x),
pretty_str('P', self.P),
pretty_str('Q', self.Q),
pretty_str('R', self.R),
pretty_str('K', self.K),
pretty_str('y', self.y),
pretty_str('log-likelihood', self.log_likelihood)])
@property

@@ -354,3 +396,3 @@ def likelihood(self):

if lh == 0:
lh = sys.float_info.min
return lh
lh = sys.float_info.min
return lh
# -*- coding: utf-8 -*-
# pylint: disable=invalid-name,too-many-instance-attributes, too-many-arguments
"""Copyright 2015 Roger R Labbe Jr.

@@ -19,2 +21,4 @@

from __future__ import (absolute_import, division, unicode_literals)
import sys
import math

@@ -24,4 +28,4 @@ import numpy as np

import scipy.linalg as linalg
import sys
from filterpy.stats import logpdf
from filterpy.common import pretty_str

@@ -118,9 +122,9 @@

self.x = zeros((dim_x,1)) # state
self.P = eye(dim_x) # uncertainty covariance
self.B = 0 # control transition matrix
self.F = 0 # state transition matrix
self.R = eye(dim_z) # state uncertainty
self.Q = eye(dim_x) # process uncertainty
self.y = zeros((dim_z, 1))
self.x = zeros((dim_x, 1)) # state
self.P = eye(dim_x) # uncertainty covariance
self.B = 0 # control transition matrix
self.F = np.eye(dim_x) # state transition matrix
self.R = eye(dim_z) # state uncertainty
self.Q = eye(dim_x) # process uncertainty
self.y = zeros((dim_z, 1)) # residual

@@ -174,2 +178,3 @@ # gain and residual are computed during the innovation step. We

"""
#pylint: disable=too-many-locals

@@ -201,3 +206,3 @@ if not isinstance(args, tuple):

self.S = dot(H, PHT) + R
self.K = PHT.dot(linalg.inv (self.S))
self.K = dot(PHT, linalg.inv(self.S))

@@ -277,3 +282,3 @@ self.y = z - Hx(x, *hx_args)

hx = Hx(self.x, *hx_args)
hx = Hx(self.x, *hx_args)
self.y = residual(z, hx)

@@ -329,3 +334,17 @@ self.x = self.x + dot(self.K, self.y)

if lh == 0:
lh = sys.float_info.min
lh = sys.float_info.min
return lh
def __repr__(self):
return '\n'.join([
'KalmanFilter object',
pretty_str('x', self.x),
pretty_str('P', self.P),
pretty_str('F', self.F),
pretty_str('Q', self.Q),
pretty_str('R', self.R),
pretty_str('K', self.K),
pretty_str('y', self.y),
pretty_str('S', self.S),
pretty_str('log-likelihood', self.log_likelihood)])
# -*- coding: utf-8 -*-
# pylint: disable=invalid-name, too-many-arguments, too-many-instance-attributes

@@ -25,6 +26,8 @@ """Copyright 2015 Roger R Labbe Jr.

from scipy.linalg import inv
from filterpy.common import pretty_str
class EnsembleKalmanFilter(object):
""" This implements the ensemble Kalman filter (EnKF). The EnKF uses
"""
This implements the ensemble Kalman filter (EnKF). The EnKF uses
an ensemble of hundreds to thousands of state vectors that are randomly

@@ -54,3 +57,4 @@ sampled around the estimate, and adds perturbations at each update and

def __init__(self, x, P, dim_z, dt, N, hx, fx):
""" Create a Kalman filter. You are responsible for setting the
"""
Create an ensemble Kalman filter. You are responsible for setting the
various state variables to reasonable values; the defaults below will

@@ -62,3 +66,3 @@ not give you a functional filter.

x : np.array(dim_z)
x : np.array(dim_x)
state mean

@@ -79,2 +83,5 @@

K : np.array
Kalman gain
hx : function hx(x)

@@ -116,7 +123,10 @@ Measurement function. May be linear or nonlinear - converts state

f.update(np.asarray([z]))
"""
assert dim_z > 0
if dim_z <= 0:
raise ValueError('dim_z must be greater than zero')
if N <= 0:
raise ValueError('N must be greater than zero')
self.dim_x = len(x)

@@ -128,2 +138,5 @@ self.dim_z = dim_z

self.fx = fx
self.K = 0
self.x = x
self.P = P

@@ -137,3 +150,4 @@ self.Q = eye(self.dim_x) # process uncertainty

def initialize(self, x, P):
""" Initializes the filter with the specified mean and
"""
Initializes the filter with the specified mean and
covariance. Only need to call this if you are using the filter

@@ -151,5 +165,7 @@ to filter more than one set of data; this is called by __init__

"""
assert x.ndim == 1
if x.ndim != 1:
raise ValueError('x must be a 1D array')
self.sigmas = multivariate_normal(mean=x, cov=P, size=self.N)
self.x = x

@@ -204,10 +220,10 @@ self.P = P

K = dot(P_xz, inv(P_zz))
self.K = dot(P_xz, inv(P_zz))
e_r = multivariate_normal([0]*dim_z, R, N)
for i in range(N):
self.sigmas[i] += dot(K, z + e_r[i] - sigmas_h[i])
self.sigmas[i] += dot(self.K, z + e_r[i] - sigmas_h[i])
self.x = np.mean(self.sigmas, axis=0)
self.P = self.P - dot(K, P_zz).dot(K.T)
self.P = self.P - dot(dot(self.K, P_zz), self.K.T)

@@ -220,3 +236,3 @@

for i, s in enumerate(self.sigmas):
self.sigmas[i] = self.fx(s, self.dt)
self.sigmas[i] = self.fx(s, self.dt)

@@ -233,1 +249,19 @@ e = multivariate_normal(self.mean, self.Q, N)

self.P = P / (N-1)
def __repr__(self):
return '\n'.join([
'EnsembleKalmanFilter object',
pretty_str('dim_x', self.dim_x),
pretty_str('dim_z', self.dim_z),
pretty_str('dt', self.dt),
pretty_str('x', self.x),
pretty_str('P', self.P),
pretty_str('Q', self.Q),
pretty_str('R', self.R),
pretty_str('K', self.K),
pretty_str('mean', self.mean),
pretty_str('sigmas', self.sigmas),
pretty_str('hx', self.hx),
pretty_str('fx', self.fx)
])
# -*- coding: utf-8 -*-
# pylint: disable=invalid-name, too-many-arguments, too-many-instance-attributes
"""Copyright 2015 Roger R Labbe Jr.

@@ -20,2 +22,4 @@

from __future__ import (absolute_import, division, unicode_literals)
import sys
import warnings
import math

@@ -25,4 +29,4 @@ import numpy as np

import scipy.linalg as linalg
import sys
from filterpy.stats import logpdf
from filterpy.common import pretty_str

@@ -32,84 +36,96 @@

def __init__(self, alpha, dim_x, dim_z, dim_u=0,
compute_log_likelihood=True):
""" Create a Kalman filter. You are responsible for setting the
various state variables to reasonable values; the defaults below
will not give you a functional filter.
Parameters
----------
"""
Fading memory Kalman filter. This implements a linear Kalman filter with
a fading memory effect controlled by `alpha`. This is obsolete. The
class KalmanFilter now incorporates the `alpha` attribute, and should
be used instead.
alpha : float, >= 1
alpha controls how much you want the filter to forget past
measurements. alpha==1 yields identical performance to the
Kalman filter. A typical application might use 1.01
You are responsible for setting the
various state variables to reasonable values; the defaults below
will not give you a functional filter.
dim_x : int
Number of state variables for the Kalman filter. For example, if
you are tracking the position and velocity of an object in two
dimensions, dim_x would be 4.
Parameters
----------
This is used to set the default size of P, Q, and u
alpha : float, >= 1
alpha controls how much you want the filter to forget past
measurements. alpha==1 yields identical performance to the
Kalman filter. A typical application might use 1.01
dim_z : int
Number of of measurement inputs. For example, if the sensor
provides you with position in (x,y), dim_z would be 2.
dim_x : int
Number of state variables for the Kalman filter. For example, if
you are tracking the position and velocity of an object in two
dimensions, dim_x would be 4.
dim_u : int (optional)
size of the control input, if it is being used.
Default value of 0 indicates it is not used.
This is used to set the default size of P, Q, and u
compute_log_likelihood : bool (default = True)
Computes log likelihood by default, but this can be a slow
computation, so if you never use it you can turn this computation
off.
dim_z : int
Number of of measurement inputs. For example, if the sensor
provides you with position in (x,y), dim_z would be 2.
Attributes
----------
dim_u : int (optional)
size of the control input, if it is being used.
Default value of 0 indicates it is not used.
You will have to assign reasonable values to all of these before
running the filter. All must have dtype of float
compute_log_likelihood : bool (default = True)
Computes log likelihood by default, but this can be a slow
computation, so if you never use it you can turn this computation
off.
x : ndarray (dim_x, 1), default = [0,0,0...0]
state of the filter
Attributes
----------
P : ndarray (dim_x, dim_x), default identity matrix
covariance matrix
You will have to assign reasonable values to all of these before
running the filter. All must have dtype of float
Q : ndarray (dim_x, dim_x), default identity matrix
Process uncertainty matrix
x : ndarray (dim_x, 1), default = [0,0,0...0]
state of the filter
R : ndarray (dim_z, dim_z), default identity matrix
measurement uncertainty
P : ndarray (dim_x, dim_x), default identity matrix
covariance matrix
H : ndarray (dim_z, dim_x)
measurement function
Q : ndarray (dim_x, dim_x), default identity matrix
Process uncertainty matrix
F : ndarray (dim_x, dim_x)
state transistion matrix
R : ndarray (dim_z, dim_z), default identity matrix
measurement uncertainty
B : ndarray (dim_x, dim_u), default 0
control transition matrix
H : ndarray (dim_z, dim_x)
measurement function
y : numpy.array
Residual of the update step. Read only.
F : ndarray (dim_x, dim_x)
state transistion matrix
K : numpy.array(dim_x, dim_z)
Kalman gain of the update step. Read only.
B : ndarray (dim_x, dim_u), default 0
control transition matrix
S : numpy.array
Systen uncertaintly projected to measurement space. Read only.
y : numpy.array
Residual of the update step. Read only.
log_likelihood : float
log-likelihood of the last measurement. Read only.
K : numpy.array(dim_x, dim_z)
Kalman gain of the update step. Read only.
S : numpy.array
Systen uncertaintly projected to measurement space. Read only.
log_likelihood : float
log-likelihood of the last measurement. Read only.
Examples
--------
See my book Kalman and Bayesian Filters in Python
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
"""
Examples
--------
See my book Kalman and Bayesian Filters in Python
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
"""
def __init__(self, alpha, dim_x, dim_z, dim_u=0,
compute_log_likelihood=True):
warnings.warn(
"Use KalmanFilter class instead; it also provides the alpha attribute",
DeprecationWarning)
assert alpha >= 1

@@ -120,3 +136,2 @@ assert dim_x > 0

self.alpha_sq = alpha**2

@@ -127,9 +142,9 @@ self.dim_x = dim_x

self.x = zeros((dim_x,1)) # state
self.P = eye(dim_x) # uncertainty covariance
self.Q = eye(dim_x) # process uncertainty
self.B = 0 # control transition matrix
self.F = 0 # state transition matrix
self.H = 0 # Measurement function
self.R = eye(dim_z) # state uncertainty
self.x = zeros((dim_x, 1)) # state
self.P = eye(dim_x) # uncertainty covariance
self.Q = eye(dim_x) # process uncertainty
self.B = 0. # control transition matrix
self.F = np.eye(dim_x) # state transition matrix
self.H = zeros((dim_z, dim_x)) # Measurement function
self.R = eye(dim_z) # state uncertainty

@@ -260,32 +275,34 @@ # gain and residual are computed during the innovation step. We

n = np.size(zs,0)
n = np.size(zs, 0)
if Rs is None:
Rs = [None]*n
Rs = [None] * n
#pylint: disable=bad-whitespace
# mean estimates from Kalman Filter
means = zeros((n,self.dim_x,1))
means_p = zeros((n,self.dim_x,1))
means = zeros((n, self.dim_x, 1))
means_p = zeros((n, self.dim_x, 1))
# state covariances from Kalman Filter
covariances = zeros((n,self.dim_x,self.dim_x))
covariances_p = zeros((n,self.dim_x,self.dim_x))
covariances = zeros((n, self.dim_x, self.dim_x))
covariances_p = zeros((n, self.dim_x, self.dim_x))
if update_first:
for i,(z,r) in enumerate(zip(zs,Rs)):
self.update(z,r)
means[i,:] = self.x
covariances[i,:,:] = self.P
for i, (z, r) in enumerate(zip(zs, Rs)):
self.update(z, r)
means[i, :] = self.x
covariances[i, :, :] = self.P
self.predict()
means_p[i,:] = self.x
covariances_p[i,:,:] = self.P
means_p[i, :] = self.x
covariances_p[i, :, :] = self.P
else:
for i,(z,r) in enumerate(zip(zs,Rs)):
for i, (z, r) in enumerate(zip(zs, Rs)):
self.predict()
means_p[i,:] = self.x
covariances_p[i,:,:] = self.P
means_p[i, :] = self.x
covariances_p[i, :, :] = self.P
self.update(z,r)
means[i,:] = self.x
covariances[i,:,:] = self.P
self.update(z, r)
means[i, :] = self.x
covariances[i, :, :] = self.P

@@ -343,2 +360,8 @@ return (means, covariances, means_p, covariances_p)

@property
def alpha(self):
""" scaling factor for fading memory"""
return math.sqrt(self.alpha_sq)
@property
def likelihood(self):

@@ -359,3 +382,23 @@ """

if lh == 0:
lh = sys.float_info.min
lh = sys.float_info.min
return lh
def __repr__(self):
return '\n'.join([
'FadingKalmanFilter object',
pretty_str('dim_x', self.x),
pretty_str('dim_z', self.x),
pretty_str('dim_u', self.dim_u),
pretty_str('x', self.x),
pretty_str('P', self.P),
pretty_str('F', self.F),
pretty_str('Q', self.Q),
pretty_str('R', self.R),
pretty_str('H', self.H),
pretty_str('K', self.K),
pretty_str('y', self.y),
pretty_str('S', self.S),
pretty_str('B', self.B),
pretty_str('log-likelihood', self.log_likelihood),
pretty_str('alpha', self.alpha),
])
# -*- coding: utf-8 -*-
#pylint: disable=too-many-instance-attributes, too-many-locals, invalid-name
"""Copyright 2015 Roger R Labbe Jr.

@@ -24,4 +25,4 @@

from scipy.linalg import inv
from filterpy.common import pretty_str
class FixedLagSmoother(object):

@@ -112,16 +113,16 @@ """ Fixed Lag Kalman smoother.

self.dim_z = dim_z
self.N = N
self.N = N
self.x = zeros((dim_x,1)) # state
self.x_s = zeros((dim_x,1)) # smoothed state
self.P = eye(dim_x) # uncertainty covariance
self.Q = eye(dim_x) # process uncertainty
self.F = 0 # state transition matrix
self.H = 0 # Measurement function
self.R = eye(dim_z) # state uncertainty
self.K = 0 # kalman gain
self.residual = zeros((dim_z, 1))
self.x = zeros((dim_x, 1)) # state
self.x_s = zeros((dim_x, 1)) # smoothed state
self.P = eye(dim_x) # uncertainty covariance
self.Q = eye(dim_x) # process uncertainty
self.F = eye(dim_x) # state transition matrix
self.H = eye(dim_z, dim_x) # Measurement function
self.R = eye(dim_z) # state uncertainty
self.K = zeros((dim_x, 1)) # kalman gain
self.y = zeros((dim_z, 1))
self.B = 0.
self.S = zeros((dim_z, dim_z))
self.B = 0
# identity matrix. Do not alter this.

@@ -180,3 +181,3 @@ self._I = np.eye(dim_x)

if u is not None:
x_pre += dot(B,u)
x_pre += dot(B, u)

@@ -186,10 +187,10 @@ P = dot(F, P).dot(F.T) + Q

# update step of normal Kalman filter
y = z - dot(H, x_pre)
self.y = z - dot(H, x_pre)
S = dot(H, P).dot(H.T) + R
SI = inv(S)
self.S = dot(H, P).dot(H.T) + R
SI = inv(self.S)
K = dot(P, H.T).dot(SI)
x = x_pre + dot(K, y)
x = x_pre + dot(K, self.y)

@@ -203,7 +204,7 @@ I_KH = self._I - dot(K, H)

HTSI = dot(H.T, SI)
F_LH = (F - dot(K,H)).T
F_LH = (F - dot(K, H)).T
if k >= N:
PS = P.copy() # smoothed P for step i
for i in range (N):
for i in range(N):
K = dot(PS, HTSI) # smoothed gain

@@ -213,3 +214,3 @@ PS = dot(PS, F_LH) # smoothed covariance

si = k-i
self.xSmooth[si] = self.xSmooth[si] + dot(K, y)
self.xSmooth[si] = self.xSmooth[si] + dot(K, self.y)
else:

@@ -219,3 +220,3 @@ # Some sources specify starting the fix lag smoother only

# better results by starting only at step N.
self.xSmooth[k] = x.copy()
self.xSmooth[k] = x.copy()

@@ -276,6 +277,6 @@ self.count += 1

xSmooth = zeros((len(zs), self.dim_x))
xhat = zeros((len(zs), self.dim_x))
xhat = zeros((len(zs), self.dim_x))
else:
xSmooth = zeros((len(zs), self.dim_x, 1))
xhat = zeros((len(zs), self.dim_x, 1))
xhat = zeros((len(zs), self.dim_x, 1))
for k, z in enumerate(zs):

@@ -286,3 +287,3 @@

if us is not None:
x_pre += dot(B,us[k])
x_pre += dot(B, us[k])

@@ -304,3 +305,3 @@ P = dot(F, P).dot(F.T) + Q

xhat[k] = x.copy()
xhat[k] = x.copy()
xSmooth[k] = x_pre.copy()

@@ -310,7 +311,7 @@

HTSI = dot(H.T, SI)
F_LH = (F - dot(K,H)).T
F_LH = (F - dot(K, H)).T
if k >= N:
PS = P.copy() # smoothed P for step i
for i in range (N):
for i in range(N):
K = dot(PS, HTSI) # smoothed gain

@@ -328,1 +329,21 @@ PS = dot(PS, F_LH) # smoothed covariance

return xSmooth, xhat
def __repr__(self):
return '\n'.join([
'FixedLagSmoother object',
pretty_str('dim_x', self.x),
pretty_str('dim_z', self.x),
pretty_str('N', self.N),
pretty_str('x', self.x),
pretty_str('x_s', self.x_s),
pretty_str('P', self.P),
pretty_str('F', self.F),
pretty_str('Q', self.Q),
pretty_str('R', self.R),
pretty_str('H', self.H),
pretty_str('K', self.K),
pretty_str('y', self.y),
pretty_str('S', self.S),
pretty_str('B', self.B),
])
# -*- coding: utf-8 -*-
# pylint: disable=invalid-name, too-many-instance-attributes, too-few-public-methods
# disable snake_case warning, too many arguments, too many attributes,
# one space before assignment, too few public methods
"""Copyright 2015 Roger R Labbe Jr.

@@ -21,2 +26,3 @@

from numpy import dot, zeros
from filterpy.common import pretty_str

@@ -68,3 +74,4 @@

assert len(filters) > 1
if len(filters) < 1:
raise ValueError('filters must contain at least one filter')

@@ -79,3 +86,3 @@ self.filters = filters

n_states = x_shape[0]
except:
except AttributeError:
n_states = x_shape

@@ -85,4 +92,5 @@

self.P = np.zeros((n_states, n_states))
self.N = len(filters) # number of filters
self.cbar = 0.
self.likelihood = 0

@@ -104,2 +112,3 @@

"""
#pylint: disable=too-many-locals

@@ -160,3 +169,17 @@ # run update on each filter, and save the likelihood in L

# update mode probabilities from total probability * likelihood
self.mu = self.cbar * L
self.mu = self.cbar * L
self.mu /= sum(self.mu) # normalize
self.likelihood = L
def __repr__(self):
return '\n'.join([
'IMMEstimator object',
pretty_str('N', self.N),
pretty_str('x', self.x),
pretty_str('P', self.P),
pretty_str('mu', self.mu),
pretty_str('M', self.M),
pretty_str('cbar', self.cbar),
pretty_str('likelihood', self.likelihood),
])
# -*- coding: utf-8 -*-
# pylint: disable=invalid-name, too-many-instance-attributes

@@ -21,48 +22,91 @@ """Copyright 2015 Roger R Labbe Jr.

import math
import sys
import numpy as np
from numpy import dot, zeros, eye
from scipy.linalg import inv
import sys
from filterpy.stats import logpdf
from filterpy.common import pretty_str
class InformationFilter(object):
"""
Create a linear Information filter. Information filters compute the
inverse of the Kalman filter, allowing you to easily denote having
no information at initialization.
def __init__(self, dim_x, dim_z, dim_u=0, compute_log_likelihood=True):
""" Create a Information filter. You are responsible for setting the
various state variables to reasonable values; the defaults below will
not give you a functional filter.
You are responsible for setting the various state variables to reasonable
values; the defaults below will not give you a functional filter.
Parameters
----------
Parameters
----------
dim_x : int
Number of state variables for the filter. For example, if you
are tracking the position and velocity of an object in two
dimensions, dim_x would be 4.
dim_x : int
Number of state variables for the filter. For example, if you
are tracking the position and velocity of an object in two
dimensions, dim_x would be 4.
This is used to set the default size of P, Q, and u
This is used to set the default size of P, Q, and u
dim_z : int
Number of of measurement inputs. For example, if the sensor
provides you with position in (x,y), dim_z would be 2.
dim_z : int
Number of of measurement inputs. For example, if the sensor
provides you with position in (x,y), dim_z would be 2.
dim_u : int (optional)
size of the control input, if it is being used.
Default value of 0 indicates it is not used.
dim_u : int (optional)
size of the control input, if it is being used.
Default value of 0 indicates it is not used.
self.compute_log_likelihood = compute_log_likelihood
self.log_likelihood = math.log(sys.float_info.min)
self.compute_log_likelihood = compute_log_likelihood
self.log_likelihood = math.log(sys.float_info.min)
Examples
--------
See my book Kalman and Bayesian Filters in Python
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
"""
Attributes
----------
x : numpy.array(dim_x, 1)
State estimate vector
assert dim_x > 0
assert dim_z > 0
assert dim_u >= 0
P_inv : numpy.array(dim_x, dim_x)
inverse state covariance matrix
R_inv : numpy.array(dim_z, dim_z)
inverse of measurement noise matrix
Q : numpy.array(dim_x, dim_x)
Process noise matrix
H : numpy.array(dim_z, dim_x)
Measurement function
y : numpy.array
Residual of the update step. Read only.
K : numpy.array(dim_x, dim_z)
Kalman gain of the update step. Read only.
S : numpy.array
Systen uncertaintly projected to measurement space. Read only.
log_likelihood : float
log-likelihood of the last measurement. Read only.
inv : function, default numpy.linalg.inv
If you prefer another inverse function, such as the Moore-Penrose
pseudo inverse, set it to that instead: kf.inv = np.linalg.pinv
Examples
--------
See my book Kalman and Bayesian Filters in Python
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
"""
def __init__(self, dim_x, dim_z, dim_u=0, compute_log_likelihood=True):
if dim_z < 1:
raise ValueError('dim_x must be 1 or greater')
if dim_z < 1:
raise ValueError('dim_x must be 1 or greater')
if dim_u < 0:
raise ValueError('dim_x must be 0 or greater')
self.dim_x = dim_x

@@ -75,6 +119,6 @@ self.dim_z = dim_z

self.Q = eye(dim_x) # process uncertainty
self.B = 0 # control transition matrix
self._F = 0 # state transition matrix
self._F_inv = 0 # state transition matrix
self.H = 0 # Measurement function
self.B = 0. # control transition matrix
self._F = 0. # state transition matrix
self._F_inv = 0. # state transition matrix
self.H = np.zeros((dim_z, dim_x)) # Measurement function
self.R_inv = eye(dim_z) # state uncertainty

@@ -85,5 +129,5 @@

# purposes
self.K = 0 # kalman gain
self.K = 0. # kalman gain
self.y = zeros((dim_z, 1))
self.S = 0 # system uncertainty in measurement space
self.S = 0. # system uncertainty in measurement space

@@ -97,3 +141,5 @@ # identity matrix. Do not alter this.

self.inv = np.linalg.inv
def update(self, z, R_inv=None):

@@ -142,3 +188,3 @@ """

self.S = P_inv + dot(H_T, R_inv).dot(H)
self.K = dot(inv(self.S), H_T).dot(R_inv)
self.K = dot(self.inv(self.S), H_T).dot(R_inv)

@@ -168,8 +214,9 @@ # x = x + Ky

A = dot(self._F_inv.T, self.P_inv).dot(self._F_inv)
#pylint: disable=bare-except
try:
AI = inv(A)
AI = self.inv(A)
invertable = True
if self._no_information:
try:
self.x = dot(inv(self.P_inv), self.x)
self.x = dot(self.inv(self.P_inv), self.x)
except:

@@ -180,12 +227,12 @@ self.x = dot(0, self.x)

invertable = False
self._no_information = True
self._no_information = True
if invertable:
self.x = dot(self._F, self.x) + dot(self.B, u)
self.P_inv = inv(AI + self.Q)
self.P_inv = self.inv(AI + self.Q)
else:
I_PF = self._I - dot(self.P_inv,self._F_inv)
FTI = inv(self._F.T)
I_PF = self._I - dot(self.P_inv, self._F_inv)
FTI = self.inv(self._F.T)
FTIX = dot(FTI, self.x)
AQI = inv(A + self.Q)
AQI = self.inv(A + self.Q)
self.x = dot(FTI, dot(I_PF, AQI).dot(FTIX))

@@ -225,7 +272,9 @@

raise "this is not implemented yet"
raise NotImplementedError("this is not implemented yet")
''' this is a copy of the code from kalman_filter, it has not been
turned into the information filter yet. DO NOT USE.'''
#pylint: disable=unreachable, no-member
# this is a copy of the code from kalman_filter, it has not been
# turned into the information filter yet. DO NOT USE.
n = np.size(zs, 0)

@@ -236,20 +285,20 @@ if Rs is None:

# mean estimates from Kalman Filter
means = zeros((n, self.dim_x,1))
means = zeros((n, self.dim_x, 1))
# state covariances from Kalman Filter
covariances = zeros((n,self.dim_x, self.dim_x))
covariances = zeros((n, self.dim_x, self.dim_x))
if update_first:
for i,(z,r) in enumerate(zip(zs, Rs)):
for i, (z, r) in enumerate(zip(zs, Rs)):
self.update(z, r)
means[i,:] = self.x
covariances[i,:,:] = self._P
means[i, :] = self.x
covariances[i, :, :] = self._P
self.predict()
else:
for i,(z,r) in enumerate(zip(zs, Rs)):
for i, (z, r) in enumerate(zip(zs, Rs)):
self.predict()
self.update(z, r)
means[i,:] = self.x
covariances[i,:,:] = self._P
means[i, :] = self.x
covariances[i, :, :] = self._P

@@ -275,3 +324,3 @@ return (means, covariances)

if lh == 0:
lh = sys.float_info.min
lh = sys.float_info.min

@@ -288,2 +337,24 @@

self._F = value
self._F_inv = inv(self._F)
self._F_inv = self.inv(self._F)
def __repr__(self):
return '\n'.join([
'InformationFilter object',
pretty_str('dim_x', self.dim_x),
pretty_str('dim_z', self.dim_z),
pretty_str('dim_u', self.dim_u),
pretty_str('x', self.x),
pretty_str('P_inv', self.P_inv),
pretty_str('F', self.F),
pretty_str('_F_inv', self._F_inv),
pretty_str('Q', self.Q),
pretty_str('R_inv', self.R_inv),
pretty_str('H', self.H),
pretty_str('K', self.K),
pretty_str('y', self.y),
pretty_str('S', self.S),
pretty_str('B', self.B),
pretty_str('log-likelihood', self.log_likelihood),
pretty_str('inv', self.inv),
])
# -*- coding: utf-8 -*-
# pylint: disable=invalid-name, too-many-arguments, too-many-branches,
# pylint: disable=too-many-locals, too-many-instance-attributes, too-many-lines

@@ -113,10 +115,12 @@ """

from __future__ import absolute_import, division
import sys
import math
import numpy as np
from numpy import dot, zeros, eye, isscalar, shape
import scipy.linalg as linalg
import sys
import numpy.linalg as linalg
from filterpy.stats import logpdf
from filterpy.common import pretty_str
class KalmanFilter(object):

@@ -139,6 +143,6 @@ r""" Implements a Kalman filter. You are responsible for setting the

x : numpy.array(dim_x, 1)
State estimate vector
State estimate
P : numpy.array(dim_x, dim_x)
Covariance matrix
State covariance matrix

@@ -157,3 +161,2 @@ R : numpy.array(dim_z, dim_z)

y : numpy.array

@@ -171,2 +174,6 @@ Residual of the update step. Read only.

inv : function, default numpy.linalg.inv
If you prefer another inverse function, such as the Moore-Penrose
pseudo inverse, set it to that instead: kf.inv = np.linalg.pinv
Examples

@@ -180,3 +187,2 @@ --------

def __init__(self, dim_x, dim_z, dim_u=0, compute_log_likelihood=True):

@@ -209,5 +215,8 @@ """ Create a Kalman filter. You are responsible for setting the

assert dim_x > 0
assert dim_z > 0
assert dim_u >= 0
if dim_z < 1:
raise ValueError('dim_x must be 1 or greater')
if dim_z < 1:
raise ValueError('dim_x must be 1 or greater')
if dim_u < 0:
raise ValueError('dim_x must be 0 or greater')

@@ -218,11 +227,11 @@ self.dim_x = dim_x

self.x = zeros((dim_x, 1)) # state
self.P = eye(dim_x) # uncertainty covariance
self.Q = eye(dim_x) # process uncertainty
self.B = None # control transition matrix
self.F = eye(dim_x) # state transition matrix
self.H = zeros((dim_z, dim_x)) # Measurement function
self.R = eye(dim_z) # state uncertainty
self._alpha_sq = 1. # fading memory control
self.M = 0. # process-measurement cross correlation
self.x = zeros((dim_x, 1)) # state
self.P = eye(dim_x) # uncertainty covariance
self.Q = eye(dim_x) # process uncertainty
self.B = None # control transition matrix
self.F = eye(dim_x) # state transition matrix
self.H = zeros((dim_z, dim_x)) # Measurement function
self.R = eye(dim_z) # state uncertainty
self._alpha_sq = 1. # fading memory control
self.M = np.zeros((dim_z, dim_z)) # process-measurement cross correlation

@@ -246,3 +255,5 @@ # gain and residual are computed during the innovation step. We

self.inv = np.linalg.inv
def update(self, z, R=None, H=None):

@@ -294,3 +305,3 @@ """

# map system uncertainty into kalman gain
self.K = dot(PHT, linalg.inv(self.S))
self.K = dot(PHT, self.inv(self.S))

@@ -301,5 +312,2 @@ # x = x + Ky

if self.x.ndim == 2:
assert self.x.shape[0] == self.dim_x and self.x.shape[1] == 1
# P = (I-KH)P(I-KH)' + KRK'

@@ -402,3 +410,3 @@ I_KH = self._I - dot(self.K, H)

z = _reshape_z(z, self.dim_z)
z = _reshape_z(z, self.dim_z, self.x.ndim)

@@ -434,3 +442,3 @@ if R is None:

# map system uncertainty into kalman gain
self.K = dot(PHT + self.M, linalg.inv(self.S))
self.K = dot(PHT + self.M, self.inv(self.S))

@@ -462,6 +470,10 @@ # x = x + Ky

if H is None: H = self.H
if R is None: R = self.R
if F is None: F = self.F
if Q is None: Q = self.Q
if H is None:
H = self.H
if R is None:
R = self.R
if F is None:
F = self.F
if Q is None:
Q = self.Q
x = self.x

@@ -471,4 +483,3 @@ P = self.P

assert x.ndim == 1 or x.ndim == 2, \
"x must have one or two dimensions, but has {}".format(
x.ndim)
"x must have one or two dimensions, but has {}".format(x.ndim)

@@ -478,28 +489,27 @@ if x.ndim == 1:

"Shape of x must be ({},{}), but is {}".format(
self.dim_x, 1, x.shape)
self.dim_x, 1, x.shape)
else:
assert x.shape == (self.dim_x, 1), \
"Shape of x must be ({},{}), but is {}".format(
self.dim_x, 1, x.shape)
self.dim_x, 1, x.shape)
assert P.shape == (self.dim_x, self.dim_x), \
"Shape of P must be ({},{}), but is {}".format(
self.dim_x, self.dim_x, P.shape)
self.dim_x, self.dim_x, P.shape)
assert Q.shape == (self.dim_x, self.dim_x), \
"Shape of P must be ({},{}), but is {}".format(
self.dim_x, self.dim_x, P.shape)
self.dim_x, self.dim_x, P.shape)
assert F.shape == (self.dim_x, self.dim_x), \
"Shape of F must be ({},{}), but is {}".format(
self.dim_x, self.dim_x, F.shape)
self.dim_x, self.dim_x, F.shape)
assert np.ndim(H) == 2, \
"Shape of H must be (dim_z, {}), but is {}".format(
P.shape[0], shape(H))
P.shape[0], shape(H))
assert H.shape[1] == P.shape[0], \
"Shape of H must be (dim_z, {}), but is {}".format(
P.shape[0], H.shape)
P.shape[0], H.shape)

@@ -512,5 +522,5 @@ # shape of R must be the same as HPH'

# r can be scalar, 1D, or 2D in this case
assert r_shape == () or r_shape == (1,) or r_shape == (1,1), \
assert r_shape == () or r_shape == (1,) or r_shape == (1, 1), \
"R must be scalar or one element array, but is shaped {}".format(
r_shape)
r_shape)
else:

@@ -530,5 +540,5 @@ assert r_shape == hph_shape, \

if z_shape == (): # scalar or np.array(scalar)
assert Hx.ndim == 1 or shape(Hx) == (1,1), \
assert Hx.ndim == 1 or shape(Hx) == (1, 1), \
"shape of z should be {}, not {} for the given H".format(
shape(Hx), z_shape)
shape(Hx), z_shape)

@@ -542,8 +552,8 @@ elif shape(Hx) == (1,):

"shape of z should be {}, not {} for the given H".format(
shape(Hx), z_shape)
shape(Hx), z_shape)
if np.ndim(Hx) > 1 and shape(Hx) != (1,1):
if np.ndim(Hx) > 1 and shape(Hx) != (1, 1):
assert shape(Hx) == z_shape, \
'shape of z should be {} for the given H, but it is {}'.format(
shape(Hx), z_shape)
shape(Hx), z_shape)

@@ -717,3 +727,3 @@

n = np.size(zs,0)
n = np.size(zs, 0)
if Fs is None:

@@ -732,8 +742,9 @@ Fs = [self.F] * n

if len(Fs) < n: Fs = [Fs]*n
if len(Qs) < n: Qs = [Qs]*n
if len(Hs) < n: Hs = [Hs]*n
if len(Rs) < n: Rs = [Rs]*n
if len(Bs) < n: Bs = [Bs]*n
if len(us) < n: us = [us]*n
#pylint: disable=multiple-statements
if len(Fs) < n: Fs = [Fs] * n
if len(Qs) < n: Qs = [Qs] * n
if len(Hs) < n: Hs = [Hs] * n
if len(Rs) < n: Rs = [Rs] * n
if len(Bs) < n: Bs = [Bs] * n
if len(us) < n: us = [us] * n

@@ -743,10 +754,10 @@

if self.x.ndim == 1:
means = zeros((n, self.dim_x))
means = zeros((n, self.dim_x))
means_p = zeros((n, self.dim_x))
else:
means = zeros((n, self.dim_x, 1))
means = zeros((n, self.dim_x, 1))
means_p = zeros((n, self.dim_x, 1))
# state covariances from Kalman Filter
covariances = zeros((n, self.dim_x, self.dim_x))
covariances = zeros((n, self.dim_x, self.dim_x))
covariances_p = zeros((n, self.dim_x, self.dim_x))

@@ -758,7 +769,7 @@

self.update(z, R=R, H=H)
means[i, :] = self.x
means[i, :] = self.x
covariances[i, :, :] = self.P
self.predict(u=u, B=B, F=F, Q=Q)
means_p[i, :] = self.x
means_p[i, :] = self.x
covariances_p[i, :, :] = self.P

@@ -769,7 +780,7 @@ else:

self.predict(u=u, B=B, F=F, Q=Q)
means_p[i, :] = self.x
means_p[i, :] = self.x
covariances_p[i, :, :] = self.P
self.update(z, R=R, H=H)
means[i, :] = self.x
means[i, :] = self.x
covariances[i, :, :] = self.P

@@ -830,7 +841,8 @@

assert len(Xs) == len(Ps)
shape = Xs.shape
n = shape[0]
dim_x = shape[1]
if len(Xs) != len(Ps):
raise ValueError('length of Xs and Ps must be the same')
n = Xs.shape[0]
dim_x = Xs.shape[1]
if Fs is None:

@@ -842,10 +854,10 @@ Fs = [self.F] * n

# smoother gain
K = zeros((n,dim_x, dim_x))
K = zeros((n, dim_x, dim_x))
x, P, Pp = Xs.copy(), Ps.copy(), Ps.copy()
for k in range(n-2,-1,-1):
for k in range(n-2, -1, -1):
Pp[k] = dot(dot(Fs[k+1], P[k]), Fs[k+1].T) + Qs[k+1]
K[k] = dot(dot(P[k], Fs[k+1].T), linalg.inv(Pp[k]))
#pylint: disable=bad-whitespace
K[k] = dot(dot(P[k], Fs[k+1].T), self.inv(Pp[k]))
x[k] += dot(K[k], x[k+1] - dot(Fs[k+1], x[k]))

@@ -900,3 +912,3 @@ P[k] += dot(dot(K[k], P[k+1] - Pp[k]), K[k].T)

return self.x, self.P
z = _reshape_z(z, self.dim_z)
z = _reshape_z(z, self.dim_z, self.x.ndim)

@@ -918,3 +930,3 @@ R = self.R

# map system uncertainty into kalman gain
K = dot(PHT, linalg.inv(S))
K = dot(PHT, self.inv(S))

@@ -925,3 +937,3 @@ # predict new x with residual scaled by the kalman gain

# P = (I-KH)P(I-KH)' + KRK'
I_KH = self.I - dot(K, H)
I_KH = self._I - dot(K, H)
P = dot(dot(I_KH, P), I_KH.T) + dot(dot(K, R), K.T)

@@ -995,3 +1007,3 @@

if lh == 0:
lh = sys.float_info.min
lh = sys.float_info.min
return lh

@@ -1008,4 +1020,3 @@

return math.log(sys.float_info.min)
else:
return logpdf(z, dot(self.H, self.x), self.S)
return logpdf(z, dot(self.H, self.x), self.S)

@@ -1015,8 +1026,32 @@

def alpha(self, value):
assert np.isscalar(value)
assert value > 0.
if not np.isscalar(value) or value <= 0:
raise ValueError('alpha must be a float greater than 0')
self._alpha_sq = value**2
def __repr__(self):
return '\n'.join([
'KalmanFilter object',
pretty_str('dim_x', self.dim_x),
pretty_str('dim_z', self.dim_z),
pretty_str('dim_u', self.dim_u),
pretty_str('x', self.x),
pretty_str('P', self.P),
pretty_str('F', self.F),
pretty_str('Q', self.Q),
pretty_str('R', self.R),
pretty_str('H', self.H),
pretty_str('K', self.K),
pretty_str('y', self.y),
pretty_str('S', self.S),
pretty_str('M', self.M),
pretty_str('B', self.B),
pretty_str('log-likelihood', self.log_likelihood),
pretty_str('alpha', self.alpha),
pretty_str('inv', self.inv)
])
def update(x, P, z, R, H=None, return_all=False):

@@ -1081,2 +1116,3 @@ """

#pylint: disable=bare-except

@@ -1086,4 +1122,3 @@ if z is None:

return x, P, None, None, None, None
else:
return x, P
return x, P

@@ -1110,2 +1145,3 @@ if H is None:

except:
# can't invert a 1D array, annoyingly
K = dot(dot(P, H.T), 1./S)

@@ -1120,3 +1156,2 @@

try:

@@ -1133,4 +1168,3 @@ I_KH = np.eye(KH.shape[0]) - KH

return x, P, y, K, S, log_likelihood
else:
return x, P
return x, P

@@ -1364,3 +1398,3 @@

n = np.size(zs,0)
n = np.size(zs, 0)
dim_x = x.shape[0]

@@ -1370,16 +1404,17 @@

if x.ndim == 1:
means = zeros((n, dim_x))
means = zeros((n, dim_x))
means_p = zeros((n, dim_x))
else:
means = zeros((n, dim_x, 1))
means = zeros((n, dim_x, 1))
means_p = zeros((n, dim_x, 1))
# state covariances from Kalman Filter
covariances = zeros((n, dim_x, dim_x))
covariances = zeros((n, dim_x, dim_x))
covariances_p = zeros((n, dim_x, dim_x))
if us is None:
us = [0.]*n
Bs = [0.]*n
us = [0.] * n
Bs = [0.] * n
#pylint: disable=multiple-statements
if len(Fs) < n: Fs = [Fs]*n

@@ -1397,7 +1432,7 @@ if len(Qs) < n: Qs = [Qs]*n

x, P = update(x, P, z, R=R, H=H)
means[i, :] = x
means[i, :] = x
covariances[i, :, :] = P
x, P = predict(x, P, u=u, B=B, F=F, Q=Q)
means_p[i, :] = x
means_p[i, :] = x
covariances_p[i, :, :] = P

@@ -1408,7 +1443,7 @@ else:

x, P = predict(x, P, u=u, B=B, F=F, Q=Q)
means_p[i, :] = x
means_p[i, :] = x
covariances_p[i, :, :] = P
x, P = update(x, P, z, R=R, H=H)
means[i, :] = x
x, P = update(x, P, z, R=R, H=H)
means[i, :] = x
covariances[i, :, :] = P

@@ -1467,3 +1502,5 @@

assert len(Xs) == len(Ps)
if len(Xs) != len(Ps):
raise ValueError('length of Xs and Ps must be the same')
n = Xs.shape[0]

@@ -1473,3 +1510,3 @@ dim_x = Xs.shape[1]

# smoother gain
K = zeros((n,dim_x,dim_x))
K = zeros((n, dim_x, dim_x))
x, P, pP = Xs.copy(), Ps.copy(), Ps.copy()

@@ -1480,2 +1517,3 @@

#pylint: disable=bad-whitespace
K[k] = dot(dot(P[k], Fs[k].T), linalg.inv(pP[k]))

@@ -1531,2 +1569,3 @@ x[k] += dot(K[k], x[k+1] - dot(Fs[k], x[k]))

def save(self):

@@ -1566,8 +1605,7 @@ """ save the current state of the Kalman filter"""

if ndim == 1:
z = z[:,0]
z = z[:, 0]
if ndim == 0:
z = z[0,0]
z = z[0, 0]
return z
# -*- coding: utf-8 -*-
# pylint: disable=invalid-name

@@ -19,3 +20,5 @@ """Copyright 2015 Roger R Labbe Jr.

import numpy as np
from filterpy.common import pretty_str
class MMAEFilterBank(object):

@@ -55,3 +58,2 @@ """ Implements the fixed Multiple Model Adaptive Estimator (MMAE). This

def __init__(self, filters, p, dim_x, H=None):

@@ -73,8 +75,11 @@ """ Creates an fixed MMAE Estimator.

H :
H : Measurement matrix
"""
assert len(filters) == len(p)
assert dim_x > 0
if len(filters) != len(p):
raise ValueError('length of filters and p must be the same')
if dim_x < 1:
raise ValueError('dim_x must be >= 1')
self.filters = filters

@@ -84,2 +89,4 @@ self.p = np.asarray(p)

self.x = None
self.P = None
self.H = H

@@ -123,2 +130,5 @@

if H is None:
H = self.H
# new probability is recursively defined as prior * likelihood

@@ -149,1 +159,10 @@ for i, f in enumerate(self.filters):

self.P += p*(np.outer(y, y) + f.P)
def __repr__(self):
return '\n'.join([
'MMAEFilterBank object',
pretty_str('dim_x', self.dim_x),
pretty_str('x', self.x),
pretty_str('P', self.P),
pretty_str('log-p', self.p),
])
# -*- coding: utf-8 -*-
# pylint: disable=invalid-name
"""Copyright 2015 Roger R Labbe Jr.

@@ -20,68 +22,73 @@

from scipy.linalg import cholesky
from filterpy.common import pretty_str
class MerweScaledSigmaPoints(object):
def __init__(self, n, alpha, beta, kappa, sqrt_method=None, subtract=None):
""" Generates sigma points and weights according to Van der Merwe's
2004 dissertation[1] for the UnscentedKalmanFilter class.. It
parametizes the sigma points using alpha, beta, kappa terms, and
is the version seen in most publications.
"""
Generates sigma points and weights according to Van der Merwe's
2004 dissertation[1] for the UnscentedKalmanFilter class.. It
parametizes the sigma points using alpha, beta, kappa terms, and
is the version seen in most publications.
Unless you know better, this should be your default choice.
Unless you know better, this should be your default choice.
Parameters
----------
Parameters
----------
n : int
Dimensionality of the state. 2n+1 weights will be generated.
n : int
Dimensionality of the state. 2n+1 weights will be generated.
alpha : float
Determins the spread of the sigma points around the mean.
Usually a small positive value (1e-3) according to [3].
alpha : float
Determins the spread of the sigma points around the mean.
Usually a small positive value (1e-3) according to [3].
beta : float
Incorporates prior knowledge of the distribution of the mean. For
Gaussian x beta=2 is optimal, according to [3].
beta : float
Incorporates prior knowledge of the distribution of the mean. For
Gaussian x beta=2 is optimal, according to [3].
kappa : float, default=0.0
Secondary scaling parameter usually set to 0 according to [4],
or to 3-n according to [5].
kappa : float, default=0.0
Secondary scaling parameter usually set to 0 according to [4],
or to 3-n according to [5].
sqrt_method : function(ndarray), default=scipy.linalg.cholesky
Defines how we compute the square root of a matrix, which has
no unique answer. Cholesky is the default choice due to its
speed. Typically your alternative choice will be
scipy.linalg.sqrtm. Different choices affect how the sigma points
are arranged relative to the eigenvectors of the covariance matrix.
Usually this will not matter to you; if so the default cholesky()
yields maximal performance. As of van der Merwe's dissertation of
2004 [6] this was not a well reseached area so I have no advice
to give you.
sqrt_method : function(ndarray), default=scipy.linalg.cholesky
Defines how we compute the square root of a matrix, which has
no unique answer. Cholesky is the default choice due to its
speed. Typically your alternative choice will be
scipy.linalg.sqrtm. Different choices affect how the sigma points
are arranged relative to the eigenvectors of the covariance matrix.
Usually this will not matter to you; if so the default cholesky()
yields maximal performance. As of van der Merwe's dissertation of
2004 [6] this was not a well reseached area so I have no advice
to give you.
If your method returns a triangular matrix it must be upper
triangular. Do not use numpy.linalg.cholesky - for historical
reasons it returns a lower triangular matrix. The SciPy version
does the right thing.
If your method returns a triangular matrix it must be upper
triangular. Do not use numpy.linalg.cholesky - for historical
reasons it returns a lower triangular matrix. The SciPy version
does the right thing.
subtract : callable (x, y), optional
Function that computes the difference between x and y.
You will have to supply this if your state variable cannot support
subtraction, such as angles (359-1 degreees is 2, not 358). x and y
are state vectors, not scalars.
subtract : callable (x, y), optional
Function that computes the difference between x and y.
You will have to supply this if your state variable cannot support
subtraction, such as angles (359-1 degreees is 2, not 358). x and y
are state vectors, not scalars.
Examples
--------
Examples
--------
See my book Kalman and Bayesian Filters in Python
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
See my book Kalman and Bayesian Filters in Python
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
References
----------
References
----------
.. [1] R. Van der Merwe "Sigma-Point Kalman Filters for Probabilitic
Inference in Dynamic State-Space Models" (Doctoral dissertation)
.. [1] R. Van der Merwe "Sigma-Point Kalman Filters for Probabilitic
Inference in Dynamic State-Space Models" (Doctoral dissertation)
"""
"""
def __init__(self, n, alpha, beta, kappa, sqrt_method=None, subtract=None):
#pylint: disable=too-many-arguments
self.n = n

@@ -97,3 +104,3 @@ self.alpha = alpha

if subtract is None:
self.subtract= np.subtract
self.subtract = np.subtract
else:

@@ -137,4 +144,5 @@ self.subtract = subtract

assert self.n == np.size(x), "expected size {}, but size is {}".format(
self.n, np.size(x))
if self.n != np.size(x):
raise ValueError("expected size(x) {}, but size is {}".format(
self.n, np.size(x)))

@@ -157,2 +165,3 @@ n = self.n

for k in range(n):
# pylint: disable=bad-whitespace
sigmas[k+1] = self.subtract(x, -U[k])

@@ -189,49 +198,66 @@ sigmas[n+k+1] = self.subtract(x, U[k])

def __repr__(self):
Wm, Wc = self.weights()
return '\n'.join([
'MerweScaledSigmaPoints object',
pretty_str('n', self.n),
pretty_str('alpha', self.alpha),
pretty_str('beta', self.beta),
pretty_str('kappa', self.kappa),
pretty_str('Wm', Wm),
pretty_str('Wc', Wc),
pretty_str('subtract', self.subtract),
pretty_str('sqrt', self.sqrt)
])
class JulierSigmaPoints(object):
"""
Generates sigma points and weights according to Simon J. Julier
and Jeffery K. Uhlmann's original paper[1]. It parametizes the sigma
points using kappa.
def __init__(self,n, kappa, sqrt_method=None, subtract=None):
""" Generates sigma points and weights according to Simon J. Julier
and Jeffery K. Uhlmann's original paper[1]. It parametizes the sigma
points using kappa.
Parameters
----------
Parameters
----------
n : int
Dimensionality of the state. 2n+1 weights will be generated.
n : int
Dimensionality of the state. 2n+1 weights will be generated.
kappa : float, default=0.
Scaling factor that can reduce high order errors. kappa=0 gives
the standard unscented filter. According to [Julier], if you set
kappa to 3-dim_x for a Gaussian x you will minimize the fourth
order errors in x and P.
kappa : float, default=0.
Scaling factor that can reduce high order errors. kappa=0 gives
the standard unscented filter. According to [Julier], if you set
kappa to 3-dim_x for a Gaussian x you will minimize the fourth
order errors in x and P.
sqrt_method : function(ndarray), default=scipy.linalg.cholesky
Defines how we compute the square root of a matrix, which has
no unique answer. Cholesky is the default choice due to its
speed. Typically your alternative choice will be
scipy.linalg.sqrtm. Different choices affect how the sigma points
are arranged relative to the eigenvectors of the covariance matrix.
Usually this will not matter to you; if so the default cholesky()
yields maximal performance. As of van der Merwe's dissertation of
2004 [6] this was not a well reseached area so I have no advice
to give you.
sqrt_method : function(ndarray), default=scipy.linalg.cholesky
Defines how we compute the square root of a matrix, which has
no unique answer. Cholesky is the default choice due to its
speed. Typically your alternative choice will be
scipy.linalg.sqrtm. Different choices affect how the sigma points
are arranged relative to the eigenvectors of the covariance matrix.
Usually this will not matter to you; if so the default cholesky()
yields maximal performance. As of van der Merwe's dissertation of
2004 [6] this was not a well reseached area so I have no advice
to give you.
If your method returns a triangular matrix it must be upper
triangular. Do not use numpy.linalg.cholesky - for historical
reasons it returns a lower triangular matrix. The SciPy version
does the right thing.
If your method returns a triangular matrix it must be upper
triangular. Do not use numpy.linalg.cholesky - for historical
reasons it returns a lower triangular matrix. The SciPy version
does the right thing.
subtract : callable (x, y), optional
Function that computes the difference between x and y.
You will have to supply this if your state variable cannot support
subtraction, such as angles (359-1 degreees is 2, not 358). x and y
subtract : callable (x, y), optional
Function that computes the difference between x and y.
You will have to supply this if your state variable cannot support
subtraction, such as angles (359-1 degreees is 2, not 358). x and y
References
----------
References
----------
.. [1] Julier, Simon J.; Uhlmann, Jeffrey "A New Extension of the Kalman
Filter to Nonlinear Systems". Proc. SPIE 3068, Signal Processing,
Sensor Fusion, and Target Recognition VI, 182 (July 28, 1997)
"""
.. [1] Julier, Simon J.; Uhlmann, Jeffrey "A New Extension of the Kalman
Filter to Nonlinear Systems". Proc. SPIE 3068, Signal Processing,
Sensor Fusion, and Target Recognition VI, 182 (July 28, 1997)
"""
def __init__(self, n, kappa, sqrt_method=None, subtract=None):

@@ -246,3 +272,3 @@ self.n = n

if subtract is None:
self.subtract= np.subtract
self.subtract = np.subtract
else:

@@ -298,3 +324,6 @@ self.subtract = subtract

assert self.n == np.size(x)
if self.n != np.size(x):
raise ValueError("expected size(x) {}, but size is {}".format(
self.n, np.size(x)))
n = self.n

@@ -318,2 +347,3 @@

for k in range(n):
# pylint: disable=bad-whitespace
sigmas[k+1] = self.subtract(x, -U[k])

@@ -346,41 +376,55 @@ sigmas[n+k+1] = self.subtract(x, U[k])

def __repr__(self):
Wm, Wc = self.weights()
return '\n'.join([
'JulierSigmaPoints object',
pretty_str('n', self.n),
pretty_str('kappa', self.kappa),
pretty_str('Wm', Wm),
pretty_str('Wc', Wc),
pretty_str('subtract', self.subtract),
pretty_str('sqrt', self.sqrt)
])
class SimplexSigmaPoints(object):
def __init__(self, n, alpha=1, sqrt_method=None, subtract=None):
""" Generates sigma points and weights according to the simplex
method presented in [1].
"""
Generates sigma points and weights according to the simplex
method presented in [1].
Parameters
----------
Parameters
----------
n : int
Dimensionality of the state. n+1 weights will be generated.
n : int
Dimensionality of the state. n+1 weights will be generated.
sqrt_method : function(ndarray), default=scipy.linalg.cholesky
Defines how we compute the square root of a matrix, which has
no unique answer. Cholesky is the default choice due to its
speed. Typically your alternative choice will be
scipy.linalg.sqrtm
sqrt_method : function(ndarray), default=scipy.linalg.cholesky
Defines how we compute the square root of a matrix, which has
no unique answer. Cholesky is the default choice due to its
speed. Typically your alternative choice will be
scipy.linalg.sqrtm
If your method returns a triangular matrix it must be upper
triangular. Do not use numpy.linalg.cholesky - for historical
reasons it returns a lower triangular matrix. The SciPy version
does the right thing.
If your method returns a triangular matrix it must be upper
triangular. Do not use numpy.linalg.cholesky - for historical
reasons it returns a lower triangular matrix. The SciPy version
does the right thing.
subtract : callable (x, y), optional
Function that computes the difference between x and y.
You will have to supply this if your state variable cannot support
subtraction, such as angles (359-1 degreees is 2, not 358). x and y
are state vectors, not scalars.
subtract : callable (x, y), optional
Function that computes the difference between x and y.
You will have to supply this if your state variable cannot support
subtraction, such as angles (359-1 degreees is 2, not 358). x and y
are state vectors, not scalars.
References
----------
References
----------
.. [1] Phillippe Moireau and Dominique Chapelle "Reduced-Order
Unscented Kalman Filtering with Application to Parameter
Identification in Large-Dimensional Systems"
DOI: 10.1051/cocv/2010006
.. [1] Phillippe Moireau and Dominique Chapelle "Reduced-Order
Unscented Kalman Filtering with Application to Parameter
Identification in Large-Dimensional Systems"
DOI: 10.1051/cocv/2010006
"""
"""
def __init__(self, n, alpha=1, sqrt_method=None, subtract=None):
self.n = n

@@ -394,3 +438,3 @@ self.alpha = alpha

if subtract is None:
self.subtract= np.subtract
self.subtract = np.subtract
else:

@@ -434,4 +478,5 @@ self.subtract = subtract

assert self.n == np.size(x), "expected size {}, but size is {}".format(
self.n, np.size(x))
if self.n != np.size(x):
raise ValueError("expected size(x) {}, but size is {}".format(
self.n, np.size(x)))

@@ -482,1 +527,14 @@ n = self.n

return W, W
def __repr__(self):
Wm, Wc = self.weights()
return '\n'.join([
'SimplexSigmaPoints object',
pretty_str('n', self.n),
pretty_str('alpha', self.alpha),
pretty_str('Wm', Wm),
pretty_str('Wc', Wc),
pretty_str('subtract', self.subtract),
pretty_str('sqrt', self.sqrt)
])
# -*- coding: utf-8 -*-
# pylint: disable=invalid-name, too-many-instance-attributes

@@ -22,2 +23,3 @@ """Copyright 2015 Roger R Labbe Jr.

from scipy.linalg import cholesky, qr, pinv
from filterpy.common import pretty_str

@@ -96,5 +98,8 @@

assert dim_x > 0
assert dim_z > 0
assert dim_u >= 0
if dim_z < 1:
raise ValueError('dim_x must be 1 or greater')
if dim_z < 1:
raise ValueError('dim_x must be 1 or greater')
if dim_u < 0:
raise ValueError('dim_x must be 0 or greater')

@@ -105,12 +110,16 @@ self.dim_x = dim_x

self.x = zeros((dim_x,1)) # state
self.x = zeros((dim_x, 1)) # state
self._P = eye(dim_x) # uncertainty covariance
self._P1_2 = eye(dim_x) # sqrt uncertainty covariance
self._P1_2 = eye(dim_x) # sqrt uncertainty covariance
self._Q = eye(dim_x) # sqrt process uncertainty
self._Q1_2 = eye(dim_x) # sqrt process uncertainty
self.B = 0 # control transition matrix
self.F = 0 # state transition matrix
self.H = 0 # Measurement function
self._R1_2 = eye(dim_z) # sqrt state uncertainty
self._Q1_2 = eye(dim_x) # sqrt process uncertainty
self.B = 0. # control transition matrix
self.F = np.eye(dim_x) # state transition matrix
self.H = np.zeros((dim_z, dim_x)) # Measurement function
self._R1_2 = eye(dim_z) # sqrt state uncertainty
self._R = eye(dim_z) # state uncertainty
self.K = 0.
self.S = 0.
# Residual is computed during the innovation (update) step. We

@@ -160,5 +169,5 @@ # save it so that in case you want to inspect it for various

_, S = qr(M)
self.K = S[0:dim_z, dim_z:].T
N = S[0:dim_z, 0:dim_z].T
_, self.S = qr(M)
self.K = self.S[0:dim_z, dim_z:].T
N = self.S[0:dim_z, 0:dim_z].T

@@ -172,3 +181,3 @@ # y = z - Hx

self.x += dot(self.K, pinv(N)).dot(self.y)
self._P1_2 = S[dim_z:, dim_z:].T
self._P1_2 = self.S[dim_z:, dim_z:].T

@@ -191,3 +200,3 @@

# P = FPF' + Q
T, P2 = qr(np.hstack([dot(self.F, self._P1_2), self._Q1_2]).T)
_, P2 = qr(np.hstack([dot(self.F, self._P1_2), self._Q1_2]).T)
self._P1_2 = P2[:self.dim_x, :self.dim_x].T

@@ -238,3 +247,3 @@

self._Q = value
self._Q1_2 = cholesky (self._Q, lower=True)
self._Q1_2 = cholesky(self._Q, lower=True)

@@ -275,2 +284,22 @@ @property

self._R = value
self._R1_2 = cholesky (self._R, lower=True)
self._R1_2 = cholesky(self._R, lower=True)
def __repr__(self):
return '\n'.join([
'SquareRootKalmanFilter object',
pretty_str('dim_x', self.dim_x),
pretty_str('dim_z', self.dim_z),
pretty_str('dim_u', self.dim_u),
pretty_str('x', self.x),
pretty_str('P', self.P),
pretty_str('F', self.F),
pretty_str('Q', self.Q),
pretty_str('R', self.R),
pretty_str('H', self.H),
pretty_str('K', self.K),
pretty_str('y', self.y),
pretty_str('S', self.S),
pretty_str('M', self.M),
pretty_str('B', self.B),
])
# -*- coding: utf-8 -*-
# pylint: disable=invalid-name
"""Copyright 2015 Roger R Labbe Jr.

@@ -19,9 +21,10 @@

import sys
import math
import numpy as np
from numpy import eye, zeros, dot, isscalar, outer
from scipy.linalg import inv, cholesky
import sys
from scipy.linalg import cholesky
from filterpy.kalman import unscented_transform
from filterpy.stats import logpdf
from filterpy.common import pretty_str

@@ -32,3 +35,4 @@

# pylint: disable=C0103
r""" Implements the Scaled Unscented Kalman filter (UKF) as defined by
r"""
Implements the Scaled Unscented Kalman filter (UKF) as defined by
Simon Julier in [1], using the formulation provided by Wan and Merle

@@ -71,3 +75,8 @@ in [2]. This filter scales the sigma points to avoid strong nonlinearities.

inv : function, default numpy.linalg.inv
If you prefer another inverse function, such as the Moore-Penrose
pseudo inverse, set it to that instead: kf.inv = np.linalg.pinv
Examples

@@ -103,3 +112,4 @@ --------

compute_log_likelihood=True):
""" Create a Kalman filter. You are responsible for setting the
"""
Create a Kalman filter. You are responsible for setting the
various state variables to reasonable values; the defaults below will

@@ -225,2 +235,4 @@ not give you a functional filter.

#pylint: disable=too-many-arguments
self.Q = eye(dim_x)

@@ -267,5 +279,11 @@ self.R = eye(dim_z)

self.K = 0. # Kalman gain
self.y = 0. # residual
def predict(self, dt=None, UT=None, fx_args=()):
r""" Performs the predict step of the UKF. On return, self.x and
self.inv = np.linalg.inv
def predict(self, dt=None, UT=None, fx_args=()):
r"""
Performs the predict step of the UKF. On return, self.x and
self.P contain the predicted state (x) and covariance (P). '

@@ -304,7 +322,5 @@

# calculate sigma points for given mean and covariance
sigmas = self.points_fn.sigma_points(self.x, self.P)
self.compute_process_sigmas(dt, *fx_args)
for i, s in enumerate(sigmas):
self.sigmas_f[i] = self.fx(s, dt, *fx_args)
#and pass sigmas through the unscented transform
self.x, self.P = UT(self.sigmas_f, self.Wm, self.Wc, self.Q,

@@ -314,4 +330,6 @@ self.x_mean, self.residual_x)

def update(self, z, R=None, UT=None, hx_args=()):
""" Update the UKF with the given measurements. On return,
"""
Update the UKF with the given measurements. On return,
self.x and self.P contain the new mean and covariance of the filter.

@@ -364,3 +382,3 @@

self.K = dot(Pxz, inv(Pz)) # Kalman gain
self.K = dot(Pxz, self.inv(Pz)) # Kalman gain
self.y = self.residual_z(z, zp) # residual

@@ -370,3 +388,3 @@

self.x = self.x + dot(self.K, self.y)
self.P = self.P - dot(self.K, Pz).dot(self.K.T)
self.P = self.P - dot(self.K, dot(Pz, self.K.T))

@@ -386,3 +404,3 @@ if self.compute_log_likelihood:

dx = self.residual_x(sigmas_f[i], x)
dz = self.residual_z(sigmas_h[i], z)
dz = self.residual_z(sigmas_h[i], z)
Pxz += self.Wc[i] * outer(dx, dz)

@@ -392,2 +410,17 @@ return Pxz

def compute_process_sigmas(self, dt, *fx_args):
"""
computes the values of sigmas_f. Normally a user would not call
this, but it is useful if you need to call update more than once
between calls to predict (to update for multiple simultaneous
measurements), so the sigmas correctly reflect the updated state
x, P.
"""
# calculate sigma points for given mean and covariance
sigmas = self.points_fn.sigma_points(self.x, self.P)
for i, s in enumerate(sigmas):
self.sigmas_f[i] = self.fx(s, dt, *fx_args)
@property

@@ -409,3 +442,3 @@ def likelihood(self):

if lh == 0:
lh = sys.float_info.min
lh = sys.float_info.min
return lh

@@ -415,3 +448,4 @@

def batch_filter(self, zs, Rs=None, UT=None):
""" Performs the UKF filter over the list of measurement in `zs`.
"""
Performs the UKF filter over the list of measurement in `zs`.

@@ -451,12 +485,12 @@ Parameters

z = zs[0]
except:
assert not isscalar(zs), 'zs must be list-like'
except TypeError:
raise TypeError('zs must be list-like')
if self._dim_z == 1:
assert isscalar(z) or (z.ndim==1 and len(z) == 1), \
'zs must be a list of scalars or 1D, 1 element arrays'
if not(isscalar(z) or (z.ndim == 1 and len(z) == 1)):
raise TypeError('zs must be a list of scalars or 1D, 1 element arrays')
else:
assert len(z) == self._dim_z, 'each element in zs must be a' \
'1D array of length {}'.format(self._dim_z)
if len(z) != self._dim_z:
raise TypeError(
'each element in zs must be a 1D array of length {}'.format(self._dim_z))

@@ -479,4 +513,4 @@ z_n = np.size(zs, 0)

self.update(z, r, UT=UT)
means[i,:] = self.x
covariances[i,:,:] = self.P
means[i, :] = self.x
covariances[i, :, :] = self.P

@@ -487,3 +521,4 @@ return (means, covariances)

def rts_smoother(self, Xs, Ps, Qs=None, dt=None):
""" Runs the Rauch-Tung-Striebal Kalman smoother on a set of
"""
Runs the Rauch-Tung-Striebal Kalman smoother on a set of
means and covariances computed by the UKF. The usual input

@@ -534,4 +569,7 @@ would come from the output of `batch_filter()`.

"""
#pylint: disable=too-many-locals
assert len(Xs) == len(Ps)
if len(Xs) != len(Ps):
raise ValueError('Xs and Ps must have the same length')
n, dim_x = Xs.shape

@@ -562,4 +600,4 @@

xb, Pb = unscented_transform(
sigmas_f, self.Wm, self.Wc, self.Q,
self.x_mean, self.residual_x)
sigmas_f, self.Wm, self.Wc, self.Q,
self.x_mean, self.residual_x)

@@ -574,3 +612,3 @@ # compute cross variance

# compute gain
K = dot(Pxb, inv(Pb))
K = dot(Pxb, self.inv(Pb))

@@ -583,1 +621,25 @@ # update the smoothed estimates

return (xs, ps, Ks)
def __repr__(self):
return '\n'.join([
'UnscentedKalmanFilter object',
pretty_str('x', self.x),
pretty_str('P', self.P),
pretty_str('Q', self.Q),
pretty_str('R', self.R),
pretty_str('K', self.K),
pretty_str('y', self.y),
pretty_str('log-likelihood', self.log_likelihood),
pretty_str('sigmas_f', self.sigmas_f),
pretty_str('h', self.sigmas_h),
pretty_str('Wm', self.Wm),
pretty_str('Wc', self.Wc),
pretty_str('residual_x', self.residual_x),
pretty_str('residual_z', self.residual_z),
pretty_str('msqrt', self.msqrt),
pretty_str('hx', self.hx),
pretty_str('fx', self.fx),
pretty_str('x_mean', self.x_mean),
pretty_str('z_mean', self.z_mean)
])
# -*- coding: utf-8 -*-
# pylint: disable=invalid-name, too-many-arguments
"""Copyright 2015 Roger R Labbe Jr.

@@ -22,3 +24,4 @@

mean_fn=None, residual_fn=None):
""" Computes unscented transform of a set of sigma points and weights.
r"""
Computes unscented transform of a set of sigma points and weights.
returns the mean and covariance in a tuple.

@@ -81,3 +84,2 @@

Returns

@@ -113,5 +115,5 @@ -------

# this is the fast way to do this - see 'else' for the slow way
if residual_fn is None:
y = sigmas - x[np.newaxis,:]
P = y.T.dot(np.diag(Wc)).dot(y)
if residual_fn is np.subtract or residual_fn is None:
y = sigmas - x[np.newaxis, :]
P = np.dot(y.T, np.dot(np.diag(Wc), y))
else:

@@ -118,0 +120,0 @@ P = np.zeros((n, n))

@@ -20,4 +20,4 @@ # -*- coding: utf-8 -*-

__all__=["least_squares"]
__all__ = ["least_squares"]
from .least_squares import LeastSquaresFilter
# -*- coding: utf-8 -*-
# pylint: disable=C0103, R0913, R0902, C0326, R0914
# disable snake_case warning, too many arguments, too many attributes,
# one space before assignment, too many local variables
"""Copyright 2015 Roger R Labbe Jr.

@@ -20,2 +24,3 @@

import numpy as np
from filterpy.kalman import pretty_str

@@ -33,17 +38,16 @@

Examples
--------
Parameters
----------
.. code-block:: Python
dt : float
time step per update
from filterpy.leastsq import LeastSquaresFilter
order : int
order of filter 0..2
lsq = LeastSquaresFilter(dt=0.1, order=1, noise_sigma=2.3)
noise_sigma : float
sigma (std dev) in x. This allows us to calculate the error of
the filter, it does not influence the filter output.
while True:
z = sensor_reading() # get a measurement
x = lsq(z) # get the filtered estimate.
print('error: {}, velocity error: {}'.format(lsq.error, lsq.derror))
Attributes

@@ -64,2 +68,16 @@ ----------

Examples
--------
.. code-block:: Python
from filterpy.leastsq import LeastSquaresFilter
lsq = LeastSquaresFilter(dt=0.1, order=1, noise_sigma=2.3)
while True:
z = sensor_reading() # get a measurement
x = lsq(z) # get the filtered estimate.
print('error: {}, velocity error: {}'.format(lsq.error, lsq.derror))
References

@@ -79,21 +97,5 @@ ----------

def __init__(self, dt, order, noise_sigma=0.):
""" Least Squares filter of order 0 to 2.
if order < 0 or order > 2:
raise ValueError('order must be between 0 and 2')
Parameters
----------
dt : float
time step per update
order : int
order of filter 0..2
noise_sigma : float
sigma (std dev) in x. This allows us to calculate the error of
the filter, it does not influence the filter output.
"""
assert order >= 0
assert order <= 2
self.dt = dt

@@ -112,7 +114,9 @@ self.dt2 = dt**2

self.n = 0 #nth step in the recursion
self.x = np.zeros(self._order+1)
self.K = np.zeros(self._order+1)
self.x = np.zeros(self._order + 1)
self.K = np.zeros(self._order + 1)
def update(self, z):
""" Update filter with new measurement `z` """
self.n += 1

@@ -125,3 +129,3 @@ n = self.n

self.K[0] = 1./n
residual = z - self.x
residual = z - self.x
self.x += residual * self.K[0]

@@ -133,8 +137,8 @@

self.x[0] += self.x[1]*dt
self.x[0] += self.x[1] * dt
residual = z - self.x[0]
residual = z - self.x[0]
self.x[0] += self.K[0]*residual
self.x[1] += self.K[1]*residual
self.x[0] += self.K[0] * residual
self.x[1] += self.K[1] * residual

@@ -151,7 +155,7 @@ else:

residual = z - self.x[0]
residual = z - self.x[0]
self.x[0] += self.K[0]*residual
self.x[1] += self.K[1]*residual
self.x[2] += self.K[2]*residual
self.x[0] += self.K[0] * residual
self.x[1] += self.K[1] * residual
self.x[2] += self.K[2] * residual

@@ -162,3 +166,4 @@ return self.x

def errors(self):
""" Computes and returns the error and standard deviation of the
"""
Computes and returns the error and standard deviation of the
filter at this time step.

@@ -178,4 +183,4 @@

error = np.zeros(order+1)
std = np.zeros(order+1)
error = np.zeros(order + 1)
std = np.zeros(order + 1)

@@ -188,10 +193,10 @@

error[0] = sigma/sqrt(n)
std[0] = sigma/sqrt(self.n)
std[0] = sigma/sqrt(n)
elif order == 1:
if n > 1:
error[0] = sigma*sqrt(2*(2*n-1)/(n*(n+1)))
error[1] = sigma*sqrt(12/(n*(n*n-1)*dt*dt))
std[0] = sigma*sqrt((2*(2*n-1)) / (n*(n+1)))
std[1] = (sigma/dt) *sqrt(12/(n*(n*n-1)))
error[0] = sigma * sqrt(2*(2*n-1) / (n*(n+1)))
error[1] = sigma * sqrt(12. / (n*(n*n-1)*dt*dt))
std[0] = sigma * sqrt((2*(2*n-1)) / (n*(n+1)))
std[1] = (sigma/dt) * sqrt(12. / (n*(n*n-1)))

@@ -204,8 +209,8 @@ elif order == 2:

error[1] = sigma * sqrt(12*(16*n*n-30*n+11) /
(n*(n*n-1)*(n*n-4)*dt2))
(n*(n*n-1)*(n*n-4)*dt2))
error[2] = sigma * sqrt(720/(n*(n*n-1)*(n*n-4)*dt2*dt2))
std[0] = sigma*sqrt((3*(3*n*n - 3*n + 2)) / (n*(n+1)*(n+2)))
std[1] = (sigma/dt)*sqrt((12*(16*n*n - 30*n + 11)) /
(n*(n*n - 1)*(n*n -4)))
std[0] = sigma * sqrt((3*(3*n*n - 3*n + 2)) / (n*(n+1)*(n+2)))
std[1] = (sigma/dt) * sqrt((12*(16*n*n - 30*n + 11)) /
(n*(n*n - 1)*(n*n -4)))
std[2] = (sigma/dt2) * sqrt(720 / (n*(n*n-1)*(n*n-4)))

@@ -217,3 +222,10 @@

def __repr__(self):
return 'LeastSquareFilter x={}'.format(self.x)
return '\n'.join([
'LeastSquaresFilter object',
pretty_str('dt', self.dt),
pretty_str('dt2', self.dt2),
pretty_str('sigma', self.sigma),
pretty_str('_order', self._order),
pretty_str('x', self.x),
pretty_str('K', self.K)
])

@@ -17,8 +17,9 @@ # -*- coding: utf-8 -*-

#pylint: disable=wildcard-import
from __future__ import (absolute_import, division, print_function,
unicode_literals)
#__all__=["fading_memory"]
__all__ = ["fading_memory"]
from .fading_memory import *
# -*- coding: utf-8 -*-
# pylint: disable=C0103, R0913, R0902, C0326, R0914, R0903
# disable snake_case warning, too many arguments, too many attributes,
# one space before assignment, too many local variables, too few public
# methods
"""Copyright 2015 Roger R Labbe Jr.

@@ -18,3 +23,2 @@

from __future__ import (absolute_import, division, print_function,

@@ -24,2 +28,3 @@ unicode_literals)

from numpy import dot
from filterpy.common import pretty_str

@@ -29,62 +34,77 @@

def __init__(self, x0, dt, order, beta):
""" Creates a fading memory filter of order 0, 1, or 2.
""" Creates a fading memory filter of order 0, 1, or 2.
The KalmanFilter class also implements a more general fading memory
filter and should be preferred in most cases. This is probably faster
for low order systems.
Parameters
----------
This algorithm is based on the fading filter algorithm developed in
Zarcan's "Fundamentals of Kalman Filtering" [1].
x0 : 1D np.array or scalar
Initial value for the filter state. Each value can be a scalar
or a np.array.
Parameters
----------
You can use a scalar for x0. If order > 0, then 0.0 is assumed
for the higher order terms.
x0 : 1D np.array or scalar
Initial value for the filter state. Each value can be a scalar
or a np.array.
x[0] is the value being tracked
x[1] is the first derivative (for order 1 and 2 filters)
x[2] is the second derivative (for order 2 filters)
You can use a scalar for x0. If order > 0, then 0.0 is assumed
for the higher order terms.
dt : scalar
timestep
x[0] is the value being tracked
x[1] is the first derivative (for order 1 and 2 filters)
x[2] is the second derivative (for order 2 filters)
order : int
order of the filter. Defines the order of the system
0 - assumes system of form x = a_0 + a_1*t
1 - assumes system of form x = a_0 +a_1*t + a_2*t^2
2 - assumes system of form x = a_0 +a_1*t + a_2*t^2 + a_3*t^3
dt : scalar
timestep
beta : float
filter gain parameter.
order : int
order of the filter. Defines the order of the system
0 - assumes system of form x = a_0 + a_1*t
1 - assumes system of form x = a_0 +a_1*t + a_2*t^2
2 - assumes system of form x = a_0 +a_1*t + a_2*t^2 + a_3*t^3
Attributes
----------
beta : float
filter gain parameter.
x : np.array
State of the filter.
x[0] is the value being tracked
x[1] is the derivative of x[0] (order 1 and 2 only)
x[2] is the 2nd derivative of x[0] (order 2 only)
Attributes
----------
This is always an np.array, even for order 0 where you can
initialize x0 with a scalar.
x : np.array
State of the filter.
x[0] is the value being tracked
x[1] is the derivative of x[0] (order 1 and 2 only)
x[2] is the 2nd derivative of x[0] (order 2 only)
P : np.array
The diagonal of the covariance matrix. Assumes that variance
is one; multiply by sigma^2 to get the actual variances.
This is always an np.array, even for order 0 where you can
initialize x0 with a scalar.
This is a constant and will not vary as the filter runs.
P : np.array
The diagonal of the covariance matrix. Assumes that variance
is one; multiply by sigma^2 to get the actual variances.
e : np.array
The truncation error of the filter. Each term must be multiplied
by the a_1, a_2, or a_3 of the polynomial for the system.
This is a constant and will not vary as the filter runs.
For example, if the filter is order 2, then multiply all terms
of self.e by a_3 to get the actual error. Multipy by a_2 for order
1, and a_1 for order 0.
"""
e : np.array
The truncation error of the filter. Each term must be multiplied
by the a_1, a_2, or a_3 of the polynomial for the system.
assert order >= 0
assert order <= 2
For example, if the filter is order 2, then multiply all terms
of self.e by a_3 to get the actual error. Multipy by a_2 for order
1, and a_1 for order 0.
References
----------
Paul Zarchan and Howard Musoff. "Fundamentals of Kalman Filtering:
A Practical Approach" American Institute of Aeronautics and Astronautics,
Inc. Fourth Edition. p. 521-536. (2015)
"""
def __init__(self, x0, dt, order, beta):
if order < 0 or order > 2:
raise ValueError('order must be between 0 and 2')
if np.isscalar(x0):

@@ -111,3 +131,3 @@ self.x = np.zeros(order+1)

de = dt*((1+3*beta)/(1-beta))
self.e = np.array ([e, de], dtype=float)
self.e = np.array([e, de], dtype=float)

@@ -127,7 +147,18 @@ else:

de = dt**2 * (2 + 5*beta + 11*beta**2) / (1-beta)**2
dde = 6*dt*(1+2*beta)/(1-beta)
dde = 6*dt*(1+2*beta) / (1-beta)
self.e = np.array ([e, de, dde], dtype=float)
self.e = np.array([e, de, dde], dtype=float)
def __repr__(self):
return '\n'.join([
'FadingMemoryFilter object',
pretty_str('dt', self.dt),
pretty_str('order', self.order),
pretty_str('beta', self.beta),
pretty_str('x', self.x),
pretty_str('P', self.P),
pretty_str('e', self.e),
])
def update(self, z):

@@ -140,3 +171,3 @@ """ update the filter with measurement z. z must be the same type

G = 1 - self.beta
self.x = self.x + dot(G,(z-self.x))
self.x = self.x + dot(G, (z - self.x))

@@ -153,3 +184,2 @@ elif self.order == 1:

self.x[1] = dx + (H / self.dt)*residual
#print(self.x)

@@ -167,3 +197,3 @@ else: # order == 2

residual = z -(x + dxdt +0.5*ddx*T2)
residual = z - (x + dxdt + 0.5*ddx*T2)

@@ -170,0 +200,0 @@ self.x[0] = x + dxdt + 0.5*ddx*T2 + G*residual

@@ -17,7 +17,9 @@ # -*- coding: utf-8 -*-

#pylint: disable=wildcard-import
from __future__ import (absolute_import, division, print_function,
unicode_literals)
__all__=["monte_carlo"]
__all__ = ["resampling"]
from .resampling import *

@@ -1,7 +0,19 @@

# -*- coding: utf-8 -*-
# pylint: skip-file
"""
Created on Mon Jul 27 06:15:39 2015
FilterPy library.
http://github.com/rlabbe/filterpy
@author: RL
Documentation at:
https://filterpy.readthedocs.org
Supporting book at:
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
This is licensed under an MIT license. See the readme.MD file
for more information.
Copyright 2015-2018 Roger R Labbe Jr.
this is still experimental code, not intended for use.

@@ -14,8 +26,7 @@ """

from math import cos, sqrt, pi, exp
from filterpy.common import pretty_str
def gaussian_density(x, mu, sigma):
    """Return the univariate normal pdf N(mu, sigma^2) evaluated at x.

    Parameters
    ----------
    x : float
        point at which to evaluate the density
    mu : float
        mean of the distribution
    sigma : float
        standard deviation (not variance) of the distribution
    """
    # single return; the duplicated (unreachable) second return from the
    # merge residue has been removed
    return 1 / (sigma * sqrt(2 * pi)) * exp(-(x - mu)**2 / (2 * sigma**2))

@@ -25,3 +36,3 @@

class ProcessModel:
def __init__(self, sigma_1=10, sigma_v=10, sigma_w=1):
def __init__(self, sigma_1=10., sigma_v=10., sigma_w=1.):
self.sigma_1 = sigma_1

@@ -31,8 +42,11 @@ self.sigma_v = sigma_v

def sample_initial(self):
return normalvariate(0, self.sigma_1)
def initial_density(self, x):
return gaussian_density(x, 0, self.sigma_1)
def deterministic_transition(self, t, x):

@@ -42,13 +56,18 @@ return x/2 + 25*x/(1+x**2) + 8*cos(1.2*t)

def sample_transition(self, t, x):
return self.deterministic_transition(t, x)\
return self.deterministic_transition(t, x) \
+ normalvariate(0, self.sigma_v)
def transition_density(self, t, x, x_next):
return gaussian_density(x_next,
return gaussian_density(
x_next,
self.deterministic_transition(t, x), self.sigma_v)
def sample_observation(self, t, x):
return x**2/20 + normalvariate(0, self.sigma_w)
def observation_density(self, t, y, x_sample):

@@ -58,2 +77,12 @@ return gaussian_density(y, x_sample**2/20, self.sigma_w)

def __repr__(self):
    """Return a readable multi-line summary of the process model's parameters."""
    rows = ['ProcessModel object']
    for attr in ('sigma_1', 'sigma_v', 'sigma_w'):
        rows.append(pretty_str(attr, getattr(self, attr)))
    return '\n'.join(rows)
def quad_data(n, interval):

@@ -74,15 +103,5 @@ from scipy.special.orthogonal import p_roots

def output_quadrature(name, x_history, particles, prob_history):
output_x(name, x_history)
dist_f = file("%s-dist.dat" % name, "w")
for t, prob in enumerate(prob_history):
for x, p in zip(particles, prob):
dist_f.write("%g\t%g\t%g\n" % (t, x, p))
dist_f.write("\n")
def quadrature(model, n=100, max_time=20, interval=(-40,40)):
def quadrature(model, n=100, max_time=20, interval=(-40, 40)):
particles, gauss_weights = quad_data(n, interval)

@@ -103,5 +122,5 @@

prediction_prob = [
sum(model.transition_density(t, xj, xi) * prob[j]
for j, xj in enumerate(particles))
for xi in particles]
sum(model.transition_density(t, xj, xi) * prob[j]
for j, xj in enumerate(particles))
for xi in particles]

@@ -126,8 +145,6 @@ prob = [model.observation_density(t, y, xi) *

pm = ProcessModel()
x = 1.
xs = [x]
xs = [1]
for i in range(100):
x = pm.deterministic_transition(7., i)
xs.append(x)
xs.append(pm.deterministic_transition(7., i))

@@ -134,0 +151,0 @@ plt.plot(xs)

# -*- coding: utf-8 -*-
# pylint: disable=C0103, R0913, R0902, C0326, R0914
# disable snake_case warning, too many arguments, too many attributes,
# one space before assignment, too many local variables
"""Copyright 2015 Roger R Labbe Jr.

@@ -4,0 +9,0 @@

@@ -15,8 +15,9 @@ """Copyright 2015 Roger R Labbe Jr.

"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
__all__=["stats"]
#pylint: disable=wildcard-import
from __future__ import absolute_import
__all__ = ["stats"]
from .stats import *
# -*- coding: utf-8 -*-
# pylint: disable=C0103, R0913, R0902, C0326, C0302, C0321, R0914, R0912
#
# disable snake_case warning, too many arguments, too many attributes,
# one space before assignment, too many lines, more than one statement
# on line, too many local variables, too many branches
"""Copyright 2015 Roger R Labbe Jr.

@@ -19,7 +25,8 @@

from __future__ import (absolute_import, division, print_function,
unicode_literals)
from __future__ import absolute_import, division, unicode_literals
import math
from math import cos, sin
import random
import warnings
from matplotlib.patches import Ellipse

@@ -29,11 +36,9 @@ import matplotlib.pyplot as plt

from numpy.linalg import inv
import random
import scipy.linalg as linalg
import scipy.sparse as sp
import scipy.sparse.linalg as spln
import scipy.stats
from scipy.stats import norm, multivariate_normal
import warnings
# Older versions of scipy do not support the allow_singular keyword. I could

@@ -44,3 +49,9 @@ # check the version number explicily, but perhaps this is clearer

multivariate_normal.logpdf(1, 1, 1, allow_singular=True)
except:
except TypeError:
warnings.warn(
'You are using a version of SciPy that does not support the '\
'allow_singular parameter in scipy.stats.multivariate_normal.logpdf(). '\
'Future versions of FilterPy will require a version of SciPy that '\
'implements this keyword',
DeprecationWarning)
_support_singular = False

@@ -63,6 +74,6 @@

def mahalanobis(x, mean, cov):
""" Computes the Mahalanobis distance between the state vector x from the
"""
Computes the Mahalanobis distance between the state vector x from the
Gaussian `mean` with covariance `cov`.
Parameters

@@ -77,3 +88,2 @@ ----------

Returns

@@ -84,3 +94,2 @@ -------

Examples

@@ -110,5 +119,7 @@ --------

def log_likelihood(z, x, P, H, R):
"""Returns log-likelihood of the measurement z given the Gaussian
"""
Returns log-likelihood of the measurement z given the Gaussian
posterior (x, P) using measurement function H and measurement
covariance error R"""
covariance error R
"""
S = np.dot(H, np.dot(P, H.T)) + R

@@ -119,5 +130,7 @@ return logpdf(z, np.dot(H, x), S)

def likelihood(z, x, P, H, R):
"""Returns likelihood of the measurement z given the Gaussian
"""
Returns likelihood of the measurement z given the Gaussian
posterior (x, P) using measurement function H and measurement
covariance error R"""
covariance error R
"""
return np.exp(log_likelihood(z, x, P, H, R))

@@ -127,3 +140,4 @@

def logpdf(x, mean=None, cov=1, allow_singular=True):
"""Computes the log of the probability density function of the normal
"""
Computes the log of the probability density function of the normal
N(mean, cov) for the data x. The normal may be univariate or multivariate.

@@ -149,4 +163,3 @@

return multivariate_normal.logpdf(flat_x, flat_mean, cov, allow_singular)
else:
return multivariate_normal.logpdf(flat_x, flat_mean, cov)
return multivariate_normal.logpdf(flat_x, flat_mean, cov)

@@ -156,3 +169,4 @@

def gaussian(x, mean, var):
"""returns normal distribution (pdf) for x given a Gaussian with the
"""
returns normal distribution (pdf) for x given a Gaussian with the
specified mean and variance. All must be scalars.

@@ -196,4 +210,5 @@

def mul (mean1, var1, mean2, var2):
""" multiply Gaussians (mean1, var1) with (mean2, var2) and return the
def mul(mean1, var1, mean2, var2):
"""
Multiply Gaussians (mean1, var1) with (mean2, var2) and return the
results as a tuple (mean,var).

@@ -209,4 +224,5 @@

def add (mean1, var1, mean2, var2):
""" add the Gaussians (mean1, var1) with (mean2, var2) and return the
def add(mean1, var1, mean2, var2):
"""
Add the Gaussians (mean1, var1) with (mean2, var2) and return the
results as a tuple (mean,var).

@@ -221,3 +237,4 @@

def multivariate_gaussian(x, mu, cov):
""" This is designed to replace scipy.stats.multivariate_normal
"""
This is designed to replace scipy.stats.multivariate_normal
which is not available before version 0.14. You may either pass in a

@@ -247,3 +264,3 @@ multivariate set of data:

multivariate_gaussian(1, 2, 3)
scipy.stats.multivariate_normal(2,3).pdf(1)
scipy.stats.multivariate_normal(2,3).pdf(1)

@@ -276,5 +293,10 @@

warnings.warn(
("This was implemented before SciPy version 0.14, which implemented "
"scipy.stats.multivariate_normal. This function will be removed in "
"a future release of FilterPy"), DeprecationWarning)
# force all to numpy.array type, and flatten in case they are vectors
x = np.array(x, copy=False, ndmin=1).flatten()
mu = np.array(mu,copy=False, ndmin=1).flatten()
x = np.array(x, copy=False, ndmin=1).flatten()
mu = np.array(mu, copy=False, ndmin=1).flatten()

@@ -284,6 +306,7 @@ nx = len(mu)

norm_coeff = nx*math.log(2*math.pi) + np.linalg.slogdet(cov)[1]
err = x - mu
if (sp.issparse(cov)):
if sp.issparse(cov):
numerator = spln.spsolve(cov, err).T.dot(err)

@@ -297,3 +320,4 @@ else:

def multivariate_multiply(m1, c1, m2, c2):
""" Multiplies the two multivariate Gaussians together and returns the
"""
Multiplies the two multivariate Gaussians together and returns the
results as the tuple (mean, covariance).

@@ -357,3 +381,4 @@

label=None):
"""Plots a normal distribution CDF with the given mean and variance.
"""
Plots a normal distribution CDF with the given mean and variance.
x-axis contains the mean, the y-axis shows the cumulative probability.

@@ -408,3 +433,4 @@

label=None):
"""Plots a normal distribution CDF with the given mean and variance.
"""
Plots a normal distribution CDF with the given mean and variance.
x-axis contains the mean, the y-axis shows the cumulative probability.

@@ -445,3 +471,3 @@

sigma = math.sqrt(variance)
n = scipy.stats.norm(mean, sigma)
n = norm(mean, sigma)
if xlim is None:

@@ -460,4 +486,5 @@ xlim = [n.ppf(0.001), n.ppf(0.999)]

def plot_gaussian_pdf(mean=0., variance=1.,
def plot_gaussian_pdf(mean=0.,
variance=1.,
std=None,
ax=None,

@@ -468,3 +495,4 @@ mean_line=False,

label=None):
"""Plots a normal distribution PDF with the given mean and variance.
"""
Plots a normal distribution PDF with the given mean and variance.
x-axis contains the mean, the y-axis shows the probability density.

@@ -478,5 +506,9 @@

variance : scalar, default 0.
variance : scalar, default 1., optional
variance for the normal distribution.
std: scalar, default=None, optional
standard deviation of the normal distribution. Use instead of
`variance` if desired
ax : matplotlib axes object, optional

@@ -509,5 +541,13 @@ If provided, the axes to draw on, otherwise plt.gca() is used.

sigma = math.sqrt(variance)
n = scipy.stats.norm(mean, sigma)
if variance is not None and std is not None:
raise ValueError('Specify only one of variance and std')
if variance is None and std is None:
raise ValueError('Specify variance or std')
if variance is not None:
std = math.sqrt(variance)
n = norm(mean, std)
if xlim is None:

@@ -517,3 +557,3 @@ xlim = [n.ppf(0.001), n.ppf(0.999)]

xs = np.arange(xlim[0], xlim[1], (xlim[1] - xlim[0]) / 1000.)
ax.plot(xs,n.pdf(xs), label=label)
ax.plot(xs, n.pdf(xs), label=label)
ax.set_xlim(xlim)

@@ -542,33 +582,9 @@

label=None):
""" DEPRECATED. Use plot_gaussian_pdf() instead. This is poorly named, as
"""
DEPRECATED. Use plot_gaussian_pdf() instead. This is poorly named, as
there are multiple ways to plot a Gaussian.
Plots a normal distribution PDF with the given mean and variance.
x-axis contains the mean, the y-axis shows the probability density.
Parameters
----------
ax : matplotlib axes object, optional
If provided, the axes to draw on, otherwise plt.gca() is used.
mean_line : boolean
draws a line at x=mean
xlim, ylim: (float,float), optional
specify the limits for the x or y axis as tuple (low,high).
If not specified, limits will be automatically chosen to be 'nice'
xlabel : str,optional
label for the x-axis
ylabel : str, optional
label for the y-axis
label : str, optional
label for the legend
"""
warnings.warn('This function is deprecated. It is poorly named. '
'A Gaussian can be plotted as a PDF or CDF. This '
warnings.warn('This function is deprecated. It is poorly named. '\
'A Gaussian can be plotted as a PDF or CDF. This '\
'plots a PDF. Use plot_gaussian_pdf() instead,',

@@ -581,3 +597,4 @@ DeprecationWarning)

def covariance_ellipse(P, deviations=1):
""" returns a tuple defining the ellipse representing the 2 dimensional
"""
Returns a tuple defining the ellipse representing the 2 dimensional
covariance matrix P.

@@ -597,23 +614,247 @@

U,s,v = linalg.svd(P)
orientation = math.atan2(U[1,0],U[0,0])
width = deviations*math.sqrt(s[0])
height = deviations*math.sqrt(s[1])
U, s, _ = linalg.svd(P)
orientation = math.atan2(U[1, 0], U[0, 0])
width = deviations * math.sqrt(s[0])
height = deviations * math.sqrt(s[1])
assert width >= height
if height > width:
raise ValueError('width must be greater than height')
return (orientation, width, height)
def plot_covariance_ellipse(mean, cov=None, variance = 1.0, std=None,
ellipse=None, title=None, axis_equal=True, show_semiaxis=False,
facecolor=None, edgecolor=None,
fc='none', ec='#004080',
alpha=1.0, xlim=None, ylim=None,
ls='solid'):
""" plots the covariance ellipse where
mean is a (x,y) tuple for the mean of the covariance (center of ellipse)
def _eigsorted(cov, asc=True):
"""
Computes eigenvalues and eigenvectors of a covariance matrix and returns
them sorted by eigenvalue.
cov is a 2x2 covariance matrix.
Parameters
----------
cov : ndarray
covariance matrix
asc : bool, default=True
determines whether we are sorted smallest to largest (asc=True),
or largest to smallest (asc=False)
Returns
-------
eigval : 1D ndarray
eigenvalues of covariance ordered largest to smallest
eigvec : 2D ndarray
eigenvectors of covariance matrix ordered to match `eigval` ordering.
I.e eigvec[:, 0] is the rotation vector for eigval[0]
"""
eigval, eigvec = np.linalg.eigh(cov)
order = eigval.argsort()
if not asc:
# sort largest to smallest
order = order[::-1]
return eigval[order], eigvec[:, order]
def plot_3d_covariance(mean, cov, std=1.,
                       ax=None, title=None,
                       color=None, alpha=1.,
                       label_xyz=True,
                       N=60,
                       shade=True,
                       limit_xyz=True,
                       **kwargs):
    """
    Plots a covariance matrix `cov` as a 3D ellipsoid centered around
    the `mean`.

    Parameters
    ----------
    mean : 3-vector
        mean in x, y, z. Can be any type convertable to a row vector.

    cov : ndarray 3x3
        covariance matrix

    std : double, default=1
        standard deviation of ellipsoid

    ax : matplotlib.axes._subplots.Axes3DSubplot, optional
        Axis to draw on. If not provided, a new 3d axis will be generated
        for the current figure

    title : str, optional
        If provided, specifies the title for the plot

    color : any value convertible to a color
        if specified, color of the ellipsoid.

    alpha : float, default 1.
        Alpha value of the ellipsoid. <1 makes is semi-transparent.

    label_xyz: bool, default True
        Gives labels 'X', 'Y', and 'Z' to the axis.

    N : int, default=60
        Number of segments to compute ellipsoid in u,v space. Large numbers
        can take a very long time to plot. Default looks nice.

    shade : bool, default=True
        Use shading to draw the ellipse

    limit_xyz : bool, default=True
        Limit the axis range to fit the ellipse

    **kwargs : optional
        keyword arguments to supply to the call to plot_surface()

    Raises
    ------
    ValueError
        if mean is not convertible to a 1x3 row vector, cov is not 3x3,
        or cov is not positive definite
    """
    from mpl_toolkits.mplot3d import Axes3D

    # force mean to be a 1d vector no matter its shape when passed in
    mean = np.atleast_2d(mean)
    if mean.shape[1] == 1:
        mean = mean.T

    if not(mean.shape[0] == 1 and mean.shape[1] == 3):
        raise ValueError('mean must be convertible to a 1x3 row vector')
    mean = mean[0]

    # force covariance to be 3x3 np.array
    cov = np.asarray(cov)
    if cov.shape[0] != 3 or cov.shape[1] != 3:
        raise ValueError("covariance must be 3x3")

    # The idea is simple - find the 3 axis of the covariance matrix
    # by finding the eigenvalues and vectors. The eigenvalues are the
    # radii (squared, since covariance has squared terms), and the
    # eigenvectors give the rotation. So we make an ellipse with the
    # given radii and then rotate it to the proper orientation.

    eigval, eigvec = _eigsorted(cov, asc=True)

    # validate BEFORE taking the square root: a negative smallest eigenvalue
    # means the matrix is not positive definite, and np.sqrt of it would
    # emit a RuntimeWarning / NaNs before we got a chance to raise
    if eigval[0] < 0:
        raise ValueError("covariance matrix must be positive definite")
    radii = std * np.sqrt(np.real(eigval))

    # calculate cartesian coordinates for the ellipsoid surface
    u = np.linspace(0.0, 2.0 * np.pi, N)
    v = np.linspace(0.0, np.pi, N)
    x = np.outer(np.cos(u), np.sin(v)) * radii[0]
    y = np.outer(np.sin(u), np.sin(v)) * radii[1]
    z = np.outer(np.ones_like(u), np.cos(v)) * radii[2]

    # rotate data with eigenvector and center on mu
    a = np.kron(eigvec[:, 0], x)
    b = np.kron(eigvec[:, 1], y)
    c = np.kron(eigvec[:, 2], z)

    data = a + b + c
    N = data.shape[0]
    x = data[:, 0:N] + mean[0]
    y = data[:, N:N*2] + mean[1]
    z = data[:, N*2:] + mean[2]

    fig = plt.gcf()
    if ax is None:
        ax = fig.add_subplot(111, projection='3d')

    ax.plot_surface(x, y, z,
                    rstride=3, cstride=3, linewidth=0.1, alpha=alpha,
                    shade=shade, color=color, **kwargs)

    # now make it pretty!
    if label_xyz:
        ax.set_xlabel('X')
        ax.set_ylabel('Y')
        ax.set_zlabel('Z')

    if limit_xyz:
        r = radii.max()
        ax.set_xlim(-r + mean[0], r + mean[0])
        ax.set_ylim(-r + mean[1], r + mean[1])
        ax.set_zlim(-r + mean[2], r + mean[2])

    if title is not None:
        plt.title(title)

    #pylint: disable=pointless-statement
    Axes3D #kill pylint warning about unused import

    return ax
def plot_covariance_ellipse(
        mean, cov=None, variance=1.0, std=None,
        ellipse=None, title=None, axis_equal=True, show_semiaxis=False,
        facecolor=None, edgecolor=None,
        fc='none', ec='#004080',
        alpha=1.0, xlim=None, ylim=None,
        ls='solid'):
    """
    Deprecated function to plot a covariance ellipse. Use plot_covariance
    instead.

    See Also
    --------
    plot_covariance
    """
    warnings.warn("deprecated, use plot_covariance instead", DeprecationWarning)

    # forward every argument unchanged to the replacement function
    forwarded = dict(
        mean=mean, cov=cov, variance=variance, std=std, ellipse=ellipse,
        title=title, axis_equal=axis_equal, show_semiaxis=show_semiaxis,
        facecolor=facecolor, edgecolor=edgecolor, fc=fc, ec=ec,
        alpha=alpha, xlim=xlim, ylim=ylim, ls=ls)
    plot_covariance(**forwarded)
def _std_tuple_of(var=None, std=None, interval=None):
"""
Convienence function for plotting. Given one of var, standard
deviation, or interval, return the std. Any of the three can be an
iterable list.
Examples
--------
>>>_std_tuple_of(var=[1, 3, 9])
(1, 2, 3)
"""
if std is not None:
if np.isscalar(std):
std = (std,)
return std
if interval is not None:
if np.isscalar(interval):
interval = (interval,)
return norm.interval(interval)[1]
if var is None:
raise ValueError("no inputs were provided")
if np.isscalar(var):
var = (var,)
return np.sqrt(var)
def plot_covariance(
mean, cov=None, variance=1.0, std=None, interval=None,
ellipse=None, title=None, axis_equal=True, show_semiaxis=False,
facecolor=None, edgecolor=None,
fc='none', ec='#004080',
alpha=1.0, xlim=None, ylim=None,
ls='solid'):
"""
Plots the covariance ellipse for the 2D normal defined by (mean, cov)
`variance` is the normal sigma^2 that we want to plot. If list-like,

@@ -629,9 +870,76 @@ ellipses for all ellipses will be ploted. E.g. [1,2] will plot the

plt.show() is not called, allowing you to plot multiple things on the
same figure.
Parameters
----------
mean : row vector like (2x1)
The mean of the normal
cov : ndarray-like
2x2 covariance matrix
variance : float, default 1, or iterable float, optional
Variance of the plotted ellipse. May specify std or interval instead.
If iterable, such as (1, 2**2, 3**2), then ellipses will be drawn
for all in the list.
std : float, or iterable float, optional
Standard deviation of the plotted ellipse. If specified, variance
is ignored, and interval must be `None`.
If iterable, such as (1, 2, 3), then ellipses will be drawn
for all in the list.
interval : float range [0,1), or iterable float, optional
Confidence interval for the plotted ellipse. For example, .68 (for
68%) gives roughly 1 standand deviation. If specified, variance
is ignored and `std` must be `None`
If iterable, such as (.68, .95), then ellipses will be drawn
for all in the list.
ellipse: (float, float, float)
Instead of a covariance, plots an ellipse described by (angle, width,
height), where angle is in radians, and the width and height are the
minor and major sub-axis radii. `cov` must be `None`.
title: str, optional
title for the plot
axis_equal: bool, default=True
Use the same scale for the x-axis and y-axis to ensure the aspect
ratio is correct.
show_semiaxis: bool, default=False
Draw the semiaxis of the ellipse
facecolor, fc: color, default=None
If specified, fills the ellipse with the specified color. `fc` is an
allowed abbreviation
edgecolor, ec: color, default=None
If specified, overrides the default color sequence for the edge color
of the ellipse. `ec` is an allowed abbreviation
alpha: float range [0,1], default=1.
alpha value for the ellipse
xlim: float or (float,float), default=None
specifies the limits for the x-axis
ylim: float or (float,float), default=None
specifies the limits for the y-axis
ls: str, default='solid':
line style for the edge of the ellipse
"""
assert cov is None or ellipse is None
assert not (cov is None and ellipse is None)
if cov is not None and ellipse is not None:
raise ValueError('You cannot specify both cov and ellipse')
if cov is None and ellipse is None:
raise ValueError('Specify one of cov or ellipse')
if facecolor is None:

@@ -647,20 +955,7 @@ facecolor = fc

if axis_equal:
#plt.gca().set_aspect('equal')
plt.axis('equal')
if title is not None:
plt.title (title)
plt.title(title)
compute_std = False
if std is None:
std = variance
compute_std = True
if np.isscalar(std):
std = [std]
if compute_std:
std = np.sqrt(np.asarray(std))
ax = plt.gca()

@@ -672,2 +967,3 @@

std = _std_tuple_of(variance, std, interval)
for sd in std:

@@ -695,4 +991,5 @@ e = Ellipse(xy=mean, width=sd*width, height=sd*height, angle=angle,

def norm_cdf (x_range, mu, var=1, std=None):
""" computes the probability that a Gaussian distribution lies
def norm_cdf(x_range, mu, var=1, std=None):
"""
Computes the probability that a Gaussian distribution lies
within a range of values.

@@ -728,87 +1025,40 @@

def _is_inside_ellipse(x, y, ex, ey, orientation, width, height):
def _to_cov(x, n):
"""
If x is a scalar, returns a covariance matrix generated from it
as the identity matrix multiplied by x. The dimension will be nxn.
If x is already a 2D numpy array then it is returned unchanged.
co = np.cos(orientation)
so = np.sin(orientation)
Raises ValueError if not positive definite
"""
xx = x*co + y*so
yy = y*co - x*so
if np.isscalar(x):
if x < 0:
raise ValueError('covariance must be > 0')
return np.eye(n) * x
return (xx / width)**2 + (yy / height)**2 <= 1.
return ((x-ex)*co - (y-ey)*so)**2/width**2 + \
((x-ex)*so + (y-ey)*co)**2/height**2 <= 1
def _to_cov(x, n):
""" If x is a scalar, returns a covariance matrix generated from it
as the identity matrix multiplied by x. The dimension will be nxn.
If x is already a numpy array then it is returned unchanged.
"""
x = np.atleast_2d(x)
try:
x.shape
if type(x) != np.ndarray:
x = np.asarray(x)[0]
return x
# quickly find out if we are positive definite
np.linalg.cholesky(x)
except:
cov = np.asarray(x)
try:
# asarray of a scalar returns an unsized object, so len will raise
# an exception
len(cov)
return cov
except:
return np.eye(n) * x
raise ValueError('covariance must be positive definit')
return x
def _do_plot_test():
from numpy.random import multivariate_normal
p = np.array([[32, 15],[15., 40.]])
x,y = multivariate_normal(mean=(0,0), cov=p, size=5000).T
sd = 2
a,w,h = covariance_ellipse(p,sd)
print (np.degrees(a), w, h)
count = 0
color=[]
for i in range(len(x)):
if _is_inside_ellipse(x[i], y[i], 0, 0, a, w, h):
color.append('b')
count += 1
else:
color.append('r')
plt.scatter(x,y,alpha=0.2, c=color)
plt.axis('equal')
plot_covariance_ellipse(mean=(0., 0.),
cov = p,
std=sd,
facecolor='none')
print (count / len(x))
def plot_std_vs_var():
plt.figure()
x = (0,0)
P = np.array([[3,1],[1,3]])
plot_covariance_ellipse(x, P, std=[1,2,3], facecolor='g', alpha=.2)
plot_covariance_ellipse(x, P, variance=[1,2,3], facecolor='r', alpha=.5)
def rand_student_t(df, mu=0, std=1):
    """
    Return a random number distributed by Student's t distribution with
    `df` degrees of freedom with the specified mean and standard deviation.

    Parameters
    ----------
    df : float
        degrees of freedom
    mu : float, default 0
        mean of the distribution
    std : float, default 1
        standard deviation of the distribution
    """
    # single clean implementation: the merge residue had duplicated the
    # docstring text and the y/return statements, which is a syntax error
    x = random.gauss(0, std)
    y = 2.0*random.gammavariate(0.5 * df, 2.0)
    return x / (math.sqrt(y / df)) + mu
def NESS(xs, est_xs, ps):
""" Computes the normalized estimated error squared test on a sequence
"""
Computes the normalized estimated error squared test on a sequence
of estimates. The estimates are optimal if the mean error is zero and

@@ -853,60 +1103,1 @@ the covariance matches the Kalman filter's covariance. If this holds,

return ness
if __name__ == '__main__':
"""ax = plot_gaussian_pdf(2, 3)
plot_gaussian_cdf(2, 3, ax=ax)
plt.show()
ys =np.abs(np.random.randn(100))
ys /= np.sum(ys)
plot_discrete_cdf(xs=None, ys=ys)"""
mean=(0,0)
cov=[[1,.5],[.5,1]]
print("For list and np.array covariances:")
for covariance in (cov,np.asarray(cov)):
a = [[multivariate_gaussian((i,j),mean,covariance)
for i in (-1,0,1)]
for j in (-1,0,1)]
print(np.asarray(a))
print()
#P1 = [[2, 1.9], [1.9, 2]]
#plot_covariance_ellipse((10, 10), P1, facecolor='y', alpha=0.6)
"""plot_std_vs_var()
plt.figure()
_do_plot_test()
#test_gaussian()
# test conversion of scalar to covariance matrix
x = multivariate_gaussian(np.array([1,1]), np.array([3,4]), np.eye(2)*1.4)
x2 = multivariate_gaussian(np.array([1,1]), np.array([3,4]), 1.4)
assert x == x2
# test univarate case
rv = norm(loc = 1., scale = np.sqrt(2.3))
x2 = multivariate_gaussian(1.2, 1., 2.3)
x3 = gaussian(1.2, 1., 2.3)
assert rv.pdf(1.2) == x2
assert abs(x2- x3) < 0.00000001
cov = np.array([[1.0, 1.0],
[1.0, 1.1]])
plt.figure()
P = np.array([[2,0],[0,2]])
plot_covariance_ellipse((2,7), cov=cov, variance=[1,2], facecolor='g',
title='my title', alpha=.2, ls='dashed')
plt.show()
print("all tests passed")
"""
Metadata-Version: 1.1
Name: filterpy
Version: 1.2.4
Version: 1.3
Summary: Kalman filtering and optimal estimation library

@@ -129,2 +129,6 @@ Home-page: https://github.com/rlabbe/filterpy

Full documentation is at
https://filterpy.readthedocs.io/en/latest/
First, import the filters and helper functions.

@@ -131,0 +135,0 @@

@@ -120,2 +120,6 @@ FilterPy - Kalman filters and other optimal and non-optimal estimation filters in Python.

Full documentation is at
https://filterpy.readthedocs.io/en/latest/
First, import the filters and helper functions.

@@ -122,0 +126,0 @@

# -*- coding: utf-8 -*-
"""Copyright 2015 Roger R Labbe Jr.
FilterPy library.
http://github.com/rlabbe/filterpy
Documentation at:
https://filterpy.readthedocs.org
Supporting book at:
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
This is licensed under an MIT license. See the readme.MD file
for more information.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from filterpy.kalman import ExtendedKalmanFilter
rf = ExtendedKalmanFilter(dim_x=3, dim_z=1)