import torch
from torch import Tensor


def predict(
    motion_mat: Tensor,
    cov_motion_q: Tensor,
    mean: Tensor,
    covariance: Tensor,
) -> tuple[Tensor, Tensor]:
    """Run Kalman filter prediction step."""
    # x = Fx
    mean = torch.matmul(motion_mat, mean)
    # P = FPF^T + Q
    covariance = (
        torch.matmul(motion_mat, torch.matmul(covariance, motion_mat.T))
        + cov_motion_q
    )
    return mean, covariance
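# --- Usage sketch (illustrative assumptions, not part of the functions above) ---
# Prediction step for a hypothetical 4-D constant-velocity state
# [x, y, vx, vy] with unit time step. The motion matrix F and process
# noise Q below are assumed values chosen only for demonstration.
dt = 1.0
motion_mat = torch.tensor(
    [
        [1.0, 0.0, dt, 0.0],
        [0.0, 1.0, 0.0, dt],
        [0.0, 0.0, 1.0, 0.0],
        [0.0, 0.0, 0.0, 1.0],
    ]
)
cov_motion_q = 0.01 * torch.eye(4)         # assumed process noise Q
mean = torch.tensor([0.0, 0.0, 1.0, 0.5])  # position (0, 0), velocity (1, 0.5)
covariance = torch.eye(4)                  # assumed initial uncertainty P
mean, covariance = predict(motion_mat, cov_motion_q, mean, covariance)
# mean is now [1.0, 0.5, 1.0, 0.5] and covariance has grown to FPF^T + Q.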
def project(
    update_mat: Tensor,
    cov_project_r: Tensor,
    mean: Tensor,
    covariance: Tensor,
) -> tuple[Tensor, Tensor]:
    """Project state distribution to measurement space."""
    # Hx
    mean = torch.matmul(update_mat, mean)
    # HPH^T + R
    covariance = torch.matmul(update_mat, torch.matmul(covariance, update_mat.T))
    projected_cov = covariance + cov_project_r
    return mean, projected_cov
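# --- Usage sketch (illustrative assumptions, continuing the example above) ---
# Project the predicted 4-D state onto a hypothetical 2-D position
# measurement via H = [I_2 | 0]; the measurement noise R is an assumed value.
update_mat = torch.tensor(
    [
        [1.0, 0.0, 0.0, 0.0],
        [0.0, 1.0, 0.0, 0.0],
    ]
)
cov_project_r = 0.1 * torch.eye(2)         # assumed measurement noise R
projected_mean, projected_cov = project(update_mat, cov_project_r, mean, covariance)
# projected_mean = Hx (the predicted position), projected_cov = HPH^T + R.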
def update(
    update_mat: Tensor,
    cov_project_r: Tensor,
    mean: Tensor,
    covariance: Tensor,
    measurement: Tensor,
) -> tuple[Tensor, Tensor]:
    """Run Kalman filter correction step."""
    # Hx, S = HPH^T + R
    projected_mean, projected_cov = project(
        update_mat, cov_project_r, mean, covariance
    )
    # K = PH^T S^-1
    chol_factor = torch.linalg.cholesky(  # pylint: disable=not-callable
        projected_cov
    )
    kalman_gain = torch.cholesky_solve(
        torch.matmul(covariance, update_mat.T).T,
        chol_factor,
        upper=False,
    ).T
    # y = z - Hx
    innovation = measurement - projected_mean
    # x = x + Ky
    new_mean = mean + torch.matmul(innovation, kalman_gain.T)
    # P = (I - KH)P(I - KH)^T + KRK^T
    # This (Joseph) form is more numerically stable and remains valid for a
    # non-optimal K, unlike the equation P = (I - KH)P usually seen in the
    # literature.
    i_kh = torch.eye(mean.shape[-1]).to(device=measurement.device) - torch.matmul(
        kalman_gain, update_mat
    )
    new_covariance = torch.matmul(
        torch.matmul(i_kh, covariance), i_kh.T
    ) + torch.matmul(torch.matmul(kalman_gain, cov_project_r), kalman_gain.T)
    return new_mean, new_covariance
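# --- Usage sketch (illustrative assumptions, continuing the example above) ---
# One full predict/update cycle with a hypothetical position measurement z,
# reusing the assumed F, Q, H and R defined in the sketches above.
measurement = torch.tensor([1.1, 0.4])     # assumed observed position z
mean, covariance = predict(motion_mat, cov_motion_q, mean, covariance)
mean, covariance = update(update_mat, cov_project_r, mean, covariance, measurement)
# mean is the corrected state x + Ky; covariance is the Joseph-form posterior
# (I - KH)P(I - KH)^T + KRK^T.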