// Copyright (c) FIRST and other WPILib contributors.
// Open Source Software; you can modify and/or share it under the terms of
// the WPILib BSD license file in the root directory of this project.

package edu.wpi.first.math.estimator;

import edu.wpi.first.math.MathUtil;
import edu.wpi.first.math.Matrix;
import edu.wpi.first.math.Nat;
import edu.wpi.first.math.VecBuilder;
import edu.wpi.first.math.geometry.Pose2d;
import edu.wpi.first.math.geometry.Rotation2d;
import edu.wpi.first.math.geometry.Twist2d;
import edu.wpi.first.math.interpolation.Interpolatable;
import edu.wpi.first.math.interpolation.TimeInterpolatableBuffer;
import edu.wpi.first.math.kinematics.MecanumDriveKinematics;
import edu.wpi.first.math.kinematics.MecanumDriveOdometry;
import edu.wpi.first.math.kinematics.MecanumDriveWheelPositions;
import edu.wpi.first.math.numbers.N1;
import edu.wpi.first.math.numbers.N3;
import edu.wpi.first.util.WPIUtilJNI;
import java.util.Map;
import java.util.Objects;

/**
 * This class wraps {@link MecanumDriveOdometry Mecanum Drive Odometry} to fuse latency-compensated
 * vision measurements with mecanum drive encoder distance measurements. It will correct for noisy
 * measurements and encoder drift. It is intended to be a drop-in replacement for {@link
 * edu.wpi.first.math.kinematics.MecanumDriveOdometry}.
 *
 * <p>{@link MecanumDrivePoseEstimator#update} should be called every robot loop.
 *
 * <p>{@link MecanumDrivePoseEstimator#addVisionMeasurement} can be called as infrequently as you
 * want; if you never call it, then this class will behave mostly like regular encoder odometry.
035 */ 036public class MecanumDrivePoseEstimator { 037 private final MecanumDriveKinematics m_kinematics; 038 private final MecanumDriveOdometry m_odometry; 039 private final Matrix<N3, N1> m_q = new Matrix<>(Nat.N3(), Nat.N1()); 040 private Matrix<N3, N3> m_visionK = new Matrix<>(Nat.N3(), Nat.N3()); 041 042 private final TimeInterpolatableBuffer<InterpolationRecord> m_poseBuffer = 043 TimeInterpolatableBuffer.createBuffer(1.5); 044 045 /** 046 * Constructs a MecanumDrivePoseEstimator with default standard deviations for the model and 047 * vision measurements. 048 * 049 * <p>The default standard deviations of the model states are 0.1 meters for x, 0.1 meters for y, 050 * and 0.1 radians for heading. The default standard deviations of the vision measurements are 051 * 0.45 meters for x, 0.45 meters for y, and 0.45 radians for heading. 052 * 053 * @param kinematics A correctly-configured kinematics object for your drivetrain. 054 * @param gyroAngle The current gyro angle. 055 * @param wheelPositions The distances driven by each wheel. 056 * @param initialPoseMeters The starting pose estimate. 057 */ 058 public MecanumDrivePoseEstimator( 059 MecanumDriveKinematics kinematics, 060 Rotation2d gyroAngle, 061 MecanumDriveWheelPositions wheelPositions, 062 Pose2d initialPoseMeters) { 063 this( 064 kinematics, 065 gyroAngle, 066 wheelPositions, 067 initialPoseMeters, 068 VecBuilder.fill(0.1, 0.1, 0.1), 069 VecBuilder.fill(0.45, 0.45, 0.45)); 070 } 071 072 /** 073 * Constructs a MecanumDrivePoseEstimator. 074 * 075 * @param kinematics A correctly-configured kinematics object for your drivetrain. 076 * @param gyroAngle The current gyro angle. 077 * @param wheelPositions The distance measured by each wheel. 078 * @param initialPoseMeters The starting pose estimate. 079 * @param stateStdDevs Standard deviations of the pose estimate (x position in meters, y position 080 * in meters, and heading in radians). Increase these numbers to trust your state estimate 081 * less. 
082 * @param visionMeasurementStdDevs Standard deviations of the vision pose measurement (x position 083 * in meters, y position in meters, and heading in radians). Increase these numbers to trust 084 * the vision pose measurement less. 085 */ 086 public MecanumDrivePoseEstimator( 087 MecanumDriveKinematics kinematics, 088 Rotation2d gyroAngle, 089 MecanumDriveWheelPositions wheelPositions, 090 Pose2d initialPoseMeters, 091 Matrix<N3, N1> stateStdDevs, 092 Matrix<N3, N1> visionMeasurementStdDevs) { 093 m_kinematics = kinematics; 094 m_odometry = new MecanumDriveOdometry(kinematics, gyroAngle, wheelPositions, initialPoseMeters); 095 096 for (int i = 0; i < 3; ++i) { 097 m_q.set(i, 0, stateStdDevs.get(i, 0) * stateStdDevs.get(i, 0)); 098 } 099 100 // Initialize vision R 101 setVisionMeasurementStdDevs(visionMeasurementStdDevs); 102 } 103 104 /** 105 * Sets the pose estimator's trust of global measurements. This might be used to change trust in 106 * vision measurements after the autonomous period, or to change trust as distance to a vision 107 * target increases. 108 * 109 * @param visionMeasurementStdDevs Standard deviations of the vision measurements. Increase these 110 * numbers to trust global measurements from vision less. This matrix is in the form [x, y, 111 * theta]ᵀ, with units in meters and radians. 112 */ 113 public void setVisionMeasurementStdDevs(Matrix<N3, N1> visionMeasurementStdDevs) { 114 var r = new double[3]; 115 for (int i = 0; i < 3; ++i) { 116 r[i] = visionMeasurementStdDevs.get(i, 0) * visionMeasurementStdDevs.get(i, 0); 117 } 118 119 // Solve for closed form Kalman gain for continuous Kalman filter with A = 0 120 // and C = I. See wpimath/algorithms.md. 
121 for (int row = 0; row < 3; ++row) { 122 if (m_q.get(row, 0) == 0.0) { 123 m_visionK.set(row, row, 0.0); 124 } else { 125 m_visionK.set( 126 row, row, m_q.get(row, 0) / (m_q.get(row, 0) + Math.sqrt(m_q.get(row, 0) * r[row]))); 127 } 128 } 129 } 130 131 /** 132 * Resets the robot's position on the field. 133 * 134 * <p>The gyroscope angle does not need to be reset in the user's robot code. The library 135 * automatically takes care of offsetting the gyro angle. 136 * 137 * @param gyroAngle The angle reported by the gyroscope. 138 * @param wheelPositions The distances driven by each wheel. 139 * @param poseMeters The position on the field that your robot is at. 140 */ 141 public void resetPosition( 142 Rotation2d gyroAngle, MecanumDriveWheelPositions wheelPositions, Pose2d poseMeters) { 143 // Reset state estimate and error covariance 144 m_odometry.resetPosition(gyroAngle, wheelPositions, poseMeters); 145 m_poseBuffer.clear(); 146 } 147 148 /** 149 * Gets the estimated robot pose. 150 * 151 * @return The estimated robot pose in meters. 152 */ 153 public Pose2d getEstimatedPosition() { 154 return m_odometry.getPoseMeters(); 155 } 156 157 /** 158 * Adds a vision measurement to the Kalman Filter. This will correct the odometry pose estimate 159 * while still accounting for measurement noise. 160 * 161 * <p>This method can be called as infrequently as you want, as long as you are calling {@link 162 * MecanumDrivePoseEstimator#update} every loop. 163 * 164 * <p>To promote stability of the pose estimate and make it robust to bad vision data, we 165 * recommend only adding vision measurements that are already within one meter or so of the 166 * current pose estimate. 167 * 168 * @param visionRobotPoseMeters The pose of the robot as measured by the vision camera. 169 * @param timestampSeconds The timestamp of the vision measurement in seconds. 
Note that if you 170 * don't use your own time source by calling {@link 171 * MecanumDrivePoseEstimator#updateWithTime(double,Rotation2d,MecanumDriveWheelPositions)} 172 * then you must use a timestamp with an epoch since FPGA startup (i.e., the epoch of this 173 * timestamp is the same epoch as {@link edu.wpi.first.wpilibj.Timer#getFPGATimestamp()}.) 174 * This means that you should use {@link edu.wpi.first.wpilibj.Timer#getFPGATimestamp()} as 175 * your time source or sync the epochs. 176 */ 177 public void addVisionMeasurement(Pose2d visionRobotPoseMeters, double timestampSeconds) { 178 // Step 1: Get the pose odometry measured at the moment the vision measurement was made. 179 var sample = m_poseBuffer.getSample(timestampSeconds); 180 181 if (sample.isEmpty()) { 182 return; 183 } 184 185 // Step 2: Measure the twist between the odometry pose and the vision pose. 186 var twist = sample.get().poseMeters.log(visionRobotPoseMeters); 187 188 // Step 3: We should not trust the twist entirely, so instead we scale this twist by a Kalman 189 // gain matrix representing how much we trust vision measurements compared to our current pose. 190 var k_times_twist = m_visionK.times(VecBuilder.fill(twist.dx, twist.dy, twist.dtheta)); 191 192 // Step 4: Convert back to Twist2d. 193 var scaledTwist = 194 new Twist2d(k_times_twist.get(0, 0), k_times_twist.get(1, 0), k_times_twist.get(2, 0)); 195 196 // Step 5: Reset Odometry to state at sample with vision adjustment. 197 m_odometry.resetPosition( 198 sample.get().gyroAngle, 199 sample.get().wheelPositions, 200 sample.get().poseMeters.exp(scaledTwist)); 201 202 // Step 6: Replay odometry inputs between sample time and latest recorded sample to update the 203 // pose buffer and correct odometry. 
204 for (Map.Entry<Double, InterpolationRecord> entry : 205 m_poseBuffer.getInternalBuffer().tailMap(timestampSeconds).entrySet()) { 206 updateWithTime(entry.getKey(), entry.getValue().gyroAngle, entry.getValue().wheelPositions); 207 } 208 } 209 210 /** 211 * Adds a vision measurement to the Kalman Filter. This will correct the odometry pose estimate 212 * while still accounting for measurement noise. 213 * 214 * <p>This method can be called as infrequently as you want, as long as you are calling {@link 215 * MecanumDrivePoseEstimator#update} every loop. 216 * 217 * <p>To promote stability of the pose estimate and make it robust to bad vision data, we 218 * recommend only adding vision measurements that are already within one meter or so of the 219 * current pose estimate. 220 * 221 * <p>Note that the vision measurement standard deviations passed into this method will continue 222 * to apply to future measurements until a subsequent call to {@link 223 * MecanumDrivePoseEstimator#setVisionMeasurementStdDevs(Matrix)} or this method. 224 * 225 * @param visionRobotPoseMeters The pose of the robot as measured by the vision camera. 226 * @param timestampSeconds The timestamp of the vision measurement in seconds. Note that if you 227 * don't use your own time source by calling {@link 228 * MecanumDrivePoseEstimator#updateWithTime(double,Rotation2d,MecanumDriveWheelPositions)}, 229 * then you must use a timestamp with an epoch since FPGA startup (i.e., the epoch of this 230 * timestamp is the same epoch as {@link edu.wpi.first.wpilibj.Timer#getFPGATimestamp()}. This 231 * means that you should use {@link edu.wpi.first.wpilibj.Timer#getFPGATimestamp()} as your 232 * time source in this case. 233 * @param visionMeasurementStdDevs Standard deviations of the vision pose measurement (x position 234 * in meters, y position in meters, and heading in radians). Increase these numbers to trust 235 * the vision pose measurement less. 
236 */ 237 public void addVisionMeasurement( 238 Pose2d visionRobotPoseMeters, 239 double timestampSeconds, 240 Matrix<N3, N1> visionMeasurementStdDevs) { 241 setVisionMeasurementStdDevs(visionMeasurementStdDevs); 242 addVisionMeasurement(visionRobotPoseMeters, timestampSeconds); 243 } 244 245 /** 246 * Updates the pose estimator with wheel encoder and gyro information. This should be called every 247 * loop. 248 * 249 * @param gyroAngle The current gyro angle. 250 * @param wheelPositions The distances driven by each wheel. 251 * @return The estimated pose of the robot in meters. 252 */ 253 public Pose2d update(Rotation2d gyroAngle, MecanumDriveWheelPositions wheelPositions) { 254 return updateWithTime(WPIUtilJNI.now() * 1.0e-6, gyroAngle, wheelPositions); 255 } 256 257 /** 258 * Updates the pose estimator with wheel encoder and gyro information. This should be called every 259 * loop. 260 * 261 * @param currentTimeSeconds Time at which this method was called, in seconds. 262 * @param gyroAngle The current gyroscope angle. 263 * @param wheelPositions The distances driven by each wheel. 264 * @return The estimated pose of the robot in meters. 265 */ 266 public Pose2d updateWithTime( 267 double currentTimeSeconds, Rotation2d gyroAngle, MecanumDriveWheelPositions wheelPositions) { 268 m_odometry.update(gyroAngle, wheelPositions); 269 270 m_poseBuffer.addSample( 271 currentTimeSeconds, 272 new InterpolationRecord( 273 getEstimatedPosition(), 274 gyroAngle, 275 new MecanumDriveWheelPositions( 276 wheelPositions.frontLeftMeters, 277 wheelPositions.frontRightMeters, 278 wheelPositions.rearLeftMeters, 279 wheelPositions.rearRightMeters))); 280 281 return getEstimatedPosition(); 282 } 283 284 /** 285 * Represents an odometry record. The record contains the inputs provided as well as the pose that 286 * was observed based on these inputs, as well as the previous record and its inputs. 
287 */ 288 private class InterpolationRecord implements Interpolatable<InterpolationRecord> { 289 // The pose observed given the current sensor inputs and the previous pose. 290 private final Pose2d poseMeters; 291 292 // The current gyro angle. 293 private final Rotation2d gyroAngle; 294 295 // The distances traveled by each wheel encoder. 296 private final MecanumDriveWheelPositions wheelPositions; 297 298 /** 299 * Constructs an Interpolation Record with the specified parameters. 300 * 301 * @param pose The pose observed given the current sensor inputs and the previous pose. 302 * @param gyro The current gyro angle. 303 * @param wheelPositions The distances traveled by each wheel encoder. 304 */ 305 private InterpolationRecord( 306 Pose2d poseMeters, Rotation2d gyro, MecanumDriveWheelPositions wheelPositions) { 307 this.poseMeters = poseMeters; 308 this.gyroAngle = gyro; 309 this.wheelPositions = wheelPositions; 310 } 311 312 /** 313 * Return the interpolated record. This object is assumed to be the starting position, or lower 314 * bound. 315 * 316 * @param endValue The upper bound, or end. 317 * @param t How far between the lower and upper bound we are. This should be bounded in [0, 1]. 318 * @return The interpolated value. 319 */ 320 @Override 321 public InterpolationRecord interpolate(InterpolationRecord endValue, double t) { 322 if (t < 0) { 323 return this; 324 } else if (t >= 1) { 325 return endValue; 326 } else { 327 // Find the new wheel distances. 
328 var wheels_lerp = 329 new MecanumDriveWheelPositions( 330 MathUtil.interpolate( 331 this.wheelPositions.frontLeftMeters, 332 endValue.wheelPositions.frontLeftMeters, 333 t), 334 MathUtil.interpolate( 335 this.wheelPositions.frontRightMeters, 336 endValue.wheelPositions.frontRightMeters, 337 t), 338 MathUtil.interpolate( 339 this.wheelPositions.rearLeftMeters, endValue.wheelPositions.rearLeftMeters, t), 340 MathUtil.interpolate( 341 this.wheelPositions.rearRightMeters, 342 endValue.wheelPositions.rearRightMeters, 343 t)); 344 345 // Find the distance travelled between this measurement and the interpolated measurement. 346 var wheels_delta = 347 new MecanumDriveWheelPositions( 348 wheels_lerp.frontLeftMeters - this.wheelPositions.frontLeftMeters, 349 wheels_lerp.frontRightMeters - this.wheelPositions.frontRightMeters, 350 wheels_lerp.rearLeftMeters - this.wheelPositions.rearLeftMeters, 351 wheels_lerp.rearRightMeters - this.wheelPositions.rearRightMeters); 352 353 // Find the new gyro angle. 354 var gyro_lerp = gyroAngle.interpolate(endValue.gyroAngle, t); 355 356 // Create a twist to represent this change based on the interpolated sensor inputs. 357 Twist2d twist = m_kinematics.toTwist2d(wheels_delta); 358 twist.dtheta = gyro_lerp.minus(gyroAngle).getRadians(); 359 360 return new InterpolationRecord(poseMeters.exp(twist), gyro_lerp, wheels_lerp); 361 } 362 } 363 364 @Override 365 public boolean equals(Object obj) { 366 if (this == obj) { 367 return true; 368 } 369 if (!(obj instanceof InterpolationRecord)) { 370 return false; 371 } 372 InterpolationRecord record = (InterpolationRecord) obj; 373 return Objects.equals(gyroAngle, record.gyroAngle) 374 && Objects.equals(wheelPositions, record.wheelPositions) 375 && Objects.equals(poseMeters, record.poseMeters); 376 } 377 378 @Override 379 public int hashCode() { 380 return Objects.hash(gyroAngle, wheelPositions, poseMeters); 381 } 382 } 383}