// Copyright (c) FIRST and other WPILib contributors.
// Open Source Software; you can modify and/or share it under the terms of
// the WPILib BSD license file in the root directory of this project.

package edu.wpi.first.math.estimator;

import edu.wpi.first.math.MathSharedStore;
import edu.wpi.first.math.MathUtil;
import edu.wpi.first.math.Matrix;
import edu.wpi.first.math.Nat;
import edu.wpi.first.math.VecBuilder;
import edu.wpi.first.math.geometry.Pose2d;
import edu.wpi.first.math.geometry.Rotation2d;
import edu.wpi.first.math.geometry.Twist2d;
import edu.wpi.first.math.interpolation.Interpolatable;
import edu.wpi.first.math.interpolation.TimeInterpolatableBuffer;
import edu.wpi.first.math.kinematics.MecanumDriveKinematics;
import edu.wpi.first.math.kinematics.MecanumDriveOdometry;
import edu.wpi.first.math.kinematics.MecanumDriveWheelPositions;
import edu.wpi.first.math.numbers.N1;
import edu.wpi.first.math.numbers.N3;
import java.util.Map;
import java.util.NoSuchElementException;
import java.util.Objects;

/**
 * This class wraps {@link MecanumDriveOdometry Mecanum Drive Odometry} to fuse latency-compensated
 * vision measurements with mecanum drive encoder distance measurements. It will correct for noisy
 * measurements and encoder drift. It is intended to be a drop-in replacement for {@link
 * edu.wpi.first.math.kinematics.MecanumDriveOdometry}.
 *
 * <p>{@link MecanumDrivePoseEstimator#update} should be called every robot loop.
 *
 * <p>{@link MecanumDrivePoseEstimator#addVisionMeasurement} can be called as infrequently as you
 * want; if you never call it, then this class will behave mostly like regular encoder odometry.
 */
public class MecanumDrivePoseEstimator {
  private final MecanumDriveKinematics m_kinematics;
  private final MecanumDriveOdometry m_odometry;
  private final Matrix<N3, N1> m_q = new Matrix<>(Nat.N3(), Nat.N1());
  private Matrix<N3, N3> m_visionK = new Matrix<>(Nat.N3(), Nat.N3());

  private static final double kBufferDuration = 1.5;

  private final TimeInterpolatableBuffer<InterpolationRecord> m_poseBuffer =
      TimeInterpolatableBuffer.createBuffer(kBufferDuration);

  /**
   * Constructs a MecanumDrivePoseEstimator with default standard deviations for the model and
   * vision measurements.
   *
   * <p>The default standard deviations of the model states are 0.1 meters for x, 0.1 meters for y,
   * and 0.1 radians for heading. The default standard deviations of the vision measurements are
   * 0.45 meters for x, 0.45 meters for y, and 0.45 radians for heading.
   *
   * @param kinematics A correctly-configured kinematics object for your drivetrain.
   * @param gyroAngle The current gyro angle.
   * @param wheelPositions The distances driven by each wheel.
   * @param initialPoseMeters The starting pose estimate.
   */
  public MecanumDrivePoseEstimator(
      MecanumDriveKinematics kinematics,
      Rotation2d gyroAngle,
      MecanumDriveWheelPositions wheelPositions,
      Pose2d initialPoseMeters) {
    this(
        kinematics,
        gyroAngle,
        wheelPositions,
        initialPoseMeters,
        VecBuilder.fill(0.1, 0.1, 0.1),
        VecBuilder.fill(0.45, 0.45, 0.45));
  }

  /**
   * Constructs a MecanumDrivePoseEstimator.
   *
   * @param kinematics A correctly-configured kinematics object for your drivetrain.
   * @param gyroAngle The current gyro angle.
   * @param wheelPositions The distances driven by each wheel.
   * @param initialPoseMeters The starting pose estimate.
   * @param stateStdDevs Standard deviations of the pose estimate (x position in meters, y position
   *     in meters, and heading in radians). Increase these numbers to trust your state estimate
   *     less.
   * @param visionMeasurementStdDevs Standard deviations of the vision pose measurement (x position
   *     in meters, y position in meters, and heading in radians). Increase these numbers to trust
   *     the vision pose measurement less.
   */
  public MecanumDrivePoseEstimator(
      MecanumDriveKinematics kinematics,
      Rotation2d gyroAngle,
      MecanumDriveWheelPositions wheelPositions,
      Pose2d initialPoseMeters,
      Matrix<N3, N1> stateStdDevs,
      Matrix<N3, N1> visionMeasurementStdDevs) {
    m_kinematics = kinematics;
    m_odometry = new MecanumDriveOdometry(kinematics, gyroAngle, wheelPositions, initialPoseMeters);

    for (int i = 0; i < 3; ++i) {
      m_q.set(i, 0, stateStdDevs.get(i, 0) * stateStdDevs.get(i, 0));
    }

    // Initialize vision R
    setVisionMeasurementStdDevs(visionMeasurementStdDevs);
  }

  /**
   * Sets the pose estimator's trust of global measurements. This might be used to change trust in
   * vision measurements after the autonomous period, or to change trust as distance to a vision
   * target increases.
   *
   * @param visionMeasurementStdDevs Standard deviations of the vision measurements. Increase these
   *     numbers to trust global measurements from vision less. This matrix is in the form [x, y,
   *     theta]ᵀ, with units in meters and radians.
   */
  public void setVisionMeasurementStdDevs(Matrix<N3, N1> visionMeasurementStdDevs) {
    var r = new double[3];
    for (int i = 0; i < 3; ++i) {
      r[i] = visionMeasurementStdDevs.get(i, 0) * visionMeasurementStdDevs.get(i, 0);
    }

    // Solve for closed form Kalman gain for continuous Kalman filter with A = 0
    // and C = I. See wpimath/algorithms.md.
    for (int row = 0; row < 3; ++row) {
      if (m_q.get(row, 0) == 0.0) {
        m_visionK.set(row, row, 0.0);
      } else {
        m_visionK.set(
            row, row, m_q.get(row, 0) / (m_q.get(row, 0) + Math.sqrt(m_q.get(row, 0) * r[row])));
      }
    }
  }

  /**
   * Resets the robot's position on the field.
   *
   * <p>The gyroscope angle does not need to be reset in the user's robot code. The library
   * automatically takes care of offsetting the gyro angle.
   *
   * @param gyroAngle The angle reported by the gyroscope.
   * @param wheelPositions The distances driven by each wheel.
   * @param poseMeters The position on the field that your robot is at.
   */
  public void resetPosition(
      Rotation2d gyroAngle, MecanumDriveWheelPositions wheelPositions, Pose2d poseMeters) {
    // Reset state estimate and error covariance
    m_odometry.resetPosition(gyroAngle, wheelPositions, poseMeters);
    m_poseBuffer.clear();
  }

  /**
   * Gets the estimated robot pose.
   *
   * @return The estimated robot pose in meters.
   */
  public Pose2d getEstimatedPosition() {
    return m_odometry.getPoseMeters();
  }

  /**
   * Adds a vision measurement to the Kalman Filter. This will correct the odometry pose estimate
   * while still accounting for measurement noise.
   *
   * <p>This method can be called as infrequently as you want, as long as you are calling {@link
   * MecanumDrivePoseEstimator#update} every loop.
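   *
   * <p>A minimal usage sketch is shown below. The estimator, gyro, encoder, and vision names
   * ({@code m_poseEstimator}, {@code m_gyro}, {@code m_frontLeftEncoder}, {@code visionPose}, and
   * so on) are placeholders for objects in user code, not part of this class:
   *
   * <pre>{@code
   * // Called once per robot loop with the current gyro angle and wheel distances.
   * m_poseEstimator.update(
   *     m_gyro.getRotation2d(),
   *     new MecanumDriveWheelPositions(
   *         m_frontLeftEncoder.getDistance(),
   *         m_frontRightEncoder.getDistance(),
   *         m_rearLeftEncoder.getDistance(),
   *         m_rearRightEncoder.getDistance()));
   *
   * // Called whenever a new vision estimate arrives. The timestamp is the FPGA time at which the
   * // camera frame was captured, which is what makes the measurement latency-compensated.
   * m_poseEstimator.addVisionMeasurement(visionPose, visionCaptureTimestampSeconds);
   * }</pre>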
   *
   * <p>To promote stability of the pose estimate and make it robust to bad vision data, we
   * recommend only adding vision measurements that are already within one meter or so of the
   * current pose estimate.
   *
   * @param visionRobotPoseMeters The pose of the robot as measured by the vision camera.
   * @param timestampSeconds The timestamp of the vision measurement in seconds. Note that if you
   *     don't use your own time source by calling {@link
   *     MecanumDrivePoseEstimator#updateWithTime(double,Rotation2d,MecanumDriveWheelPositions)},
   *     then you must use a timestamp with an epoch since FPGA startup (i.e., the epoch of this
   *     timestamp is the same epoch as {@link edu.wpi.first.wpilibj.Timer#getFPGATimestamp()}).
   *     This means that you should use {@link edu.wpi.first.wpilibj.Timer#getFPGATimestamp()} as
   *     your time source or sync the epochs.
   */
  public void addVisionMeasurement(Pose2d visionRobotPoseMeters, double timestampSeconds) {
    // Step 0: If this measurement is old enough to be outside the pose buffer's timespan, skip.
    try {
      if (m_poseBuffer.getInternalBuffer().lastKey() - kBufferDuration > timestampSeconds) {
        return;
      }
    } catch (NoSuchElementException ex) {
      return;
    }

    // Step 1: Get the odometry pose measured at the moment the vision measurement was made.
    var sample = m_poseBuffer.getSample(timestampSeconds);

    if (sample.isEmpty()) {
      return;
    }

    // Step 2: Measure the twist between the odometry pose and the vision pose.
    var twist = sample.get().poseMeters.log(visionRobotPoseMeters);

    // Step 3: We should not trust the twist entirely, so instead we scale this twist by a Kalman
    // gain matrix representing how much we trust vision measurements compared to our current pose.
    var k_times_twist = m_visionK.times(VecBuilder.fill(twist.dx, twist.dy, twist.dtheta));

    // Step 4: Convert back to Twist2d.
    var scaledTwist =
        new Twist2d(k_times_twist.get(0, 0), k_times_twist.get(1, 0), k_times_twist.get(2, 0));

    // Step 5: Reset odometry to the state at the sample with the vision adjustment applied.
    m_odometry.resetPosition(
        sample.get().gyroAngle,
        sample.get().wheelPositions,
        sample.get().poseMeters.exp(scaledTwist));

    // Step 6: Record the current pose to allow multiple measurements from the same timestamp.
    m_poseBuffer.addSample(
        timestampSeconds,
        new InterpolationRecord(
            getEstimatedPosition(), sample.get().gyroAngle, sample.get().wheelPositions));

    // Step 7: Replay odometry inputs between sample time and latest recorded sample to update the
    // pose buffer and correct odometry.
    for (Map.Entry<Double, InterpolationRecord> entry :
        m_poseBuffer.getInternalBuffer().tailMap(timestampSeconds).entrySet()) {
      updateWithTime(entry.getKey(), entry.getValue().gyroAngle, entry.getValue().wheelPositions);
    }
  }

  /**
   * Adds a vision measurement to the Kalman Filter. This will correct the odometry pose estimate
   * while still accounting for measurement noise.
   *
   * <p>This method can be called as infrequently as you want, as long as you are calling {@link
   * MecanumDrivePoseEstimator#update} every loop.
   *
   * <p>To promote stability of the pose estimate and make it robust to bad vision data, we
   * recommend only adding vision measurements that are already within one meter or so of the
   * current pose estimate.
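   *
   * <p>As a sketch of how the per-measurement standard deviations might be used, trust in the
   * vision estimate could be reduced as the target gets farther away (here {@code
   * distanceToTargetMeters} is a placeholder value reported by user vision code):
   *
   * <pre>{@code
   * m_poseEstimator.addVisionMeasurement(
   *     visionPose,
   *     visionCaptureTimestampSeconds,
   *     VecBuilder.fill(
   *         0.1 * distanceToTargetMeters,
   *         0.1 * distanceToTargetMeters,
   *         0.1 * distanceToTargetMeters));
   * }</pre>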
   *
   * <p>Note that the vision measurement standard deviations passed into this method will continue
   * to apply to future measurements until a subsequent call to {@link
   * MecanumDrivePoseEstimator#setVisionMeasurementStdDevs(Matrix)} or this method.
   *
   * @param visionRobotPoseMeters The pose of the robot as measured by the vision camera.
   * @param timestampSeconds The timestamp of the vision measurement in seconds. Note that if you
   *     don't use your own time source by calling {@link
   *     MecanumDrivePoseEstimator#updateWithTime(double,Rotation2d,MecanumDriveWheelPositions)},
   *     then you must use a timestamp with an epoch since FPGA startup (i.e., the epoch of this
   *     timestamp is the same epoch as {@link edu.wpi.first.wpilibj.Timer#getFPGATimestamp()}).
   *     This means that you should use {@link edu.wpi.first.wpilibj.Timer#getFPGATimestamp()} as
   *     your time source in this case.
   * @param visionMeasurementStdDevs Standard deviations of the vision pose measurement (x position
   *     in meters, y position in meters, and heading in radians). Increase these numbers to trust
   *     the vision pose measurement less.
   */
  public void addVisionMeasurement(
      Pose2d visionRobotPoseMeters,
      double timestampSeconds,
      Matrix<N3, N1> visionMeasurementStdDevs) {
    setVisionMeasurementStdDevs(visionMeasurementStdDevs);
    addVisionMeasurement(visionRobotPoseMeters, timestampSeconds);
  }

  /**
   * Updates the pose estimator with wheel encoder and gyro information. This should be called
   * every loop.
   *
   * @param gyroAngle The current gyro angle.
   * @param wheelPositions The distances driven by each wheel.
   * @return The estimated pose of the robot in meters.
   */
  public Pose2d update(Rotation2d gyroAngle, MecanumDriveWheelPositions wheelPositions) {
    return updateWithTime(MathSharedStore.getTimestamp(), gyroAngle, wheelPositions);
  }

  /**
   * Updates the pose estimator with wheel encoder and gyro information. This should be called
   * every loop.
   *
   * @param currentTimeSeconds Time at which this method was called, in seconds.
   * @param gyroAngle The current gyroscope angle.
   * @param wheelPositions The distances driven by each wheel.
   * @return The estimated pose of the robot in meters.
   */
  public Pose2d updateWithTime(
      double currentTimeSeconds, Rotation2d gyroAngle, MecanumDriveWheelPositions wheelPositions) {
    m_odometry.update(gyroAngle, wheelPositions);

    m_poseBuffer.addSample(
        currentTimeSeconds,
        new InterpolationRecord(
            getEstimatedPosition(),
            gyroAngle,
            new MecanumDriveWheelPositions(
                wheelPositions.frontLeftMeters,
                wheelPositions.frontRightMeters,
                wheelPositions.rearLeftMeters,
                wheelPositions.rearRightMeters)));

    return getEstimatedPosition();
  }

  /**
   * Represents an odometry record. The record contains the inputs provided as well as the pose
   * that was observed based on these inputs.
   */
  private class InterpolationRecord implements Interpolatable<InterpolationRecord> {
    // The pose observed given the current sensor inputs and the previous pose.
    private final Pose2d poseMeters;

    // The current gyro angle.
    private final Rotation2d gyroAngle;

    // The distances traveled by each wheel encoder.
    private final MecanumDriveWheelPositions wheelPositions;

    /**
     * Constructs an Interpolation Record with the specified parameters.
     *
     * @param poseMeters The pose observed given the current sensor inputs and the previous pose.
     * @param gyro The current gyro angle.
     * @param wheelPositions The distances traveled by each wheel encoder.
     */
    private InterpolationRecord(
        Pose2d poseMeters, Rotation2d gyro, MecanumDriveWheelPositions wheelPositions) {
      this.poseMeters = poseMeters;
      this.gyroAngle = gyro;
      this.wheelPositions = wheelPositions;
    }

    /**
     * Return the interpolated record. This object is assumed to be the starting position, or lower
     * bound.
     *
     * @param endValue The upper bound, or end.
     * @param t How far between the lower and upper bound we are. This should be bounded in [0, 1].
     * @return The interpolated value.
     */
    @Override
    public InterpolationRecord interpolate(InterpolationRecord endValue, double t) {
      if (t < 0) {
        return this;
      } else if (t >= 1) {
        return endValue;
      } else {
        // Find the new wheel distances.
        var wheels_lerp =
            new MecanumDriveWheelPositions(
                MathUtil.interpolate(
                    this.wheelPositions.frontLeftMeters,
                    endValue.wheelPositions.frontLeftMeters,
                    t),
                MathUtil.interpolate(
                    this.wheelPositions.frontRightMeters,
                    endValue.wheelPositions.frontRightMeters,
                    t),
                MathUtil.interpolate(
                    this.wheelPositions.rearLeftMeters, endValue.wheelPositions.rearLeftMeters, t),
                MathUtil.interpolate(
                    this.wheelPositions.rearRightMeters,
                    endValue.wheelPositions.rearRightMeters,
                    t));

        // Find the distance travelled between this measurement and the interpolated measurement.
        var wheels_delta =
            new MecanumDriveWheelPositions(
                wheels_lerp.frontLeftMeters - this.wheelPositions.frontLeftMeters,
                wheels_lerp.frontRightMeters - this.wheelPositions.frontRightMeters,
                wheels_lerp.rearLeftMeters - this.wheelPositions.rearLeftMeters,
                wheels_lerp.rearRightMeters - this.wheelPositions.rearRightMeters);

        // Find the new gyro angle.
        var gyro_lerp = gyroAngle.interpolate(endValue.gyroAngle, t);

        // Create a twist to represent this change based on the interpolated sensor inputs.
        Twist2d twist = m_kinematics.toTwist2d(wheels_delta);
        twist.dtheta = gyro_lerp.minus(gyroAngle).getRadians();

        return new InterpolationRecord(poseMeters.exp(twist), gyro_lerp, wheels_lerp);
      }
    }

    @Override
    public boolean equals(Object obj) {
      if (this == obj) {
        return true;
      }
      if (!(obj instanceof InterpolationRecord)) {
        return false;
      }
      InterpolationRecord record = (InterpolationRecord) obj;
      return Objects.equals(gyroAngle, record.gyroAngle)
          && Objects.equals(wheelPositions, record.wheelPositions)
          && Objects.equals(poseMeters, record.poseMeters);
    }

    @Override
    public int hashCode() {
      return Objects.hash(gyroAngle, wheelPositions, poseMeters);
    }
  }
}