// Copyright (c) FIRST and other WPILib contributors.
// Open Source Software; you can modify and/or share it under the terms of
// the WPILib BSD license file in the root directory of this project.

package edu.wpi.first.math.estimator;

import edu.wpi.first.math.MathSharedStore;
import edu.wpi.first.math.Matrix;
import edu.wpi.first.math.Nat;
import edu.wpi.first.math.VecBuilder;
import edu.wpi.first.math.geometry.Pose2d;
import edu.wpi.first.math.geometry.Rotation2d;
import edu.wpi.first.math.geometry.Twist2d;
import edu.wpi.first.math.interpolation.Interpolatable;
import edu.wpi.first.math.interpolation.TimeInterpolatableBuffer;
import edu.wpi.first.math.kinematics.Kinematics;
import edu.wpi.first.math.kinematics.Odometry;
import edu.wpi.first.math.kinematics.WheelPositions;
import edu.wpi.first.math.numbers.N1;
import edu.wpi.first.math.numbers.N3;
import java.util.Map;
import java.util.NoSuchElementException;
import java.util.Objects;

/**
 * This class wraps {@link Odometry} to fuse latency-compensated vision measurements with encoder
 * measurements. Robot code should not use this directly; instead, use the particular type for your
 * drivetrain (e.g., {@link DifferentialDrivePoseEstimator}). It is intended to be a drop-in
 * replacement for {@link Odometry}; in fact, if you never call {@link
 * PoseEstimator#addVisionMeasurement} and only call {@link PoseEstimator#update} then this will
 * behave exactly the same as Odometry.
 *
 * <p>{@link PoseEstimator#update} should be called every robot loop.
 *
 * <p>{@link PoseEstimator#addVisionMeasurement} can be called as infrequently as you want; if you
 * never call it then this class will behave exactly like regular encoder odometry.
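 *
 * <p>A minimal usage sketch (the gyro and sensor accessors here are hypothetical; real robot code
 * would construct a drivetrain-specific subclass such as {@link DifferentialDrivePoseEstimator}):
 *
 * <pre>{@code
 * // Called every robot loop (~20 ms).
 * public void periodic() {
 *   // Feed in the latest gyro and encoder readings.
 *   m_estimator.update(m_gyro.getRotation2d(), getWheelPositions());
 *
 *   // Fuse a vision pose whenever one is available, tagged with its capture timestamp.
 *   if (visionPoseIsFresh()) {
 *     m_estimator.addVisionMeasurement(getVisionPose(), getVisionTimestampSeconds());
 *   }
 * }
 * }</pre>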
 */
public class PoseEstimator<T extends WheelPositions<T>> {
  private final Kinematics<?, T> m_kinematics;
  private final Odometry<T> m_odometry;
  private final Matrix<N3, N1> m_q = new Matrix<>(Nat.N3(), Nat.N1());
  private final Matrix<N3, N3> m_visionK = new Matrix<>(Nat.N3(), Nat.N3());

  private static final double kBufferDuration = 1.5;
  private final TimeInterpolatableBuffer<InterpolationRecord> m_poseBuffer =
      TimeInterpolatableBuffer.createBuffer(kBufferDuration);

  /**
   * Constructs a PoseEstimator.
   *
   * @param kinematics A correctly-configured kinematics object for your drivetrain.
   * @param odometry A correctly-configured odometry object for your drivetrain.
   * @param stateStdDevs Standard deviations of the pose estimate (x position in meters, y position
   *     in meters, and heading in radians). Increase these numbers to trust your state estimate
   *     less.
   * @param visionMeasurementStdDevs Standard deviations of the vision pose measurement (x position
   *     in meters, y position in meters, and heading in radians). Increase these numbers to trust
   *     the vision pose measurement less.
   */
  public PoseEstimator(
      Kinematics<?, T> kinematics,
      Odometry<T> odometry,
      Matrix<N3, N1> stateStdDevs,
      Matrix<N3, N1> visionMeasurementStdDevs) {
    m_kinematics = kinematics;
    m_odometry = odometry;

    for (int i = 0; i < 3; ++i) {
      m_q.set(i, 0, stateStdDevs.get(i, 0) * stateStdDevs.get(i, 0));
    }
    setVisionMeasurementStdDevs(visionMeasurementStdDevs);
  }

  /**
   * Sets the pose estimator's trust of global measurements. This might be used to change trust in
   * vision measurements after the autonomous period, or to change trust as distance to a vision
   * target increases.
   *
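   * <p>For example, one might scale vision trust with distance to a target (a sketch with
   * illustrative numbers and a hypothetical {@code distance} variable, in meters):
   *
   * <pre>{@code
   * estimator.setVisionMeasurementStdDevs(
   *     VecBuilder.fill(0.1 * distance, 0.1 * distance, 0.5 * distance));
   * }</pre>
   *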
   * @param visionMeasurementStdDevs Standard deviations of the vision measurements. Increase these
   *     numbers to trust global measurements from vision less. This matrix is in the form [x, y,
   *     theta]ᵀ, with units in meters and radians.
   */
  public void setVisionMeasurementStdDevs(Matrix<N3, N1> visionMeasurementStdDevs) {
    var r = new double[3];
    for (int i = 0; i < 3; ++i) {
      r[i] = visionMeasurementStdDevs.get(i, 0) * visionMeasurementStdDevs.get(i, 0);
    }

    // Solve for the closed-form Kalman gain for a continuous Kalman filter with A = 0
    // and C = I. See wpimath/algorithms.md.
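    // For each axis this reduces to K = q / (q + sqrt(q * r)), where q is the state
    // variance and r is the vision measurement variance for that axis.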
    for (int row = 0; row < 3; ++row) {
      if (m_q.get(row, 0) == 0.0) {
        m_visionK.set(row, row, 0.0);
      } else {
        m_visionK.set(
            row, row, m_q.get(row, 0) / (m_q.get(row, 0) + Math.sqrt(m_q.get(row, 0) * r[row])));
      }
    }
  }

  /**
   * Resets the robot's position on the field.
   *
   * <p>The gyroscope angle does not need to be reset here in the user's robot code. The library
   * automatically takes care of offsetting the gyro angle.
   *
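   * <p>For example, at the start of autonomous (hypothetical accessors and an illustrative pose):
   *
   * <pre>{@code
   * estimator.resetPosition(
   *     gyro.getRotation2d(), getWheelPositions(), new Pose2d(1.5, 5.5, Rotation2d.fromDegrees(180)));
   * }</pre>
   *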
   * @param gyroAngle The angle reported by the gyroscope.
   * @param wheelPositions The current encoder readings.
   * @param poseMeters The position on the field that your robot is at.
   */
  public void resetPosition(Rotation2d gyroAngle, T wheelPositions, Pose2d poseMeters) {
    // Reset state estimate and error covariance
    m_odometry.resetPosition(gyroAngle, wheelPositions, poseMeters);
    m_poseBuffer.clear();
  }

  /**
   * Gets the estimated robot pose.
   *
   * @return The estimated robot pose in meters.
   */
  public Pose2d getEstimatedPosition() {
    return m_odometry.getPoseMeters();
  }

  /**
   * Adds a vision measurement to the Kalman Filter. This will correct the odometry pose estimate
   * while still accounting for measurement noise.
   *
   * <p>This method can be called as infrequently as you want, as long as you are calling {@link
   * PoseEstimator#update} every loop.
   *
   * <p>To promote stability of the pose estimate and make it robust to bad vision data, we
   * recommend only adding vision measurements that are already within one meter or so of the
   * current pose estimate.
   *
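   * <p>A sketch of latency compensation with a camera pipeline that reports its own capture
   * latency (assuming {@code Timer} is {@link edu.wpi.first.wpilibj.Timer}; the latency variable
   * is hypothetical):
   *
   * <pre>{@code
   * estimator.addVisionMeasurement(
   *     visionPose, Timer.getFPGATimestamp() - pipelineLatencySeconds);
   * }</pre>
   *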
   * @param visionRobotPoseMeters The pose of the robot as measured by the vision camera.
   * @param timestampSeconds The timestamp of the vision measurement in seconds. Note that if you
   *     don't use your own time source by calling {@link
   *     PoseEstimator#updateWithTime(double,Rotation2d,WheelPositions)}, then you must use a
   *     timestamp with an epoch since FPGA startup (i.e., the epoch of this timestamp is the same
   *     epoch as {@link edu.wpi.first.wpilibj.Timer#getFPGATimestamp()}). This means that you
   *     should use {@link edu.wpi.first.wpilibj.Timer#getFPGATimestamp()} as your time source or
   *     sync the epochs.
   */
  public void addVisionMeasurement(Pose2d visionRobotPoseMeters, double timestampSeconds) {
    // Step 0: If this measurement is old enough to be outside the pose buffer's timespan, skip.
    try {
      if (m_poseBuffer.getInternalBuffer().lastKey() - kBufferDuration > timestampSeconds) {
        return;
      }
    } catch (NoSuchElementException ex) {
      return;
    }

    // Step 1: Get the pose odometry measured at the moment the vision measurement was made.
    var sample = m_poseBuffer.getSample(timestampSeconds);

    if (sample.isEmpty()) {
      return;
    }

    // Step 2: Measure the twist between the odometry pose and the vision pose.
    var twist = sample.get().poseMeters.log(visionRobotPoseMeters);

    // Step 3: We should not trust the twist entirely, so instead we scale this twist by a Kalman
    // gain matrix representing how much we trust vision measurements compared to our current pose.
    var kTimesTwist = m_visionK.times(VecBuilder.fill(twist.dx, twist.dy, twist.dtheta));

    // Step 4: Convert back to Twist2d.
    var scaledTwist =
        new Twist2d(kTimesTwist.get(0, 0), kTimesTwist.get(1, 0), kTimesTwist.get(2, 0));

    // Step 5: Reset Odometry to state at sample with vision adjustment.
    m_odometry.resetPosition(
        sample.get().gyroAngle,
        sample.get().wheelPositions,
        sample.get().poseMeters.exp(scaledTwist));

    // Step 6: Record the current pose to allow multiple measurements from the same timestamp
    m_poseBuffer.addSample(
        timestampSeconds,
        new InterpolationRecord(
            getEstimatedPosition(), sample.get().gyroAngle, sample.get().wheelPositions));

    // Step 7: Replay odometry inputs between sample time and latest recorded sample to update the
    // pose buffer and correct odometry.
    for (Map.Entry<Double, InterpolationRecord> entry :
        m_poseBuffer.getInternalBuffer().tailMap(timestampSeconds).entrySet()) {
      updateWithTime(entry.getKey(), entry.getValue().gyroAngle, entry.getValue().wheelPositions);
    }
  }

  /**
   * Adds a vision measurement to the Kalman Filter. This will correct the odometry pose estimate
   * while still accounting for measurement noise.
   *
   * <p>This method can be called as infrequently as you want, as long as you are calling {@link
   * PoseEstimator#update} every loop.
   *
   * <p>To promote stability of the pose estimate and make it robust to bad vision data, we
   * recommend only adding vision measurements that are already within one meter or so of the
   * current pose estimate.
   *
   * <p>Note that the vision measurement standard deviations passed into this method will continue
   * to apply to future measurements until a subsequent call to {@link
   * PoseEstimator#setVisionMeasurementStdDevs(Matrix)} or this method.
   *
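   * <p>A sketch of fusing a single vision pose with per-measurement uncertainty (names and values
   * are illustrative; the larger heading standard deviation de-weights the vision heading):
   *
   * <pre>{@code
   * estimator.addVisionMeasurement(
   *     visionPose, captureTimestampSeconds, VecBuilder.fill(0.5, 0.5, 0.9));
   * }</pre>
   *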
   * @param visionRobotPoseMeters The pose of the robot as measured by the vision camera.
   * @param timestampSeconds The timestamp of the vision measurement in seconds. Note that if you
   *     don't use your own time source by calling {@link #updateWithTime}, then you must use a
   *     timestamp with an epoch since FPGA startup (i.e., the epoch of this timestamp is the same
   *     epoch as {@link edu.wpi.first.wpilibj.Timer#getFPGATimestamp()}). This means that you
   *     should use {@link edu.wpi.first.wpilibj.Timer#getFPGATimestamp()} as your time source in
   *     this case.
   * @param visionMeasurementStdDevs Standard deviations of the vision pose measurement (x position
   *     in meters, y position in meters, and heading in radians). Increase these numbers to trust
   *     the vision pose measurement less.
   */
  public void addVisionMeasurement(
      Pose2d visionRobotPoseMeters,
      double timestampSeconds,
      Matrix<N3, N1> visionMeasurementStdDevs) {
    setVisionMeasurementStdDevs(visionMeasurementStdDevs);
    addVisionMeasurement(visionRobotPoseMeters, timestampSeconds);
  }

  /**
   * Updates the pose estimator with wheel encoder and gyro information. This should be called every
   * loop.
   *
   * @param gyroAngle The current gyro angle.
   * @param wheelPositions The current encoder readings.
   * @return The estimated pose of the robot in meters.
   */
  public Pose2d update(Rotation2d gyroAngle, T wheelPositions) {
    return updateWithTime(MathSharedStore.getTimestamp(), gyroAngle, wheelPositions);
  }

  /**
   * Updates the pose estimator with wheel encoder and gyro information. This should be called every
   * loop.
   *
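   * <p>A sketch using a custom time source (the clock accessor is hypothetical; its epoch must
   * match the timestamps passed to {@link PoseEstimator#addVisionMeasurement}):
   *
   * <pre>{@code
   * estimator.updateWithTime(myClock.nowSeconds(), gyro.getRotation2d(), getWheelPositions());
   * }</pre>
   *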
   * @param currentTimeSeconds Time at which this method was called, in seconds.
   * @param gyroAngle The current gyro angle.
   * @param wheelPositions The current encoder readings.
   * @return The estimated pose of the robot in meters.
   */
  public Pose2d updateWithTime(double currentTimeSeconds, Rotation2d gyroAngle, T wheelPositions) {
    m_odometry.update(gyroAngle, wheelPositions);
    m_poseBuffer.addSample(
        currentTimeSeconds,
        new InterpolationRecord(getEstimatedPosition(), gyroAngle, wheelPositions.copy()));

    return getEstimatedPosition();
  }

  /**
   * Represents an odometry record. The record contains the sensor inputs provided as well as the
   * pose that was observed based on those inputs.
   */
  private class InterpolationRecord implements Interpolatable<InterpolationRecord> {
    // The pose observed given the current sensor inputs and the previous pose.
    private final Pose2d poseMeters;

    // The current gyro angle.
    private final Rotation2d gyroAngle;

    // The current encoder readings.
    private final T wheelPositions;

    /**
     * Constructs an InterpolationRecord with the specified parameters.
     *
     * @param poseMeters The pose observed given the current sensor inputs and the previous pose.
     * @param gyro The current gyro angle.
     * @param wheelPositions The current encoder readings.
     */
    private InterpolationRecord(Pose2d poseMeters, Rotation2d gyro, T wheelPositions) {
      this.poseMeters = poseMeters;
      this.gyroAngle = gyro;
      this.wheelPositions = wheelPositions;
    }

    /**
     * Returns the interpolated record. This object is assumed to be the starting position, or
     * lower bound.
     *
     * @param endValue The upper bound, or end.
     * @param t How far between the lower and upper bound we are. This should be bounded in [0, 1].
     * @return The interpolated value.
     */
    @Override
    public InterpolationRecord interpolate(InterpolationRecord endValue, double t) {
      if (t < 0) {
        return this;
      } else if (t >= 1) {
        return endValue;
      } else {
        // Find the new wheel distances.
        var wheelLerp = wheelPositions.interpolate(endValue.wheelPositions, t);

        // Find the distance travelled between this measurement and the interpolated measurement.
        var wheelDelta = wheelLerp.minus(wheelPositions);

        // Find the new gyro angle.
        var gyroLerp = gyroAngle.interpolate(endValue.gyroAngle, t);

        // Create a twist to represent this change based on the interpolated sensor inputs.
        Twist2d twist = m_kinematics.toTwist2d(wheelDelta);
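        // Replace the wheel-derived heading change with the gyro's measured change, since the
        // gyro is the more accurate heading source.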
        twist.dtheta = gyroLerp.minus(gyroAngle).getRadians();

        return new InterpolationRecord(poseMeters.exp(twist), gyroLerp, wheelLerp);
      }
    }

    @Override
    public boolean equals(Object obj) {
      if (this == obj) {
        return true;
      }
      if (!(obj instanceof PoseEstimator.InterpolationRecord)) {
        return false;
      }
      var record = (PoseEstimator<?>.InterpolationRecord) obj;
      return Objects.equals(gyroAngle, record.gyroAngle)
          && Objects.equals(wheelPositions, record.wheelPositions)
          && Objects.equals(poseMeters, record.poseMeters);
    }

    @Override
    public int hashCode() {
      return Objects.hash(gyroAngle, wheelPositions, poseMeters);
    }
  }
}