Skip to content

Commit

Permalink
Merge pull request #10 from frc2204/Swerve-Limelight
Browse files Browse the repository at this point in the history
Limelight localization code
  • Loading branch information
Pepps233 authored Feb 6, 2025
2 parents 8744b7e + 3d9e558 commit 86d18cd
Show file tree
Hide file tree
Showing 8 changed files with 736 additions and 0 deletions.
23 changes: 23 additions & 0 deletions src/main/java/frc/robot/RobotContainer.java
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,8 @@

package frc.robot;

import static frc.robot.subsystems.vision.VisionConstants.*;

import com.pathplanner.lib.auto.AutoBuilder;
import edu.wpi.first.math.geometry.Pose2d;
import edu.wpi.first.math.geometry.Rotation2d;
Expand All @@ -25,6 +27,10 @@
import frc.robot.commands.DriveCommands;
import frc.robot.generated.TunerConstants;
import frc.robot.subsystems.drive.*;
import frc.robot.subsystems.vision.Vision;
import frc.robot.subsystems.vision.VisionIO;
import frc.robot.subsystems.vision.VisionIOLimelight;
import frc.robot.subsystems.vision.VisionIOPhotonVisionSim;
import org.littletonrobotics.junction.networktables.LoggedDashboardChooser;

/**
Expand All @@ -43,6 +49,9 @@ public class RobotContainer {
// Dashboard inputs
private final LoggedDashboardChooser<Command> autoChooser;

// Vision
private final Vision vision;

/** The container for the robot. Contains subsystems, OI devices, and commands. */
public RobotContainer() {
switch (Constants.currentMode) {
Expand All @@ -55,6 +64,12 @@ public RobotContainer() {
new ModuleIOTalonFX(TunerConstants.FrontRight),
new ModuleIOTalonFX(TunerConstants.BackLeft),
new ModuleIOTalonFX(TunerConstants.BackRight));

vision =
new Vision(
drive::addVisionMeasurement,
new VisionIOLimelight(camera0Name, drive::getRotation),
new VisionIOLimelight(camera1Name, drive::getRotation));
break;

case SIM:
Expand All @@ -66,6 +81,12 @@ public RobotContainer() {
new ModuleIOSim(TunerConstants.FrontRight),
new ModuleIOSim(TunerConstants.BackLeft),
new ModuleIOSim(TunerConstants.BackRight));

vision =
new Vision(
drive::addVisionMeasurement,
new VisionIOPhotonVisionSim(camera0Name, robotToCamera0, drive::getPose),
new VisionIOPhotonVisionSim(camera1Name, robotToCamera1, drive::getPose));
break;

default:
Expand All @@ -77,6 +98,8 @@ public RobotContainer() {
new ModuleIO() {},
new ModuleIO() {},
new ModuleIO() {});

vision = new Vision(drive::addVisionMeasurement, new VisionIO() {}, new VisionIO() {});
break;
}

Expand Down
188 changes: 188 additions & 0 deletions src/main/java/frc/robot/subsystems/vision/Vision.java
Original file line number Diff line number Diff line change
@@ -0,0 +1,188 @@
// Copyright 2021-2025 FRC 6328
// http://github.com/Mechanical-Advantage
//
// This program is free software; you can redistribute it and/or
// modify it under the terms of the GNU General Public License
// version 3 as published by the Free Software Foundation or
// available in the root directory of this project.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

package frc.robot.subsystems.vision;

import static frc.robot.subsystems.vision.VisionConstants.*;

import edu.wpi.first.math.Matrix;
import edu.wpi.first.math.VecBuilder;
import edu.wpi.first.math.geometry.Pose2d;
import edu.wpi.first.math.geometry.Pose3d;
import edu.wpi.first.math.geometry.Rotation2d;
import edu.wpi.first.math.numbers.N1;
import edu.wpi.first.math.numbers.N3;
import edu.wpi.first.wpilibj.Alert;
import edu.wpi.first.wpilibj.Alert.AlertType;
import edu.wpi.first.wpilibj2.command.SubsystemBase;
import frc.robot.subsystems.vision.VisionIO.PoseObservationType;
import java.util.LinkedList;
import java.util.List;
import org.littletonrobotics.junction.Logger;

public class Vision extends SubsystemBase {
  private final VisionConsumer consumer;
  private final VisionIO[] io;
  private final VisionIOInputsAutoLogged[] inputs;
  private final Alert[] disconnectedAlerts;

  /**
   * Creates the vision subsystem, which fuses pose observations from one or more cameras into the
   * drive pose estimator.
   *
   * @param consumer Receiver for accepted vision measurements (e.g. {@code
   *     drive::addVisionMeasurement}).
   * @param io One IO implementation per camera; index order defines the camera index used in log
   *     keys and {@code cameraStdDevFactors}.
   */
  public Vision(VisionConsumer consumer, VisionIO... io) {
    this.consumer = consumer;
    this.io = io;

    // Initialize inputs, one per camera
    this.inputs = new VisionIOInputsAutoLogged[io.length];
    for (int i = 0; i < io.length; i++) {
      inputs[i] = new VisionIOInputsAutoLogged();
    }

    // Initialize disconnected alerts, one per camera. Bound the loop by io.length since
    // that is the array being filled (it always equals inputs.length, but saying so
    // directly avoids a misleading coupling).
    this.disconnectedAlerts = new Alert[io.length];
    for (int i = 0; i < io.length; i++) {
      disconnectedAlerts[i] =
          new Alert("Vision camera " + i + " is disconnected.", AlertType.kWarning);
    }
  }

  /**
   * Returns the X angle to the best target, which can be used for simple servoing with vision.
   *
   * @param cameraIndex The index of the camera to use.
   */
  public Rotation2d getTargetX(int cameraIndex) {
    return inputs[cameraIndex].latestTargetObservation.tx();
  }

  /**
   * Polls all camera IOs, filters each pose observation, forwards accepted observations (with
   * distance/tag-count scaled standard deviations) to the consumer, and logs per-camera and
   * summary data.
   */
  @Override
  public void periodic() {
    for (int i = 0; i < io.length; i++) {
      io[i].updateInputs(inputs[i]);
      Logger.processInputs("Vision/Camera" + i, inputs[i]);
    }

    // Initialize logging values
    List<Pose3d> allTagPoses = new LinkedList<>();
    List<Pose3d> allRobotPoses = new LinkedList<>();
    List<Pose3d> allRobotPosesAccepted = new LinkedList<>();
    List<Pose3d> allRobotPosesRejected = new LinkedList<>();

    // Loop over cameras
    for (int cameraIndex = 0; cameraIndex < io.length; cameraIndex++) {
      // Update disconnected alert
      disconnectedAlerts[cameraIndex].set(!inputs[cameraIndex].connected);

      // Initialize logging values
      List<Pose3d> tagPoses = new LinkedList<>();
      List<Pose3d> robotPoses = new LinkedList<>();
      List<Pose3d> robotPosesAccepted = new LinkedList<>();
      List<Pose3d> robotPosesRejected = new LinkedList<>();

      // Add tag poses for every tag the camera saw that exists in the field layout
      for (int tagId : inputs[cameraIndex].tagIds) {
        var tagPose = aprilTagLayout.getTagPose(tagId);
        if (tagPose.isPresent()) {
          tagPoses.add(tagPose.get());
        }
      }

      // Loop over pose observations
      for (var observation : inputs[cameraIndex].poseObservations) {
        // Check whether to reject pose
        boolean rejectPose =
            observation.tagCount() == 0 // Must have at least one tag
                || (observation.tagCount() == 1
                    && observation.ambiguity() > maxAmbiguity) // Cannot be high ambiguity
                || Math.abs(observation.pose().getZ())
                    > maxZError // Must have realistic Z coordinate

                // Must be within the field boundaries
                || observation.pose().getX() < 0.0
                || observation.pose().getX() > aprilTagLayout.getFieldLength()
                || observation.pose().getY() < 0.0
                || observation.pose().getY() > aprilTagLayout.getFieldWidth();

        // Add pose to log (every observation is logged; accepted/rejected are logged
        // separately so filtering can be tuned from the log alone)
        robotPoses.add(observation.pose());
        if (rejectPose) {
          robotPosesRejected.add(observation.pose());
        } else {
          robotPosesAccepted.add(observation.pose());
        }

        // Skip if rejected
        if (rejectPose) {
          continue;
        }

        // Calculate standard deviations: trust falls off with the square of the average
        // tag distance and improves with more tags
        double stdDevFactor =
            Math.pow(observation.averageTagDistance(), 2.0) / observation.tagCount();
        double linearStdDev = linearStdDevBaseline * stdDevFactor;
        double angularStdDev = angularStdDevBaseline * stdDevFactor;
        if (observation.type() == PoseObservationType.MEGATAG_2) {
          linearStdDev *= linearStdDevMegatag2Factor;
          angularStdDev *= angularStdDevMegatag2Factor;
        }
        if (cameraIndex < cameraStdDevFactors.length) {
          linearStdDev *= cameraStdDevFactors[cameraIndex];
          angularStdDev *= cameraStdDevFactors[cameraIndex];
        }

        // Send vision observation
        consumer.accept(
            observation.pose().toPose2d(),
            observation.timestamp(),
            VecBuilder.fill(linearStdDev, linearStdDev, angularStdDev));
      }

      // Log camera data
      Logger.recordOutput(
          "Vision/Camera" + cameraIndex + "/TagPoses", tagPoses.toArray(new Pose3d[0]));
      Logger.recordOutput(
          "Vision/Camera" + cameraIndex + "/RobotPoses", robotPoses.toArray(new Pose3d[0]));
      Logger.recordOutput(
          "Vision/Camera" + cameraIndex + "/RobotPosesAccepted",
          robotPosesAccepted.toArray(new Pose3d[0]));
      Logger.recordOutput(
          "Vision/Camera" + cameraIndex + "/RobotPosesRejected",
          robotPosesRejected.toArray(new Pose3d[0]));
      allTagPoses.addAll(tagPoses);
      allRobotPoses.addAll(robotPoses);
      allRobotPosesAccepted.addAll(robotPosesAccepted);
      allRobotPosesRejected.addAll(robotPosesRejected);
    }

    // Log summary data across all cameras
    Logger.recordOutput("Vision/Summary/TagPoses", allTagPoses.toArray(new Pose3d[0]));
    Logger.recordOutput("Vision/Summary/RobotPoses", allRobotPoses.toArray(new Pose3d[0]));
    Logger.recordOutput(
        "Vision/Summary/RobotPosesAccepted", allRobotPosesAccepted.toArray(new Pose3d[0]));
    Logger.recordOutput(
        "Vision/Summary/RobotPosesRejected", allRobotPosesRejected.toArray(new Pose3d[0]));
  }

  /** Receives a filtered vision pose measurement with its timestamp and standard deviations. */
  @FunctionalInterface
  public static interface VisionConsumer {
    public void accept(
        Pose2d visionRobotPoseMeters,
        double timestampSeconds,
        Matrix<N3, N1> visionMeasurementStdDevs);
  }
}
58 changes: 58 additions & 0 deletions src/main/java/frc/robot/subsystems/vision/VisionConstants.java
Original file line number Diff line number Diff line change
@@ -0,0 +1,58 @@
// Copyright 2021-2025 FRC 6328
// http://github.com/Mechanical-Advantage
//
// This program is free software; you can redistribute it and/or
// modify it under the terms of the GNU General Public License
// version 3 as published by the Free Software Foundation or
// available in the root directory of this project.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

package frc.robot.subsystems.vision;

import edu.wpi.first.apriltag.AprilTagFieldLayout;
import edu.wpi.first.apriltag.AprilTagFields;
import edu.wpi.first.math.geometry.Rotation3d;
import edu.wpi.first.math.geometry.Transform3d;

/** Tuning constants for the vision subsystem. Values are read statically; do not mutate. */
public class VisionConstants {
  // Utility class: constants only, never instantiated.
  private VisionConstants() {}

  // AprilTag layout used for tag pose lookup and field-boundary filtering
  public static final AprilTagFieldLayout aprilTagLayout =
      AprilTagFieldLayout.loadField(AprilTagFields.kDefaultField);

  // Camera names, must match names configured on coprocessor
  public static final String camera0Name = "limelight-three";
  public static final String camera1Name = "limelight-two";

  // Robot to camera transforms
  // (Not used by Limelight, configure in web UI instead)
  public static final Transform3d robotToCamera0 =
      new Transform3d(0.2, 0.0, 0.2, new Rotation3d(0.0, -0.4, 0.0));
  public static final Transform3d robotToCamera1 =
      new Transform3d(-0.2, 0.0, 0.2, new Rotation3d(0.0, -0.4, Math.PI));

  // Basic filtering thresholds
  public static final double maxAmbiguity = 0.3;
  public static final double maxZError = 0.75;

  // Standard deviation baselines, for 1 meter distance and 1 tag
  // (Adjusted automatically based on distance and # of tags)
  public static final double linearStdDevBaseline = 0.02; // Meters
  public static final double angularStdDevBaseline = 0.06; // Radians

  // Standard deviation multipliers for each camera
  // (Adjust to trust some cameras more than others)
  public static final double[] cameraStdDevFactors =
      new double[] {
        1.0, // Camera 0
        1.0 // Camera 1
      };

  // Multipliers to apply for MegaTag 2 observations
  public static final double linearStdDevMegatag2Factor = 0.5; // More stable than full 3D solve
  public static final double angularStdDevMegatag2Factor =
      Double.POSITIVE_INFINITY; // No rotation data available
}
49 changes: 49 additions & 0 deletions src/main/java/frc/robot/subsystems/vision/VisionIO.java
Original file line number Diff line number Diff line change
@@ -0,0 +1,49 @@
// Copyright 2021-2025 FRC 6328
// http://github.com/Mechanical-Advantage
//
// This program is free software; you can redistribute it and/or
// modify it under the terms of the GNU General Public License
// version 3 as published by the Free Software Foundation or
// available in the root directory of this project.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

package frc.robot.subsystems.vision;

import edu.wpi.first.math.geometry.Pose3d;
import edu.wpi.first.math.geometry.Rotation2d;
import org.littletonrobotics.junction.AutoLog;

/** Hardware abstraction for a single vision camera (Limelight, PhotonVision, sim, or no-op). */
public interface VisionIO {
  /** Logged inputs produced by one camera each loop. */
  @AutoLog
  class VisionIOInputs {
    // True while the camera is publishing data; drives the disconnected alert
    public boolean connected = false;
    // Most recent angle-to-target sample, for simple servoing
    public TargetObservation latestTargetObservation =
        new TargetObservation(new Rotation2d(), new Rotation2d());
    // All pose observations received since the previous loop
    public PoseObservation[] poseObservations = new PoseObservation[0];
    // IDs of every AprilTag currently seen by this camera
    public int[] tagIds = new int[0];
  }

  /** Represents the angle to a simple target, not used for pose estimation. */
  record TargetObservation(Rotation2d tx, Rotation2d ty) {}

  /**
   * Represents a robot pose sample used for pose estimation.
   *
   * @param timestamp Capture time in seconds (epoch matches the pose estimator's clock).
   * @param pose Estimated field-relative robot pose.
   * @param ambiguity Solver ambiguity; used to reject single-tag observations.
   * @param tagCount Number of tags contributing to this solve.
   * @param averageTagDistance Mean distance to the contributing tags, in meters.
   * @param type Which pipeline produced the observation.
   */
  record PoseObservation(
      double timestamp,
      Pose3d pose,
      double ambiguity,
      int tagCount,
      double averageTagDistance,
      PoseObservationType type) {}

  /** Source pipeline of a pose observation; MegaTag 2 gets different std-dev multipliers. */
  enum PoseObservationType {
    MEGATAG_1,
    MEGATAG_2,
    PHOTONVISION
  }

  /** Refreshes {@code inputs} with the latest camera data; default implementation is a no-op. */
  default void updateInputs(VisionIOInputs inputs) {}
}
Loading

0 comments on commit 86d18cd

Please sign in to comment.