Commit be14d58

Merge branch 'main' into main

DevonRD authored Jan 13, 2025
2 parents cdf8ffc + 04e28bc
Showing 18 changed files with 215 additions and 49 deletions.
20 changes: 10 additions & 10 deletions .github/workflows/build.yml
@@ -391,7 +391,7 @@ jobs:
- os: ubuntu-22.04
artifact-name: LinuxArm64
image_suffix: RaspberryPi
image_url: https://github.com/PhotonVision/photon-image-modifier/releases/download/v2025.0.0-beta-4/photonvision_raspi.img.xz
image_url: https://github.com/PhotonVision/photon-image-modifier/releases/download/v2025.0.2/photonvision_raspi.img.xz
cpu: cortex-a7
image_additional_mb: 0
extraOpts: -Djdk.lang.Process.launchMechanism=vfork
@@ -430,55 +430,55 @@ jobs:
- os: ubuntu-22.04
artifact-name: LinuxArm64
image_suffix: RaspberryPi
image_url: https://github.com/PhotonVision/photon-image-modifier/releases/download/v2025.0.0/photonvision_raspi.img.xz
image_url: https://github.com/PhotonVision/photon-image-modifier/releases/download/v2025.0.2/photonvision_raspi.img.xz
cpu: cortex-a7
image_additional_mb: 0
- os: ubuntu-22.04
artifact-name: LinuxArm64
image_suffix: limelight2
image_url: https://github.com/PhotonVision/photon-image-modifier/releases/download/v2025.0.0/photonvision_limelight.img.xz
image_url: https://github.com/PhotonVision/photon-image-modifier/releases/download/v2025.0.2/photonvision_limelight.img.xz
cpu: cortex-a7
image_additional_mb: 0
- os: ubuntu-22.04
artifact-name: LinuxArm64
image_suffix: limelight3
image_url: https://github.com/PhotonVision/photon-image-modifier/releases/download/v2025.0.0/photonvision_limelight3.img.xz
image_url: https://github.com/PhotonVision/photon-image-modifier/releases/download/v2025.0.2/photonvision_limelight3.img.xz
cpu: cortex-a7
image_additional_mb: 0
- os: ubuntu-22.04
artifact-name: LinuxArm64
image_suffix: orangepi5
image_url: https://github.com/PhotonVision/photon-image-modifier/releases/download/v2025.0.0/photonvision_opi5.img.xz
image_url: https://github.com/PhotonVision/photon-image-modifier/releases/download/v2025.0.2/photonvision_opi5.img.xz
cpu: cortex-a8
image_additional_mb: 1024
- os: ubuntu-22.04
artifact-name: LinuxArm64
image_suffix: orangepi5b
image_url: https://github.com/PhotonVision/photon-image-modifier/releases/download/v2025.0.0/photonvision_opi5b.img.xz
image_url: https://github.com/PhotonVision/photon-image-modifier/releases/download/v2025.0.2/photonvision_opi5b.img.xz
cpu: cortex-a8
image_additional_mb: 1024
- os: ubuntu-22.04
artifact-name: LinuxArm64
image_suffix: orangepi5plus
image_url: https://github.com/PhotonVision/photon-image-modifier/releases/download/v2025.0.0/photonvision_opi5plus.img.xz
image_url: https://github.com/PhotonVision/photon-image-modifier/releases/download/v2025.0.2/photonvision_opi5plus.img.xz
cpu: cortex-a8
image_additional_mb: 1024
- os: ubuntu-22.04
artifact-name: LinuxArm64
image_suffix: orangepi5pro
image_url: https://github.com/PhotonVision/photon-image-modifier/releases/download/v2025.0.0/photonvision_opi5pro.img.xz
image_url: https://github.com/PhotonVision/photon-image-modifier/releases/download/v2025.0.2/photonvision_opi5pro.img.xz
cpu: cortex-a8
image_additional_mb: 1024
- os: ubuntu-22.04
artifact-name: LinuxArm64
image_suffix: orangepi5max
image_url: https://github.com/PhotonVision/photon-image-modifier/releases/download/v2025.0.0/photonvision_opi5max.img.xz
image_url: https://github.com/PhotonVision/photon-image-modifier/releases/download/v2025.0.2/photonvision_opi5max.img.xz
cpu: cortex-a8
image_additional_mb: 1024
- os: ubuntu-22.04
artifact-name: LinuxArm64
image_suffix: rock5c
image_url: https://github.com/PhotonVision/photon-image-modifier/releases/download/v2025.0.0/photonvision_rock5c.img.xz
image_url: https://github.com/PhotonVision/photon-image-modifier/releases/download/v2025.0.2/photonvision_rock5c.img.xz
cpu: cortex-a8
image_additional_mb: 1024

17 changes: 17 additions & 0 deletions .github/workflows/cut-new-tag.yml
@@ -0,0 +1,17 @@
name: Cut a new tag

on:
  workflow_dispatch:
    inputs:
      tag_name:
        type: string
        description: The full name of the new tag to push to the latest commit to main

jobs:
  push_tag:
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v4
      - run: git tag ${{ github.event.inputs.tag_name }}
      - run: git push origin ${{ github.event.inputs.tag_name }}
4 changes: 2 additions & 2 deletions .github/workflows/python.yml
@@ -5,11 +5,11 @@ permissions:

on:
push:
branches: [ master ]
branches: [ main ]
tags:
- 'v*'
pull_request:
branches: [ master ]
branches: [ main ]
merge_group:

concurrency:
2 changes: 1 addition & 1 deletion build.gradle
@@ -39,7 +39,7 @@ ext {
openCVversion = "4.10.0-3"
joglVersion = "2.4.0"
javalinVersion = "5.6.2"
libcameraDriverVersion = "v2025.0.0"
libcameraDriverVersion = "v2025.0.2"
rknnVersion = "v2025.0.0"
frcYear = "2025"
mrcalVersion = "v2025.0.0";
Binary file not shown.
Binary file not shown.
136 changes: 136 additions & 0 deletions docs/source/docs/contributing/design-descriptions/e2e-latency.md
@@ -0,0 +1,136 @@
# Latency Characterization


## A primer on time

Especially since around 2022, when AprilTags made localization easier, having a way to know when a camera image was captured became more important.
Since the [creation of USBFrameProvider](https://github.com/PhotonVision/photonvision/commit/f92bf670ded52b59a00352a4a49c277f01bae305), we have used the time [provided by CSCore](https://github.wpilib.org/allwpilib/docs/release/java/edu/wpi/first/cscore/CvSink.html#grabFrame(org.opencv.core.Mat)) to tell when a camera image was captured, but just keeping track of "CSCore told us frame N was captured 104.21s after the Raspberry Pi turned on" isn't very helpful. We can decompose this into two questions:

- At what time was a particular image captured, in the coprocessor's timebase?
- How do I convert a time in a coprocessor's timebase into the RoboRIO's timebase, so I can integrate the measurement with my other sensor measurements (like encoders)?

The first one seems easy: CSCore tells us the time, so we just keep track of that, right? The second, translating this time, as measured by the coprocessor's clock, into a timebase also used by user code on the RoboRIO, is actually a [fairly hard problem](time-sync.md) that involved reinventing [PTP](https://en.wikipedia.org/wiki/PTP).
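
To make the clock-translation problem concrete, here is a minimal sketch of the classic two-way time-transfer math that protocols like NTP and PTP are built on. This is illustrative only, not PhotonVision's actual TimeSync implementation, and the function names are mine:

```cpp
#include <cstdint>

// Two-way time transfer: the client stamps t0 when it sends a request and t3
// when the reply arrives (client clock); the server stamps t1 on receive and
// t2 on send (server clock). Assuming a roughly symmetric network path, the
// offset between the two clocks is:
int64_t EstimateOffsetUs(int64_t t0, int64_t t1, int64_t t2, int64_t t3) {
  return ((t1 - t0) + (t2 - t3)) / 2;  // positive => server clock is ahead
}

// With an offset in hand, converting a capture time from the coprocessor
// timebase into the RoboRIO timebase is just addition:
int64_t CoprocToRioTimeUs(int64_t coprocTimeUs, int64_t offsetUs) {
  return coprocTimeUs + offsetUs;
}
```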

And on latency vs. timestamps: PhotonVision has exposed a magic "latency" number since forever. Latency (as in, the time from image capture to acting on the data) can be useful for benchmarking code, but robots actually want to answer "what time was this image from, relative to my other sensor measurements?"
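
The two quantities only become interchangeable once you also know when the data arrived. A one-line sketch of the relationship, with hypothetical names (this is not PhotonLib's API):

```cpp
// A latency number alone can't place an image in time; you also need the time
// the data was received, in the same timebase you want the answer in.
double CaptureTimestampSec(double receiveTimeSec, double latencySec) {
  return receiveTimeSec - latencySec;
}
```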


## CSCore's Frame Time

WPILib's CSCore is a platform-agnostic wrapper around Windows, Linux, and macOS camera APIs. On Linux, CSCore uses [Video4Linux](https://en.wikipedia.org/wiki/Video4Linux) to access USB Video Class (UVC) devices like webcams, as well as CSI cameras on some platforms. At a high level, CSCore's [Linux USB camera driver](https://github.com/wpilibsuite/allwpilib/blob/17a03514bad6de195639634b3d57d5ac411d601e/cscore/src/main/native/linux/UsbCameraImpl.cpp) works by (see the sketch after this list):

- Opening a camera with `open`
- Creating a handful of buffers that V4L will fill with frame data, and `mmap`ing them into program memory
- Asking V4L to start streaming
- While the camera is running:
- Wait for new frames
- Dequeue one buffer
- Call `SourceImpl::PutFrame`, which will copy the image out and convert as needed
- Return the buffer to V4L to fill again
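
For illustration, here is roughly what that loop looks like in terms of raw V4L2 calls. This is a condensed sketch with all error handling elided, not CSCore's actual code (see `UsbCameraImpl.cpp` for that):

```cpp
#include <fcntl.h>
#include <linux/videodev2.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

void CaptureLoop() {
  int fd = open("/dev/video0", O_RDWR);

  // Ask V4L2 for a handful of kernel buffers, and mmap each one into our
  // process so we can read frame data out of it
  v4l2_requestbuffers req{};
  req.count = 4;
  req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
  req.memory = V4L2_MEMORY_MMAP;
  ioctl(fd, VIDIOC_REQBUFS, &req);

  void* buffers[4];
  for (unsigned i = 0; i < req.count; i++) {
    v4l2_buffer buf{};
    buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    buf.memory = V4L2_MEMORY_MMAP;
    buf.index = i;
    ioctl(fd, VIDIOC_QUERYBUF, &buf);
    buffers[i] = mmap(nullptr, buf.length, PROT_READ | PROT_WRITE, MAP_SHARED,
                      fd, buf.m.offset);
    ioctl(fd, VIDIOC_QBUF, &buf);  // hand the buffer to V4L to fill
  }

  // Start streaming, then dequeue/requeue buffers forever
  int type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
  ioctl(fd, VIDIOC_STREAMON, &type);

  while (true) {
    v4l2_buffer buf{};
    buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    buf.memory = V4L2_MEMORY_MMAP;
    ioctl(fd, VIDIOC_DQBUF, &buf);  // blocks until a frame is ready
    // ... copy/convert buffers[buf.index] (CSCore's PutFrame step) ...
    ioctl(fd, VIDIOC_QBUF, &buf);  // return the buffer to V4L to fill again
  }
}
```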

Prior to https://github.com/wpilibsuite/allwpilib/pull/7609, CSCore used the [time at which it dequeued the buffer](https://github.com/wpilibsuite/allwpilib/blob/17a03514bad6de195639634b3d57d5ac411d601e/cscore/src/main/native/linux/UsbCameraImpl.cpp#L559) as the image capture time. But this doesn't account for exposure time, or for latency introduced by the camera, the USB stack, and Linux itself.

V4L does expose (with some [very heavy caveats](https://github.com/torvalds/linux/blob/fc033cf25e612e840e545f8d5ad2edd6ba613ed5/drivers/media/usb/uvc/uvc_video.c#L600) for some troublesome cameras) its best guess at the time an image was captured via [buffer flags](https://www.kernel.org/doc/html/v4.9/media/uapi/v4l/buffer.html#buffer-flags). In my testing, all my cameras were able to provide timestamps with both of these flags set (checked in the sketch after this list):
- `V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC`: The buffer timestamp has been taken from the CLOCK_MONOTONIC clock [...] accessible via `clock_gettime()`.
- `V4L2_BUF_FLAG_TSTAMP_SRC_SOE`: Start Of Exposure. The buffer timestamp has been taken when the exposure of the frame has begun.
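
A minimal sketch of checking those flags on a dequeued buffer (helper names are mine, not CSCore's):

```cpp
#include <cstdint>
#include <linux/videodev2.h>

// True if buf.timestamp is a start-of-exposure time on CLOCK_MONOTONIC
bool HasUsableSoeTimestamp(const v4l2_buffer& buf) {
  return (buf.flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) ==
             V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC &&
         (buf.flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK) ==
             V4L2_BUF_FLAG_TSTAMP_SRC_SOE;
}

// When the flags above are set, buf.timestamp (a struct timeval) is directly
// comparable to clock_gettime() with CLOCK_MONOTONIC
int64_t BufferTimestampUs(const v4l2_buffer& buf) {
  return buf.timestamp.tv_sec * 1000000LL + buf.timestamp.tv_usec;
}
```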

I'm sure that we'll find a camera that doesn't play nice, because we can't have nice things :). But until then, using this timestamp gets us a free accuracy bump.

One other thing to note: this gets us an estimate of when the camera *started* collecting photons. The sensor will keep collecting light for up to the total integration time, plus readout time for rolling-shutter cameras.
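
As a rough bound (my notation, not something V4L reports), any photon contributing to the frame was collected somewhere in the window

```{math}
t_{\mathrm{SOE}} \le t \le t_{\mathrm{SOE}} + t_{\mathrm{exposure}} + t_{\mathrm{readout}}
```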

## Latency Testing

Here, I've got a RoboRIO with an LED, an Orange Pi 5, and a network switch on a test bench. The LED is assumed to turn on basically instantly once we apply current; based on DMA testing, the total time to switch a digital output on is on the order of 10 µs. The RoboRIO is running a TimeSync Server, and the Orange Pi is running a TimeSync Client.

### Test Setup

<details>
<summary>Show RoboRIO Test Code</summary>

```java
package frc.robot;

import org.photonvision.PhotonCamera;

import edu.wpi.first.wpilibj.DigitalOutput;
import edu.wpi.first.wpilibj.TimedRobot;
import edu.wpi.first.wpilibj.Timer;
import edu.wpi.first.wpilibj.smartdashboard.SmartDashboard;

public class Robot extends TimedRobot {
    PhotonCamera camera;
    DigitalOutput light;

    @Override
    public void robotInit() {
        camera = new PhotonCamera("Arducam_OV9782_USB_Camera");

        light = new DigitalOutput(0);
        light.set(false);
    }

    @Override
    public void robotPeriodic() {
        super.robotPeriodic();

        try {
            // Hold the LED off and drain stale results, so the next frame we
            // see with the LED lit must have been captured after t1
            light.set(false);
            for (int i = 0; i < 50; i++) {
                Thread.sleep(20);
                camera.getAllUnreadResults();
            }

            // Bracket the LED turn-on; the true toggle time lands between t1 and t2
            var t1 = Timer.getFPGATimestamp();
            light.set(true);
            var t2 = Timer.getFPGATimestamp();

            for (int i = 0; i < 100; i++) {
                for (var result : camera.getAllUnreadResults()) {
                    if (result.hasTargets()) {
                        // Compare the reported capture time of the first frame
                        // that sees the LED against the midpoint of [t1, t2]
                        var t3 = result.getTimestampSeconds();
                        var t1p5 = (t1 + t2) / 2;
                        var error = t3 - t1p5;
                        SmartDashboard.putNumber("blink_error_ms", error * 1000);
                        return;
                    }
                }

                Thread.sleep(20);
            }
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
    }
}
```
</details>

I've decreased camera exposure as much as possible (so we know with reasonable confidence that the image was collected right at the start of the exposure time reported by V4L), but we only get back new images at 60fps. So we don't know when between frame N and N+1 the LED turned on, just that sometime between now and 1/60th of a second ago, the LED turned on.

The test coprocessor was an Orange Pi 5 running a PhotonVision 2025 (Ubuntu 24.04 based) image, with an ArduCam OV9782 at 1280x800, 60fps, MJPG, running a reflective pipeline.


### Test Results

The videos below show the difference between when the RoboRIO turned the LED on and when PhotonVision first saw a camera frame with the LED on; I've called this difference "error" and plotted it in yellow, in units of seconds. Using the frame time reported by V4L decreases the mean error from 26 ms to 11 ms (below the maximum temporal resolution of my camera).

Old CSCore:
```{raw} html
<video width="85%" controls>
<source src="../../../_static/assets/latency-tests/ov9782_1280x720x60xMJPG_old.mp4" type="video/mp4">
Your browser does not support the video tag.
</video>
```
CSCore using V4L frame time:
```{raw} html
<video width="85%" controls>
<source src="../../../_static/assets/latency-tests/ov9782_1280x720x60xMJPG_new.mp4" type="video/mp4">
Your browser does not support the video tag.
</video>
```

With the camera capturing at 60fps, the time between successive frames is only ~16.7 ms, so I don't expect to be able to resolve anything smaller. Given sufficient samples, perfect latency compensation, and more noise in the robot program to vary the LED toggle times, I'd expect the mean error to converge to roughly half the interval between frames, so landing within one frame interval with the CSCore updates is a very good sign.
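
As a back-of-the-envelope check: assuming the LED toggle lands uniformly at random within a frame interval, the expected delay until the start of the next exposure is

```{math}
\mathbb{E}[\text{error}] = \frac{T}{2} = \frac{1}{2 \cdot 60\,\mathrm{Hz}} \approx 8.3\ \mathrm{ms}
```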

### Future Work

This test makes no effort to separate error introduced by time synchronization from error introduced by frame-time measurement; we're just interested in overall error. Future work could investigate each source's contribution separately.
1 change: 1 addition & 0 deletions docs/source/docs/contributing/design-descriptions/index.md
@@ -5,4 +5,5 @@
image-rotation
time-sync
camera-matching
e2e-latency
```
14 changes: 8 additions & 6 deletions photon-client/src/components/settings/ObjectDetectionCard.vue
@@ -4,11 +4,12 @@ import axios from "axios";
import { useStateStore } from "@/stores/StateStore";
import { useSettingsStore } from "@/stores/settings/GeneralSettingsStore";
const showObjectDetectionImportDialog = ref(false);
const showImportDialog = ref(false);
const importRKNNFile = ref<File | null>(null);
const importLabelsFile = ref<File | null>(null);
const handleObjectDetectionImport = () => {
// TODO gray out the button when model is uploading
const handleImport = async () => {
if (importRKNNFile.value === null || importLabelsFile.value === null) return;
const formData = new FormData();
@@ -50,7 +51,8 @@ const handleObjectDetectionImport = () => {
}
});
showObjectDetectionImportDialog.value = false;
showImportDialog.value = false;
importRKNNFile.value = null;
importLabelsFile.value = null;
};
@@ -68,12 +70,12 @@ const supportedModels = computed(() => {
<div class="pa-6 pt-0">
<v-row>
<v-col cols="12 ">
<v-btn color="secondary" class="justify-center" @click="() => (showObjectDetectionImportDialog = true)">
<v-btn color="secondary" @click="() => (showImportDialog = true)" class="justify-center">
<v-icon left class="open-icon"> mdi-import </v-icon>
<span class="open-label">Import New Model</span>
</v-btn>
<v-dialog
v-model="showObjectDetectionImportDialog"
v-model="showImportDialog"
width="600"
@input="
() => {
@@ -105,7 +107,7 @@ const supportedModels = computed(() => {
<v-btn
color="secondary"
:disabled="importRKNNFile === null || importLabelsFile === null"
@click="handleObjectDetectionImport"
@click="handleImport"
>
<v-icon left class="open-icon"> mdi-import </v-icon>
<span class="open-label">Import Object Detection Model</span>
@@ -221,29 +221,28 @@ private void loadModel(File model) {
/**
* Discovers DNN models from the specified folder.
*
* @param modelsFolder The folder where the models are stored
* @param modelsDirectory The folder where the models are stored
*/
public void discoverModels(File modelsFolder) {
public void discoverModels(File modelsDirectory) {
logger.info("Supported backends: " + supportedBackends);

if (!modelsFolder.exists()) {
logger.error("Models folder " + modelsFolder.getAbsolutePath() + " does not exist.");
if (!modelsDirectory.exists()) {
logger.error("Models folder " + modelsDirectory.getAbsolutePath() + " does not exist.");
return;
}

if (models == null) {
models = new HashMap<>();
}
models = new HashMap<>();

try {
Files.walk(modelsFolder.toPath())
Files.walk(modelsDirectory.toPath())
.filter(Files::isRegularFile)
.forEach(path -> loadModel(path.toFile()));
} catch (IOException e) {
logger.error("Failed to discover models at " + modelsFolder.getAbsolutePath(), e);
logger.error("Failed to discover models at " + modelsDirectory.getAbsolutePath(), e);
}

// After loading all of the models, sort them by name to ensure a consistent ordering
// After loading all of the models, sort them by name to ensure a consistent
// ordering
models.forEach(
(backend, backendModels) ->
backendModels.sort((a, b) -> a.getName().compareTo(b.getName())));
10 changes: 5 additions & 5 deletions photon-lib/py/setup.py
@@ -59,11 +59,11 @@
version=versionString,
install_requires=[
"numpy~=2.1",
"wpilib<2026,>=2025.0.0b1",
"robotpy-wpimath<2026,>=2025.0.0b1",
"robotpy-apriltag<2026,>=2025.0.0b1",
"robotpy-cscore<2026,>=2025.0.0b1",
"pyntcore<2026,>=2025.0.0b1",
"wpilib<2026,>=2025.2.1",
"robotpy-wpimath<2026,>=2025.2.1",
"robotpy-apriltag<2026,>=2025.2.1",
"robotpy-cscore<2026,>=2025.2.1",
"pyntcore<2026,>=2025.2.1",
"opencv-python;platform_machine!='roborio'",
],
description=descriptionStr,
8 changes: 6 additions & 2 deletions photon-lib/py/test/visionSystemSim_test.py
@@ -604,10 +604,14 @@ def test_TagAmbiguity() -> None:

robotPose = Pose2d()
visionSysSim.update(robotPose)
ambiguity = camera.getLatestResult().getBestTarget().getPoseAmbiguity()
tgt = camera.getLatestResult().getBestTarget()
assert tgt is not None
ambiguity = tgt.getPoseAmbiguity()
assert ambiguity > 0.5, "Tag ambiguity expected to be high"

robotPose = Pose2d(Translation2d(-2.0, -2.0), Rotation2d.fromDegrees(30.0))
visionSysSim.update(robotPose)
ambiguity = camera.getLatestResult().getBestTarget().getPoseAmbiguity()
tgt = camera.getLatestResult().getBestTarget()
assert tgt is not None
ambiguity = tgt.getPoseAmbiguity()
assert 0 < ambiguity < 0.2, "Tag ambiguity expected to be low"
5 changes: 0 additions & 5 deletions photon-server/src/main/java/org/photonvision/Main.java
@@ -74,11 +74,6 @@ private static boolean handleArgs(String[] args) throws ParseException {
"Run in test mode with 2019 and 2020 WPI field images in place of cameras");

options.addOption("p", "path", true, "Point test mode to a specific folder");
options.addOption(
"i",
"ignore-cameras",
true,
"Ignore cameras that match a regex. Uses camera name as provided by cscore.");
options.addOption("n", "disable-networking", false, "Disables control device network settings");
options.addOption(
"c",