Skip to content

Commit

Permalink
Rebase from 'upstream'
Browse files Browse the repository at this point in the history
  • Loading branch information
Serafadam committed Dec 31, 2024
1 parent e4fd651 commit 6e079d0
Show file tree
Hide file tree
Showing 17 changed files with 507 additions and 16 deletions.
24 changes: 24 additions & 0 deletions CHANGELOG.rst
Original file line number Diff line number Diff line change
@@ -1,6 +1,30 @@
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Changelog for package depthai
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

2.29.0 (2024-11-26)
-------------------
* Features
* Add the ability to change the calibration on the device in runtime with the new `dai::Device.setCalibration()` method and to retrieve it with the `dai::Device.getCalibration()`.
* New `StereoDepth` presets:
* `DEFAULT`
* `FACE`
* `HIGH_DETAIL`
* `ROBOTICS`
* Multiple camera improvements (more details in https://github.com/luxonis/depthai-core/pull/972):
Expose more downsampling modes when picking a lower-than-native resolution
* Expose more binning modes when binning is picked on IMX582/586 (`sum` and `avg`)
* HDR on IMX582/586
Option to bypass 3A to have manual exposure/ISO take effect faster
* Initial support for new Sony 4K Starvis sensors: IMX678 and IMX715
* Option to set the main camera to drive auto-exposure and auto-white-balance in multi-camera configurations
Improved StereoDepth filtering and an option to set a custom order of filters
* Disparity is first scaled to 13 bit before going through filtering, which results in filters being more effective.


* Misc
* Remove false reports on crashes that happened on device destruction
* Add `getWidth()` and `getHeight()` API to `EncodedFrame`

2.28.0 (2024-08-21)
-------------------
Features
Expand Down
5 changes: 1 addition & 4 deletions CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -13,9 +13,6 @@ set(CMAKE_POSITION_INDEPENDENT_CODE ON)
# Early options
option(DEPTHAI_ENABLE_LIBUSB "Enable usage of libusb and interaction with USB devices" ON)

# Early options
option(DEPTHAI_ENABLE_LIBUSB "Enable usage of libusb and interaction with USB devices" ON)

# Set to use native tls for windows before including Hunter (used for Curl)
if(WIN32)
set(DEPTHAI_CURL_USE_SCHANNEL ON)
Expand Down Expand Up @@ -62,7 +59,7 @@ if(WIN32)
endif()

# Create depthai project
project(depthai VERSION "2.28.0" LANGUAGES CXX C)
project(depthai VERSION "2.29.0" LANGUAGES CXX C)
get_directory_property(has_parent PARENT_DIRECTORY)
if(has_parent)
set(DEPTHAI_VERSION ${PROJECT_VERSION} PARENT_SCOPE)
Expand Down
2 changes: 1 addition & 1 deletion cmake/Depthai/DepthaiDeviceSideConfig.cmake
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
set(DEPTHAI_DEVICE_SIDE_MATURITY "snapshot")

# "full commit hash of device side binary"
set(DEPTHAI_DEVICE_SIDE_COMMIT "9ed7c9ae4c232ff93a3500a585a6b1c00650e22c")
set(DEPTHAI_DEVICE_SIDE_COMMIT "4d360b5c56225f23e9a3d3a3999ce46c90cfdeaf")

# "version if applicable"
set(DEPTHAI_DEVICE_SIDE_VERSION "")
12 changes: 12 additions & 0 deletions examples/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -134,6 +134,14 @@ hunter_private_data(
LOCATION tiny_yolo_v4_blob
)

# YoloV8 resource
hunter_private_data(
URL "https://artifacts.luxonis.com/artifactory/luxonis-depthai-data-local/network/yolov8n_coco_640x352.blob"
SHA1 "6b88a80ffb4267e253a89cb2b3e4e186453431b0"
FILE "yolov8n_coco_640x352.blob"
LOCATION yolov8n_blob
)

# NeuralNetwork node, mobilenet example, 5 shaves
hunter_private_data(
URL "https://artifacts.luxonis.com/artifactory/luxonis-depthai-data-local/network/mobilenet-ssd_openvino_2021.4_5shave.blob"
Expand Down Expand Up @@ -220,6 +228,7 @@ hunter_private_data(
LOCATION normalization_model
)


## message(STATUS "Location of test1.data: ${test1_data}")

# bootloader
Expand Down Expand Up @@ -380,8 +389,10 @@ dai_add_example(mjpeg_encoding VideoEncoder/mjpeg_encoding_example.cpp ON OFF)
# Yolo
dai_add_example(tiny_yolo_v4_device_side_decoding Yolo/tiny_yolo.cpp ON OFF)
dai_add_example(tiny_yolo_v3_device_side_decoding Yolo/tiny_yolo.cpp ON OFF)
dai_add_example(yolo_v8_nano_device_side_decoding Yolo/yolov8_nano.cpp ON OFF)
target_compile_definitions(tiny_yolo_v4_device_side_decoding PRIVATE BLOB_PATH="${tiny_yolo_v4_blob}")
target_compile_definitions(tiny_yolo_v3_device_side_decoding PRIVATE BLOB_PATH="${tiny_yolo_v3_blob}")
target_compile_definitions(yolo_v8_nano_device_side_decoding PRIVATE BLOB_PATH="${yolov8n_blob}")
dai_add_example(apriltag AprilTag/apriltag.cpp ON OFF)
dai_add_example(apriltag_rgb AprilTag/apriltag_rgb.cpp ON OFF)

Expand Down Expand Up @@ -421,3 +432,4 @@ target_compile_definitions(cast_diff PRIVATE BLOB_PATH="${diff_model}")
dai_add_example(spatial_tiny_yolo_tof_v3 SpatialDetection/spatial_tiny_yolo_tof.cpp OFF OFF)
dai_add_example(spatial_tiny_yolo_tof_v4 SpatialDetection/spatial_tiny_yolo_tof.cpp OFF OFF)
target_compile_definitions(spatial_tiny_yolo_tof_v3 PRIVATE BLOB_PATH="${tiny_yolo_v3_blob}")
target_compile_definitions(spatial_tiny_yolo_tof_v4 PRIVATE BLOB_PATH="${tiny_yolo_v4_blob}")
157 changes: 157 additions & 0 deletions examples/Yolo/yolov8_nano.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,157 @@
#include <atomic>
#include <chrono>
#include <iomanip>
#include <iostream>
#include <sstream>

// Includes common necessary includes for development using depthai library
#include "depthai/depthai.hpp"

/*
The code is the same as for Tiny-yolo-V3 and V4, the only difference is the blob file and anchors that are not needed for V8.
The blob was compiled following this tutorial: https://github.com/TNTWEN/OpenVINO-YOLOV4
*/

// Class labels for the 80 COCO categories; a detection's integer label id
// indexes into this vector.
static const std::vector<std::string> labelMap = {
"person", "bicycle", "car", "motorbike", "aeroplane", "bus", "train", "truck", "boat",
"traffic light", "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse",
"sheep", "cow", "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag",
"tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove",
"skateboard", "surfboard", "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon",
"bowl", "banana", "apple", "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza",
"donut", "cake", "chair", "sofa", "pottedplant", "bed", "diningtable", "toilet", "tvmonitor",
"laptop", "mouse", "remote", "keyboard", "cell phone", "microwave", "oven", "toaster", "sink",
"refrigerator", "book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush"};

// If true, display the network's passthrough frame so boxes line up with the
// exact image the NN processed; if false, show the live camera preview.
static std::atomic<bool> syncNN{true};

int main(int argc, char** argv) {
using namespace std;
using namespace std::chrono;
std::string nnPath(BLOB_PATH);

// If path to blob specified, use that
if(argc > 1) {
nnPath = std::string(argv[1]);
}

// Print which blob we are using
printf("Using blob at path: %s\n", nnPath.c_str());

// Create pipeline
dai::Pipeline pipeline;

// Define sources and outputs
auto camRgb = pipeline.create<dai::node::ColorCamera>();
auto detectionNetwork = pipeline.create<dai::node::YoloDetectionNetwork>();
auto xoutRgb = pipeline.create<dai::node::XLinkOut>();
auto nnOut = pipeline.create<dai::node::XLinkOut>();

xoutRgb->setStreamName("rgb");
nnOut->setStreamName("detections");

// Properties
camRgb->setPreviewSize(640, 352);
camRgb->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P);
camRgb->setInterleaved(false);
camRgb->setColorOrder(dai::ColorCameraProperties::ColorOrder::BGR);
camRgb->setFps(40);

// Network specific settings
detectionNetwork->setConfidenceThreshold(0.5f);
detectionNetwork->setNumClasses(80);
detectionNetwork->setCoordinateSize(4);
detectionNetwork->setIouThreshold(0.5f);
detectionNetwork->setBlobPath(nnPath);
detectionNetwork->setNumInferenceThreads(2);
detectionNetwork->input.setBlocking(false);

// Linking
camRgb->preview.link(detectionNetwork->input);
if(syncNN) {
detectionNetwork->passthrough.link(xoutRgb->input);
} else {
camRgb->preview.link(xoutRgb->input);
}

detectionNetwork->out.link(nnOut->input);

// Connect to device and start pipeline
dai::Device device(pipeline);

// Output queues will be used to get the rgb frames and nn data from the outputs defined above
auto qRgb = device.getOutputQueue("rgb", 4, false);
auto qDet = device.getOutputQueue("detections", 4, false);

cv::Mat frame;
std::vector<dai::ImgDetection> detections;
auto startTime = steady_clock::now();
int counter = 0;
float fps = 0;
auto color2 = cv::Scalar(255, 255, 255);

// Add bounding boxes and text to the frame and show it to the user
auto displayFrame = [](std::string name, cv::Mat frame, std::vector<dai::ImgDetection>& detections) {
auto color = cv::Scalar(255, 0, 0);
// nn data, being the bounding box locations, are in <0..1> range - they need to be normalized with frame width/height
for(auto& detection : detections) {
int x1 = detection.xmin * frame.cols;
int y1 = detection.ymin * frame.rows;
int x2 = detection.xmax * frame.cols;
int y2 = detection.ymax * frame.rows;

uint32_t labelIndex = detection.label;
std::string labelStr = to_string(labelIndex);
if(labelIndex < labelMap.size()) {
labelStr = labelMap[labelIndex];
}
cv::putText(frame, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, 255);
std::stringstream confStr;
confStr << std::fixed << std::setprecision(2) << detection.confidence * 100;
cv::putText(frame, confStr.str(), cv::Point(x1 + 10, y1 + 40), cv::FONT_HERSHEY_TRIPLEX, 0.5, 255);
cv::rectangle(frame, cv::Rect(cv::Point(x1, y1), cv::Point(x2, y2)), color, cv::FONT_HERSHEY_SIMPLEX);
}
// Show the frame
cv::imshow(name, frame);
};

while(true) {
std::shared_ptr<dai::ImgFrame> inRgb;
std::shared_ptr<dai::ImgDetections> inDet;

if(syncNN) {
inRgb = qRgb->get<dai::ImgFrame>();
inDet = qDet->get<dai::ImgDetections>();
} else {
inRgb = qRgb->tryGet<dai::ImgFrame>();
inDet = qDet->tryGet<dai::ImgDetections>();
}

counter++;
auto currentTime = steady_clock::now();
auto elapsed = duration_cast<duration<float>>(currentTime - startTime);
if(elapsed > seconds(1)) {
fps = counter / elapsed.count();
counter = 0;
startTime = currentTime;
}

if(inRgb) {
frame = inRgb->getCvFrame();
std::stringstream fpsStr;
fpsStr << "NN fps: " << std::fixed << std::setprecision(2) << fps;
cv::putText(frame, fpsStr.str(), cv::Point(2, inRgb->getHeight() - 4), cv::FONT_HERSHEY_TRIPLEX, 0.4, color2);
}

if(inDet) {
detections = inDet->detections;
}

if(!frame.empty()) {
displayFrame("rgb", frame, detections);
}

int key = cv::waitKey(1);
if(key == 'q' || key == 'Q') {
return 0;
}
}
return 0;
}
17 changes: 17 additions & 0 deletions include/depthai/device/DeviceBase.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -758,6 +758,23 @@ class DeviceBase {
*/
void flashCalibration2(CalibrationHandler calibrationDataHandler);

/**
* Sets the Calibration at runtime. This is not persistent and will be lost after device reset.
*
* @throws std::runtime_exception if failed to set the calibration
* @param calibrationObj CalibrationHandler object which is loaded with calibration information.
*
*/
void setCalibration(CalibrationHandler calibrationDataHandler);

/**
* Retrieves the CalibrationHandler object containing the non-persistent calibration
*
* @throws std::runtime_exception if failed to get the calibration
* @returns The CalibrationHandler object containing the non-persistent calibration
*/
CalibrationHandler getCalibration();

/**
* Fetches the EEPROM data from the device and loads it into CalibrationHandler object
* If no calibration is flashed, it returns default
Expand Down
23 changes: 23 additions & 0 deletions include/depthai/pipeline/datatype/EncodedFrame.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,15 @@ class EncodedFrame : public Buffer {
* Retrieves instance number
*/
unsigned int getInstanceNum() const;
/**
* Retrieves image width in pixels
*/
unsigned int getWidth() const;

/**
* Retrieves image height in pixels
*/
unsigned int getHeight() const;
/**
* Retrieves exposure time
*/
Expand Down Expand Up @@ -111,6 +120,20 @@ class EncodedFrame : public Buffer {
*/
EncodedFrame& setInstanceNum(unsigned int instance);

/**
* Specifies frame width
*
* @param width frame width
*/
EncodedFrame& setWidth(unsigned int width);

/**
* Specifies frame height
*
* @param height frame height
*/
EncodedFrame& setHeight(unsigned int height);

/**
* Specifies the encoding quality
*
Expand Down
9 changes: 7 additions & 2 deletions include/depthai/pipeline/node/StereoDepth.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -23,11 +23,16 @@ class StereoDepth : public NodeCRTP<Node, StereoDepth, StereoDepthProperties> {
/**
* Prefers accuracy over density. More invalid depth values, but less outliers.
*/
HIGH_ACCURACY,
HIGH_ACCURACY [[deprecated("Will be removed in future releases and replaced with DEFAULT")]],
/**
* Prefers density over accuracy. Less invalid depth values, but more outliers.
*/
HIGH_DENSITY
HIGH_DENSITY [[deprecated("Will be removed in future releases and replaced with DEFAULT")]],

DEFAULT,
FACE,
HIGH_DETAIL,
ROBOTICS
};

protected:
Expand Down
2 changes: 1 addition & 1 deletion package.xml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
<package format="3">
<name>depthai</name>
<version>2.28.0</version>
<version>2.29.0</version>
<description>DepthAI core is a C++ library which comes with firmware and an API to interact with OAK Platform</description>

<maintainer email="[email protected]">Adam Serafin</maintainer>
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -437,6 +437,7 @@ struct RawCameraControl : public RawBuffer {
uint16_t wbColorTemp; // 1000 .. 12000
uint8_t lowPowerNumFramesBurst;
uint8_t lowPowerNumFramesDiscard;
std::vector<std::pair<std::string, std::string>> miscControls;

void setCommand(Command cmd, bool value = true) {
uint64_t mask = 1ull << (uint8_t)cmd;
Expand Down Expand Up @@ -493,7 +494,8 @@ struct RawCameraControl : public RawBuffer {
chromaDenoise,
wbColorTemp,
lowPowerNumFramesBurst,
lowPowerNumFramesDiscard);
lowPowerNumFramesDiscard,
miscControls);
};

} // namespace dai
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,9 @@ struct RawEncodedFrame : public RawBuffer {
CameraSettings cam;
uint32_t instanceNum = 0; // Which source created this frame (color, mono, ...)

unsigned int width; // width in pixels
unsigned int height; // height in pixels

uint32_t quality;
uint32_t bitrate;
Profile profile;
Expand Down
Loading

0 comments on commit 6e079d0

Please sign in to comment.