diff --git a/include/detect/uv_led_detect_adaptive.cpp b/include/detect/uv_led_detect_adaptive.cpp index b23910e..7420a71 100644 --- a/include/detect/uv_led_detect_adaptive.cpp +++ b/include/detect/uv_led_detect_adaptive.cpp @@ -106,49 +106,12 @@ const std::vector& standardPoints) { detectedPoints.insert(detectedPoints.end(), roiDetectedPoints.begin(), roiDetectedPoints.end()); } -/* // Process each tracking point - for (const auto& point : trackingPoints) { - // Apply adaptive thresholding to the ROI around the tracking point - //std::cout << "[UVDARLedDetectAdaptive]: PROCESSING TRACKING POINT: " << point << std::endl; - std::vector roiDetectedPoints = applyAdaptiveThreshold(inputImage, point, neighborhoodSize_); - numRois++; - - - // Check if the detected points are empty - if (roiDetectedPoints.empty()){ - //std::cout << "[UVDARLedDetectAdaptive]: EMPTY ROI DETECTED POINTS" << std::endl; - continue; - } - // Check each point for uniqueness before adding - //detectedPoints.insert(detectedPoints.end(), roiDetectedPoints.begin(), roiDetectedPoints.end()); - // Check each point for uniqueness before adding - for (const auto& roiPoint : roiDetectedPoints) { - if (uniquePoints.insert(roiPoint).second) { // .second is true if the insertion is successful (i.e., the point is unique) - detectedPoints.push_back(roiPoint); // Insert only the unique point - } - } - } */ std::vector final_detected_points = mergePoints(detectedPoints, standardPoints, point_similarity_threshold_); - - /* - - //Print the final detected points - for (int i = 0; i < final_detected_points.size(); i++) { - std::cout << "[UVDARLedDetectAdaptive]: Final detected point " << i << ": " << final_detected_points[i] << std::endl; - } - - */ detectedPoints = final_detected_points; - //standardPoints = final_detected_points; - - //Fail if no points are detected TODO - //if (final_detected_points.size() == 0){ - // return false; - //} - return true;//if successful + return true; } //} @@ -179,7 +142,7 @@ std::vector UVDARLedDetectAdaptive::applyAdaptiveThreshold(const cv:: // Compute and plot histograms - /* cv::Mat histOriginal = plotHistogram(grayImage); */ + cv::Mat histOriginal = plotHistogram(grayImage); /* cv::Mat grayImage; @@ -211,7 +174,6 @@ std::vector UVDARLedDetectAdaptive::applyAdaptiveThreshold(const cv:: // Create unsharp mask by subtracting the blurred version from the original image cv::Mat unsharpMask = grayImage - blurred; - //saveRoiImage(unsharpMask, point, roiIndex_++, 0, 0.0); // Apply the unsharp mask to the original image to enhance edges cv::Mat enhancedImage; cv::addWeighted(grayImage, 0.25, unsharpMask, 1.75, 0, enhancedImage); @@ -226,15 +188,8 @@ std::vector UVDARLedDetectAdaptive::applyAdaptiveThreshold(const cv:: fs::create_directories(outputDir); - - //saveRoiImage(histOriginal, 0, roiIndex_++, 0, 0.0); - //saveRoiImage(histEnhanced, 0, roiIndex_++, 0, 0.0); - cv::Mat binaryRoi; - //cv::adaptiveThreshold(roiImage, binaryRoi, 255, cv::ADAPTIVE_THRESH_GAUSSIAN_C, cv::THRESH_BINARY, 11, 2); - //Print the adaptive method - //std::cout << "[UVDARLedDetectAdaptive]: ADAPTIVE METHOD: " << adaptive_method_ << std::endl; cv::Mat binaryRoiOriginal; if( adaptive_method_ == "Otsu" || adaptive_method_ == "otsu"){ //Apply Otsu's thresholding with the enhanced ROI @@ -248,14 +203,13 @@ std::vector UVDARLedDetectAdaptive::applyAdaptiveThreshold(const cv:: else{ //std::cout << "[UVDARLedDetectAdaptive]: APPLYING KL DIVERGENCE" << std::endl; auto [thresholdValue, minKLDivergence] = 
findOptimalThresholdUsingKL(enhancedImage); - //Using entropy - //auto [thresholdValue, minKLDivergence] = findOptimalThresholdUsingEntropy(enhancedImage); thresholdValue_ = thresholdValue; minKLDivergence_ = minKLDivergence; cv::threshold(enhancedImage,binaryRoi, thresholdValue_, 255, cv::THRESH_BINARY); } + //Save the original and enhanced images /* cv::imwrite(outputDir + "/im_" + std::to_string(index) + ".png", grayImage); */ index++; @@ -342,11 +296,9 @@ std::vector UVDARLedDetectAdaptive::applyAdaptiveThreshold(const cv:: else{ klDivergence.push_back(minKLDivergence_); } - //klDivergence.push_back(minKLDivergence); validRoi.push_back(0); //Return empty roiDetectedPoints - std::vector empty_roiDetectedPoints = {}; return empty_roiDetectedPoints; //Clear the lastProcessedROIs_ and lastProcessedBinaryROIs_ vectors @@ -357,8 +309,6 @@ std::vector UVDARLedDetectAdaptive::applyAdaptiveThreshold(const cv:: lastProcessedROIs_.push_back(roi); // Store the ROI for visualization // Store the binary ROI (For debugging/visualization) lastProcessedBinaryROIs_.push_back(binaryRoi); - //saveRoiImage(binaryRoi, point, roiIndex_++, thresholdValue_, 1.0); - //std::cout << "[UVDARLedDetectAdaptive]: ADDING ROI DETECTED POINTS: " << roiDetectedPoints.size() << std::endl; numberDetectedPoints.push_back(roiDetectedPoints.size()); @@ -404,9 +354,6 @@ std::vector UVDARLedDetectAdaptive::mergeOverlappingROIs(const std::ve for (int j = i + 1; j < rois.size(); j++) { if (merged[j]) continue; if (isOverlapping(currentROI, rois[j], overlapThreshold)) { - //Print debug message - //std::cout << "[UVDARLedDetectAdaptive]: MERGING OVERLAPPING ROIS" << std::endl; - // Collect the corners of both rectangles std::vector corners; corners.push_back(cv::Point(currentROI.x, currentROI.y)); corners.push_back(cv::Point(currentROI.x + currentROI.width, currentROI.y + currentROI.height)); @@ -449,8 +396,6 @@ int UVDARLedDetectAdaptive::adjustNeighborhoodSizeBasedOnArea(const cv::Mat& roi // Print the area ratio std::cout << "[UVDARLedDetectAdaptive]: AREA RATIO: " << areaRatio << std::endl; - // Revised adjustment calculation - // Example adjustment: scale currentSize based on deviation from a desired area ratio const double targetAreaRatioSparse = 0.1, targetAreaRatioDense = 0.5; double adjustmentFactor = 0.5; // Proportion of currentSize to adjust by, needs tuning @@ -486,9 +431,6 @@ std::vector UVDARLedDetectAdaptive::detectPointsFromRoi(const cv::Mat cv::Mat labels; int numComponents = cv::connectedComponents(mask, labels, 8, CV_32S); - - //printf("Number of components: %d\n", numComponents); - // Iterate through components to calculate centroid for (int i = 1; i < numComponents; i++) { // Start from 1 to ignore the background cv::Mat componentMask = (labels == i); @@ -557,8 +499,6 @@ std::vector UVDARLedDetectAdaptive::mergePoints(const std::vector UVDARLedDetectAdaptive::mergePoints(const std::vector 0 && overallHist[i] > 0) { - klDivergence += overallHist[i] * log((overallHist[i] + epsilon) / (segmentHist[i] + epsilon)); - } - } - return klDivergence; */ - } @@ -630,16 +560,11 @@ std::tuple UVDARLedDetectAdaptive::findOptimalThresholdUsingKL(cons * optimalThreshold: The optimal threshold */ - //Print shape of roiImage - //std::cout << "[UVDARLedDetectAdaptive]: ROI IMAGE SHAPE: " << roiImage.size() << std::endl; - // Assuming 'roiImage' is already in a binary form (0s and 1s) + int countZeros = cv::countNonZero(roiImage); int totalPixels = roiImage.total(); int countOnes = totalPixels - countZeros; - //Print the count of 
zeros and ones - //std::cout << "[UVDARLedDetectAdaptive]: COUNT OF ZEROS: " << countZeros << std::endl; - //std::cout << "[UVDARLedDetectAdaptive]: COUNT OF ONES: " << countOnes << std::endl; // Calculate the histogram of the ROI image @@ -656,7 +581,6 @@ std::tuple UVDARLedDetectAdaptive::findOptimalThresholdUsingKL(cons int optimalThreshold = 0; - // Convert cv::Mat hist to std::vector for easier manipulation std::vector Q(histSize); for (int i = 0; i < histSize; ++i) { Q[i] = hist.at(i); @@ -684,26 +608,11 @@ std::tuple UVDARLedDetectAdaptive::findOptimalThresholdUsingKL(cons double klDivBelow = calculateKLDivergence(normalizedP_below, Q, t + 1); double klDivAbove = calculateKLDivergence(normalizedP_above, Q, histSize - t - 1); - /* std::cout << "Threshold: " << t << std::endl; - std::cout << "Sum Below: " << sumBelow << ", Sum Above: " << sumAbove << std::endl; - std::cout << "Normalized P_below: "; - for (double p : normalizedP_below) std::cout << p << " "; - std::cout << std::endl; - - std::cout << "Normalized P_above: "; - for (double p : normalizedP_above) std::cout << p << " "; - std::cout << std::endl; - - //Print the KL divergence - std::cout << "[UVDARLedDetectAdaptive]: KL DIVERGENCE BELOW: " << klDivBelow << std::endl; - std::cout << "[UVDARLedDetectAdaptive]: KL DIVERGENCE ABOVE: " << klDivAbove << std::endl; */ - + double totalKLDiv = klDivBelow + klDivAbove; if (totalKLDiv < minKLDivergence && totalKLDiv > 0.0) { - //Print found better threshold - //std::cout << "[UVDARLedDetectAdaptive]: FOUND BETTER THRESHOLD: " << t << std::endl; - //std::cout << "[UVDARLedDetectAdaptive]: MIN KL DIVERGENCE: " << totalKLDiv << std::endl; + minKLDivergence = totalKLDiv; optimalThreshold = t; @@ -712,10 +621,6 @@ std::tuple UVDARLedDetectAdaptive::findOptimalThresholdUsingKL(cons } - //Print the minKLDivergence and optimalThreshold - //std::cout << "[UVDARLedDetectAdaptive]: MIN KL DIVERGENCE: " << minKLDivergence << std::endl; - //std::cout << "[UVDARLedDetectAdaptive]: OPTIMAL THRESHOLD: " << optimalThreshold << std::endl; - return std::make_tuple(optimalThreshold, minKLDivergence); @@ -760,8 +665,6 @@ std::tuple UVDARLedDetectAdaptive::findOptimalThresholdUsingEntropy } } - //std::cout << "[UVDARLedDetectAdaptive]: MAX ENTROPY: " << maxEntropy << std::endl; - //std::cout << "[UVDARLedDetectAdaptive]: OPTIMAL THRESHOLD: " << optimalThreshold << std::endl; return std::make_tuple(optimalThreshold, maxEntropy); } @@ -806,15 +709,7 @@ void UVDARLedDetectAdaptive::generateVisualizationAdaptive(const cv::Mat& inputI binaryRoi.copyTo(visualization_image(roi)); // Optionally draw a rectangle around the ROI - cv::rectangle(visualization_image, roi, cv::Scalar(0, 255, 0)); // Green rectangle - - - // // Draw detected points within this ROI - //for (const auto& point : detectedPoints) { - // cv::circle(visualization_image, point, 3, cv::Scalar(0, 0, 255), -1); // Red circle for each point - //} - - + cv::rectangle(visualization_image, roi, cv::Scalar(0, 255, 0)); // Green rectangle } } diff --git a/src/detector.cpp b/src/detector.cpp index 3b29884..db29231 100644 --- a/src/detector.cpp +++ b/src/detector.cpp @@ -99,8 +99,7 @@ class UVDARDetector : public nodelet::Nodelet{ } //} - /* create subscribers //{ */ - //ros::Subscriber sub_blinkers_seen = nh_.subscribe("pub_blinkers_seen_", 1, &UVDARDetector::blinkersSeenCallback, this); + if(_adaptive_threshold_){ @@ -177,12 +176,6 @@ class UVDARDetector : public nodelet::Nodelet{ detected_points_.push_back(std::vector()); 
sun_points_.push_back(std::vector()); - //mutex_camera_image_.push_back(std::make_unique()); - - /* image_yet_received_.push_back(false); */ - /* initial_delay_start_.push_back(ros::Time::now()); */ - - /* mutex_camera_image_.push_back(std::make_unique()); */ ROS_INFO("[UVDARDetector]: Initializing FAST-based marker detection..."); uvdf_ = std::make_unique( @@ -205,19 +198,6 @@ class UVDARDetector : public nodelet::Nodelet{ sub_images_.push_back(nh_.subscribe(_camera_topics[i], 1, cals_image_[i])); } - - - /* - for (size_t i = 0; i < _camera_topics.size(); ++i) { - sub_blinkers_seen_.push_back(nh_.subscribe(_camera_topics[i], 1, cals_blinkers_seen_[i])); - } - */ - - //ros::Subscriber sub_blinkers_seen = nh_.subscribe(pub_blinkers_seen_topic, 1, &UVDARDetector::blinkersSeenCallback, this); - - //TODO ASK ABOUT THIS _camera_topics load param - //} - /* create pubslishers //{ */ param_loader.loadParam("publish_sun_points", _publish_sun_points_, bool(false)); @@ -358,7 +338,6 @@ class UVDARDetector : public nodelet::Nodelet{ ros::NodeHandle nh("~"); timer_process_tracking_[image_index] = nh.createTimer(ros::Duration(0), boost::bind(&UVDARDetector::processTrackingPoints, this, _1, points_msg, image_index), true, true); - //ROS_INFO_STREAM("[UVDARDetector]: Received tracking points from camera " << image_index); } //} @@ -427,11 +406,6 @@ class UVDARDetector : public nodelet::Nodelet{ ROS_ERROR_STREAM("There are " << sun_points_[image_index].size() << " detected potential sun points! Check your exposure!"); } - /* //Print the standard points - for (int i = 0; i < detected_points_[image_index].size(); i++) { - ROS_INFO_STREAM("[UVDARDetector]: Standard detected point " << i << ": " << detected_points_[image_index][i]); - } */ - } //} @@ -460,10 +434,7 @@ class UVDARDetector : public nodelet::Nodelet{ return; } - /* //Print the adaptive points - for (int i = 0; i < static_cast(adaptive_detected_points_[image_index].size()); i++) { - ROS_INFO_STREAM("[UVDARDetector]: Adaptive detected point " << i << ": " << adaptive_detected_points_[image_index][i]); - } */ + } //} @@ -516,10 +487,6 @@ class UVDARDetector : public nodelet::Nodelet{ ROS_INFO_STREAM("[UVDARDetector]: No detected points. In camera: " << image_index); } - /* // Print the points - for (size_t i = 0; i < detected_points.size(); i++) { - ROS_INFO_STREAM("[UVDARDetector]: Detected point " << i << ": " << detected_points[i]); - } */ uvdar_core::ImagePointsWithFloatStamped msg_detected; msg_detected.stamp = image->header.stamp; @@ -574,10 +541,6 @@ class UVDARDetector : public nodelet::Nodelet{ ROS_INFO_STREAM("[UVDARDetector]: Tracking points per camera: " << trackingPointsPerCamera[image_index].size()); received_tracking_points_ = true; - /* //Print the points - for (int i = 0; i < static_cast(trackingPointsPerCamera[image_index].size()); i++) { - ROS_INFO_STREAM("[UVDARDetector]: Tracking point " << i << ": " << trackingPointsPerCamera[image_index][i]); - } */ { std::scoped_lock lock(mutex_camera_image_); @@ -625,14 +588,11 @@ class UVDARDetector : public nodelet::Nodelet{ processStandard(image, image_index); - // ROS_INFO_STREAM("Cam" << image_index << ". There are " << detected_points_[image_index].size() << " detected points."); - // ROS_INFO_STREAM("Cam" << image_index << ". There are " << sun_points_[image_index].size() << " detected sun points."); + } } - //ROS_INFO_STREAM("Cam" << image_index << ". There are " << detected_points_[image_index].size() << " detected points."); - //ROS_INFO_STREAM("Cam" << image_index << ". 
There are " << adaptive_detected_points_[image_index].size() << " detected adaptive points."); if (detected_points_[image_index].size()>MAX_POINTS_PER_IMAGE){ ROS_WARN_STREAM("[UVDARDetector]: Over " << MAX_POINTS_PER_IMAGE << " points received. Skipping noisy image."); @@ -720,21 +680,6 @@ class UVDARDetector : public nodelet::Nodelet{ cv::cvtColor(images_current_[image_index], image_rgb, cv::COLOR_GRAY2BGR); image_rgb.copyTo(output_image(cv::Rect(start_point.x,0,images_current_[image_index].cols,images_current_[image_index].rows))); - /* - - // Overlay binary ROIs - const auto& binaryROIs = uvda_[image_index]->getLastProcessedBinaryROIs(); - const auto& rois = uvda_[image_index]->getLastProcessedROIs(); - - for (size_t i = 0; i < binaryROIs.size(); ++i) { - const auto& binaryRoi = binaryROIs[i]; - const auto& roi = rois[i]; - binaryRoi.copyTo(output_image(roi)); // Overlay binary ROI - } - - */ - - for (int j = 0; j < (int)(detected_points_[image_index].size()); j++) { cv::circle(output_image, detected_points_[image_index][j]+start_point, 5, cv::Scalar(255,0,0)); diff --git a/thesis_attachments.zip b/thesis_attachments.zip new file mode 100644 index 0000000..2834a73 Binary files /dev/null and b/thesis_attachments.zip differ diff --git a/thesis_attachments/detector.cpp b/thesis_attachments/detector.cpp new file mode 100644 index 0000000..1c1b509 --- /dev/null +++ b/thesis_attachments/detector.cpp @@ -0,0 +1,792 @@ +#define camera_delay 0.50 +#define MAX_POINTS_PER_IMAGE 100 + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +/* #include */ +#include + +#include "detect/uv_led_detect_fast_cpu.h" +#include "detect/uv_led_detect_fast_gpu.h" + +#include "detect/uv_led_detect_adaptive.h" + +#include +#include +#include +#include + + + +namespace enc = sensor_msgs::image_encodings; + +namespace uvdar { +class UVDARDetector : public nodelet::Nodelet{ +public: + + /* onInit() //{ */ + void onInit() { + /** + * @brief Initializer - loads parameters and initializes necessary structures + */ + ros::NodeHandle nh_ = nodelet::Nodelet::getMTPrivateNodeHandle(); + + mrs_lib::ParamLoader param_loader(nh_, "UVDARDetector"); + + param_loader.loadParam("uav_name", _uav_name_); + + param_loader.loadParam("debug", _debug_, bool(false)); + param_loader.loadParam("gui", _gui_, bool(false)); + param_loader.loadParam("publish_visualization", _publish_visualization_, bool(false)); + + param_loader.loadParam("threshold", _threshold_, 200); + param_loader.loadParam("adaptive_threshold",_adaptive_threshold_,bool(false)); + param_loader.loadParam("adaptive_method",_adaptive_method_,std::string("Otsu")); + param_loader.loadParam("adaptive_debug",_adaptive_debug_,bool(false)); + + + //Print adaptive threshold + if(_adaptive_threshold_){ + ROS_INFO_STREAM("[UVDARDetector]: Adaptive thresholding enabled."); + + //Print selected adaptive method + ROS_INFO_STREAM("[UVDARDetector]: Adaptive method: " << _adaptive_method_); + } + else{ + ROS_INFO_STREAM("[UVDARDetector]: Adaptive thresholding disabled."); + } + + param_loader.loadParam("initial_delay", _initial_delay_, 5.0); + + /* subscribe to cameras //{ */ + std::vector _camera_topics; + param_loader.loadParam("camera_topics", _camera_topics, _camera_topics); + if (_camera_topics.empty()) { + ROS_ERROR("[UVDARDetector]: No camera topics were supplied!"); + return; + } + _camera_count_ = (unsigned int)(_camera_topics.size()); + + ROS_INFO_STREAM("[UVDARDetector]: Camera topics: " << 
_camera_topics.size()); + //} + + /* prepare masks if necessary //{ */ + param_loader.loadParam("use_masks", _use_masks_, bool(false)); + if (_use_masks_){ + param_loader.loadParam("mask_file_names", _mask_file_names_, _mask_file_names_); + + if (_mask_file_names_.size() != _camera_count_){ + ROS_ERROR_STREAM("[UVDARDetector]: Masks are enabled, but the number of mask filenames provided does not match the number of camera topics (" << _camera_count_ << ")!"); + return; + } + + if (!loadMasks()){ + ROS_ERROR("[UVDARDetector]: Masks are enabled, but the mask files could not be loaded!"); + return; + } + } + //} + + + + + if(_adaptive_threshold_){ + + // Load blinkers seen topics + std::vector _blinkers_seen_topics; + param_loader.loadParam("blinkers_seen_topics", _blinkers_seen_topics, _blinkers_seen_topics); + + + if (_blinkers_seen_topics.empty()) { + ROS_ERROR("[UVDARDetector]: No blinkers seen topics were supplied!"); + return; + } else { + ROS_INFO_STREAM("[UVDARDetector]: Blinkers seen topics: " << _blinkers_seen_topics.size()); + } + + + ROS_INFO_STREAM("[UVDARDetector]: Initializing blinkers seen topic callbacks and objects..."); + + // Create callbacks, timers and process objects for each camera + + for (unsigned int i = 0; i < _blinkers_seen_topics.size(); ++i) { + blinkers_seen_callback_t callback = [image_index=i, this] (const uvdar_core::ImagePointsWithFloatStampedConstPtr& points_msg) { + blinkersSeenCallback(points_msg, image_index); + }; + + ROS_INFO_STREAM("[UVDARDetector]: Initializing blinkers seen topic callback for camera " << i << "..."); + cals_blinkers_seen_.push_back(callback); + timer_process_tracking_.push_back(ros::Timer()); + + //trackingPointsPerCamera.resize(_camera_count_); + trackingPointsPerCamera.push_back(std::vector()); + adaptive_detected_points_.push_back(std::vector()); + + ROS_INFO("[UVDARDetector]: Initializing ADAPTIVE-based marker detection..."); + uvda_.push_back(std::make_unique( + 20, + 5.0, + _adaptive_method_, + _adaptive_debug_ + )); + if (!uvda_.back()){ + ROS_ERROR("[UVDARDetector]: Failed to initialize ADAPTIVE-based marker detection!"); + return; + } + + } + + // Subscribe to blinkers seen topics + for (size_t i = 0; i < _blinkers_seen_topics.size(); ++i) { + sub_blinkers_seen_.push_back(nh_.subscribe(_blinkers_seen_topics[i], 1, cals_blinkers_seen_[i])); + } + + } + + + + ROS_INFO_STREAM("[UVDARDetector]: Initializing " << _camera_count_ << " cameras..."); + // Create callbacks, timers and process objects for each camera + for (unsigned int i = 0; i < _camera_count_; ++i) { + image_callback_t callback = [image_index=i,this] (const sensor_msgs::ImageConstPtr& image_msg) { + callbackImage(image_msg, image_index); + }; + cals_image_.push_back(callback); + + timer_process_.push_back(ros::Timer()); + + camera_image_sizes_.push_back(cv::Size(0,0)); + + images_current_.push_back(cv::Mat()); + + image_visualization_ = cv::Mat(); + + detected_points_.push_back(std::vector()); + sun_points_.push_back(std::vector()); + + + ROS_INFO("[UVDARDetector]: Initializing FAST-based marker detection..."); + uvdf_ = std::make_unique( + _gui_, + _debug_, + _threshold_, + _threshold_ / 2, + 150, + _masks_ + ); + if (!uvdf_){ + ROS_ERROR("[UVDARDetector]: Failed to initialize FAST-based marker detection!"); + return; + } + + } + + // Subscribe to corresponding topics + for (size_t i = 0; i < _camera_topics.size(); ++i) { + sub_images_.push_back(nh_.subscribe(_camera_topics[i], 1, cals_image_[i])); + } + + + /* create pubslishers //{ */ + 
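+  // One set of publishers is created per camera below: candidate points on
+  // _points_seen_topics[i], optional sun points on the "/sun" subtopic, a
+  // visualization image publisher and, when adaptive thresholding is enabled,
+  // per-camera adaptive-logging publishers.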
param_loader.loadParam("publish_sun_points", _publish_sun_points_, bool(false)); + + std::vector _points_seen_topics; + param_loader.loadParam("points_seen_topics", _points_seen_topics, _points_seen_topics); + if (_points_seen_topics.size() != _camera_count_) { + ROS_ERROR_STREAM("[UVDARDetector] The number of output topics (" << _points_seen_topics.size() << ") does not match the number of cameras (" << _camera_count_ << ")!"); + return; + } + + // Create the publishers + for (size_t i = 0; i < _points_seen_topics.size(); ++i) { + pub_candidate_points_.push_back(nh_.advertise(_points_seen_topics[i], 1)); + + if (_publish_sun_points_){ + pub_sun_points_.push_back(nh_.advertise(_points_seen_topics[i]+"/sun", 1)); + } + } + + // Create a publisher for the visualization images + image_pub_ = nh_.advertise("visualization_image", 1); + + if (_publish_visualization_){ + ROS_INFO_STREAM("[UVDARDetector]: Publishing visualization."); + pub_visualization_ = std::make_unique(boost::make_shared(nh_)); + + } + + if(_adaptive_threshold_){ + + // Create publishers from adaptive logging topics + std::vector _adaptive_logging_topics; + + param_loader.loadParam("adaptive_logging_topics", _adaptive_logging_topics, _adaptive_logging_topics); + + if (_adaptive_logging_topics.size() != _camera_count_) { + ROS_ERROR_STREAM("[UVDARDetector] The number of adaptive logging topics (" << _adaptive_logging_topics.size() << ") does not match the number of cameras (" << _camera_count_ << ")!"); + return; + } + + for (size_t i = 0; i < _adaptive_logging_topics.size(); ++i) { + pub_adaptive_logging_.push_back(nh_.advertise(_adaptive_logging_topics[i], 1)); + ROI_data.push_back(ROIData()); + } + } + + + //} + + if (_gui_ || _publish_visualization_){ + timer_visualization_ = nh_.createTimer(ros::Duration(0.1), &UVDARDetector::VisualizationThread, this, false); + } + + + + ROS_INFO("[UVDARDetector]: Waiting for time..."); + ros::Time::waitForValid(); + + initialized_ = true; + ROS_INFO("[UVDARDetector]: Initialized."); + } + //} + + /* destructor //{ */ + /** + * @brief destructor + */ + ~UVDARDetector() { + } + //} + +private: + + /* loadMasks //{ */ + /** + * @brief Load the mask files - either form absolute path or composite filename found in the mrs_uav_general package. 
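+   * The masks are read with cv::IMREAD_GRAYSCALE, one per camera, in the same
+   * order as the camera topics.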
+ * + * @return success + */ + bool loadMasks(){ + std::string file_name; + for (unsigned int i=0; i<_camera_count_; i++){ + + file_name = _mask_file_names_[i]; + + ROS_INFO_STREAM("[UVDARDetector]: Loading mask file [" << file_name << "]"); + if (!(boost::filesystem::exists(file_name))){ + ROS_ERROR_STREAM("[UVDARDetector]: Mask [" << file_name << "] does not exist!"); + return false; + } + + _masks_.push_back(cv::imread(file_name, cv::IMREAD_GRAYSCALE)); + if (!(_masks_.back().data)){ + ROS_ERROR_STREAM("[UVDARDetector]: Mask [" << file_name << "] could not be loaded!"); + return false; + } + + } + return true; + } + //} + + /* callbackImage //{ */ + /** + * @brief Callback for the input raw image topic from camera + * + * @param image_msg - current image message + * @param image_index - index of the camera that produced this image message + */ + void callbackImage(const sensor_msgs::ImageConstPtr& image_msg, int image_index) { + cv_bridge::CvImageConstPtr image; + image = cv_bridge::toCvShare(image_msg, enc::MONO8); + ros::NodeHandle nh("~"); + timer_process_[image_index] = nh.createTimer(ros::Duration(0), boost::bind(&UVDARDetector::processSingleImage, this, _1, image, image_index), true, true); + camera_image_sizes_[image_index] = image->image.size(); + + //ROS_INFO_STREAM("[UVDARDetector]: Received image from camera " << image_index << " with size " << image->image.cols << "x" << image->image.rows << " and encoding " << image->encoding); + + if (!all_cameras_detected_){ + unsigned int i = 0; + for (auto sz : camera_image_sizes_){ + if ( (sz.width > 0) && (sz.height > 0) ){ + i++; + } + } + + if ( i == _camera_count_){ + all_cameras_detected_ = true; + } + } + } + //} + + /* blinkersSeenCallback //{ */ + void blinkersSeenCallback(const uvdar_core::ImagePointsWithFloatStampedConstPtr& points_msg,int image_index) { + + ros::NodeHandle nh("~"); + timer_process_tracking_[image_index] = nh.createTimer(ros::Duration(0), boost::bind(&UVDARDetector::processTrackingPoints, this, _1, points_msg, image_index), true, true); + + + } + //} + + /* processTrackingPoints //{ */ + void processTrackingPoints([[maybe_unused]]const ros::TimerEvent& te, const uvdar_core::ImagePointsWithFloatStampedConstPtr& msg, int image_index) { + + trackingPointsPerCamera[image_index].clear(); + + for (const auto& point : msg->points) { + //cv::Point cvPoint(static_cast(point.x), static_cast(point.y)); + //Without casting + cv::Point cvPoint(point.x, point.y); + trackingPointsPerCamera[image_index].push_back(cvPoint); + } + + ROS_INFO_STREAM("[UVDARDetector]: Camera " << image_index << " Tracking points: " << trackingPointsPerCamera[image_index].size()); + + } + //} + + /* publishVisualizationImage //{ */ + void publishVisualizationImage(const cv::Mat& visualization_image) { + + if (!visualization_image.empty()) { + // Convert OpenCV image to ROS message + cv_bridge::CvImage cv_image; + cv_image.image = visualization_image; + cv_image.encoding = "mono8"; // image is grayscale + sensor_msgs::Image ros_image; + cv_image.toImageMsg(ros_image); + + // Publish the image + image_pub_.publish(ros_image); + } + } + //} + + + /* processStandard //{ */ + void processStandard(const cv_bridge::CvImageConstPtr& image, int image_index){ + /** + * @brief Extracts small bright points from input image + * + * @param image - the input image + * @param image_index - index of the camera that produced this image + * + * @return success + * + * + */ + + if ( ! 
(uvdf_->processImage( + image->image, + detected_points_[image_index], + sun_points_[image_index], + _use_masks_?image_index:-1 + ) + ) + ){ + ROS_ERROR_STREAM("Failed to extract markers from the image!"); + return; + } + + if(sun_points_[image_index].size() > 30){ + ROS_ERROR_STREAM("There are " << sun_points_[image_index].size() << " detected potential sun points! Check your exposure!"); + } + + } + //} + + /* processAdaptive //{ */ + void processAdaptive(const cv_bridge::CvImageConstPtr& image, int image_index, const std::vector& trackingPoints){ + /** + * @brief Extracts adaptively small bright points from input image + * + * @param image - the input image + * @param image_index - index of the camera that produced this image + * @param trackingPoints - the tracking points for the camera + * + * @return success + */ + + + if( ! (uvda_[image_index]->processImageAdaptive( + image->image, + trackingPoints, + adaptive_detected_points_[image_index], + detected_points_[image_index] + ) + ) + ){ + ROS_ERROR_STREAM("Failed to extract markers adaptively from the image!"); + return; + } + + + + } + //} + + /* publishAdaptive //{ */ + void publishAdaptive(const cv_bridge::CvImageConstPtr& image, int image_index, const std::vector& adaptive_detected_points) { + + ROS_INFO_STREAM("[UVDARDetector]: Publishing adaptive points. In camera: " << image_index); + + if (!adaptive_detected_points.empty()) { + //Current camera + ROS_INFO_STREAM("[UVDARDetector]: Detected adaptive points in camera " << image_index << ": " << adaptive_detected_points.size()); + } else { + ROS_INFO_STREAM("[UVDARDetector]: No detected adaptive points. In camera: " << image_index); + } + uvdar_core::ImagePointsWithFloatStamped msg_detected; + msg_detected.stamp = image->header.stamp; + msg_detected.image_width = image->image.cols; + msg_detected.image_height = image->image.rows; + for (auto& detected_point : adaptive_detected_points) { + uvdar_core::Point2DWithFloat point; + point.x = detected_point.x; + point.y = detected_point.y; + msg_detected.points.push_back(point); + } + pub_candidate_points_[image_index].publish(msg_detected); + + ROIData adaptiveData = uvda_[image_index]->prepareAdaptiveDataForLogging(); + uvdar_core::AdaptiveDataForLogging msg_adaptive; + msg_adaptive.stamp = image->header.stamp; + msg_adaptive.num_rois = adaptiveData.numRois; + msg_adaptive.roi_detected_points = adaptiveData.numberDetectedPoints; + msg_adaptive.roi_threshold_used = adaptiveData.thresholdValue; + msg_adaptive.roi_kl_divergence = adaptiveData.klDivergence; + msg_adaptive.roi_is_valid = adaptiveData.validRoi; + + pub_adaptive_logging_[image_index].publish(msg_adaptive); + } + //} + + + /* publishStandard //{ */ + void publishStandard(const cv_bridge::CvImageConstPtr& image, int image_index, const std::vector& detected_points) { + + ROS_INFO_STREAM("[UVDARDetector]: Detected points in camera " << image_index << ": " << detected_points.size()); + + if (!detected_points.empty()) { + ROS_INFO_STREAM("[UVDARDetector]: Detected points: " << detected_points.size()); + } else { + ROS_INFO_STREAM("[UVDARDetector]: No detected points. 
In camera: " << image_index); + } + + + uvdar_core::ImagePointsWithFloatStamped msg_detected; + msg_detected.stamp = image->header.stamp; + msg_detected.image_width = image->image.cols; + msg_detected.image_height = image->image.rows; + for (auto& detected_point : detected_points) { + uvdar_core::Point2DWithFloat point; + point.x = detected_point.x; + point.y = detected_point.y; + msg_detected.points.push_back(point); + } + pub_candidate_points_[image_index].publish(msg_detected); + } + //} + + /* processSingleImage //{ */ + + /** + * @brief Extracts small bright points from input image and publishes them. Optionally also publishes points corresponding to the sun. + * + * @param te - timer event - necessary for use of this method as a timer callback + * @param image - the input image + * @param image_index - index of the camera that produced this image + */ + + void processSingleImage([[maybe_unused]] const ros::TimerEvent& te, const cv_bridge::CvImageConstPtr image, int image_index) { + + if (!all_cameras_detected_){ + ROS_WARN_STREAM_THROTTLE(1.0, "[UVDARDetector]: Not all cameras have produced input, waiting..."); + return; + } + + if (!initial_delay_started_){ + initial_delay_start_ = ros::Time::now(); + initial_delay_started_ = true; + } + + /* double initial_delay = 5.0; //seconds. This delay is necessary to avoid strange segmentation faults with software rendering backend for OpenGL used in the buildfarm testing. */ + if ((ros::Time::now() - initial_delay_start_).toSec() < _initial_delay_){ + ROS_WARN_STREAM_THROTTLE(1.0, "[UVDARDetector]: Ignoring message for "<< _initial_delay_ <<"s..."); + return; + } + + + if (!initialized_){ + ROS_WARN_STREAM_THROTTLE(1.0,"[UVDARDetector]: Not yet initialized, dropping message..."); + return; + } + + if( _adaptive_threshold_ && trackingPointsPerCamera[image_index].size() > 0){ + + ROS_INFO_STREAM("[UVDARDetector]: Tracking points per camera: " << trackingPointsPerCamera[image_index].size()); + received_tracking_points_ = true; + + + { + std::scoped_lock lock(mutex_camera_image_); + images_current_[image_index] = image->image; + sun_points_[image_index].clear(); + detected_points_[image_index].clear(); + adaptive_detected_points_[image_index].clear(); + + //TODO: Check if its worth it, this to be able to detect new points that where not currently detected + ROS_INFO_STREAM("[UVDARDetector]: Processing image with standard thresholding. In camera: " << image_index); + + processStandard(image, image_index); + + //ROS_INFO_STREAM("[UVDARDetector]: Processing image with tracking points only."); + ROS_INFO_STREAM("[UVDARDetector]: Processing image with adaptive thresholding. 
In camera: " << image_index); + //Print number of points given + ROS_INFO_STREAM("[UVDARDetector]: Tracking points provided: " << trackingPointsPerCamera[image_index].size()); + + processAdaptive(image, image_index, trackingPointsPerCamera[image_index]); + + } + + } + else{ + + ROS_INFO_STREAM("[UVDARDetector]: No tracking points for camera " << image_index); + + /* ROS_INFO_STREAM("[UVDARDetector]: Locking cam image mutex " << image_index << "..."); */ + + { + /* std::scoped_lock lock(*mutex_camera_image_[image_index]); */ + std::scoped_lock lock(mutex_camera_image_); + + if (!uvdf_was_initialized_){ + if (!uvdf_->initDelayed(image->image)){ + ROS_WARN_STREAM_THROTTLE(1.0,"[UVDARDetector]: Failed to initialize, dropping message..."); + return; + } + uvdf_was_initialized_ = true; + } + + images_current_[image_index] = image->image; + sun_points_[image_index].clear(); + detected_points_[image_index].clear(); + + processStandard(image, image_index); + + + } + + } + + + if (detected_points_[image_index].size()>MAX_POINTS_PER_IMAGE){ + ROS_WARN_STREAM("[UVDARDetector]: Over " << MAX_POINTS_PER_IMAGE << " points received. Skipping noisy image."); + return; + } + + { + std::scoped_lock lock(mutex_pub_); + if (_publish_sun_points_){ + uvdar_core::ImagePointsWithFloatStamped msg_sun; + msg_sun.stamp = image->header.stamp; + msg_sun.image_width = image->image.cols; + msg_sun.image_height = image->image.rows; + + for (auto& sun_point : sun_points_[image_index]) { + uvdar_core::Point2DWithFloat point; + point.x = sun_point.x; + point.y = sun_point.y; + msg_sun.points.push_back(point); + } + pub_sun_points_[image_index].publish(msg_sun); + } + + if (_adaptive_threshold_ && adaptive_detected_points_[image_index].size() > 0){ + + publishAdaptive(image, image_index, adaptive_detected_points_[image_index]); + } + else{ + publishStandard(image, image_index, detected_points_[image_index]); + } + + } + + } + //} + + /* VisualizationThread() //{ */ + void VisualizationThread([[maybe_unused]] const ros::TimerEvent& te) { + if (initialized_){ + cv::Mat visualization_image; + if(_adaptive_threshold_){ + int image_index = 0; + cv::Mat white_background = cv::Mat::ones(images_current_[image_index].size(),images_current_[image_index].type()) * 255; + uvda_[image_index]->generateVisualizationAdaptive(white_background,visualization_image,adaptive_detected_points_[image_index]); + //publishVisualizationImage(visualization_image); + } + //generateVisualization(image_visualization_); + + if ((visualization_image.cols != 0) && (visualization_image.rows != 0)){ + if (_publish_visualization_){ + ROS_INFO_STREAM("[UVDARDetector]: Publishing visualization."); + pub_visualization_->publish("uvdar_detection_visualization", 0.01, visualization_image, true); + } + if (_gui_){ + cv::imshow("ocv_uvdar_detection_" + _uav_name_, visualization_image); + cv::waitKey(25); + } + } + } + } + //} + + /* generateVisualization //{ */ + int generateVisualization(cv::Mat& output_image) { + int max_image_height = 0; + int sum_image_width = 0; + std::vector start_widths; + for (auto curr_size : camera_image_sizes_){ + if (max_image_height < curr_size.height){ + max_image_height = curr_size.height; + } + start_widths.push_back(sum_image_width); + sum_image_width += curr_size.width; + } + + output_image = cv::Mat(cv::Size(sum_image_width+((int)(camera_image_sizes_.size())-1), max_image_height),CV_8UC3); + output_image = cv::Scalar(255, 255, 255); + + int image_index = 0; + for ([[maybe_unused]] auto curr_size : camera_image_sizes_){ + /* 
std::scoped_lock lock(*(mutex_camera_image_[image_index])); */ + std::scoped_lock lock(mutex_camera_image_); + cv::Point start_point = cv::Point(start_widths[image_index]+image_index, 0); + cv::Mat image_rgb; + cv::cvtColor(images_current_[image_index], image_rgb, cv::COLOR_GRAY2BGR); + image_rgb.copyTo(output_image(cv::Rect(start_point.x,0,images_current_[image_index].cols,images_current_[image_index].rows))); + + + for (int j = 0; j < (int)(detected_points_[image_index].size()); j++) { + cv::circle(output_image, detected_points_[image_index][j]+start_point, 5, cv::Scalar(255,0,0)); + } + for (int j = 0; j < (int)(sun_points_[image_index].size()); j++) { + cv::circle(output_image, sun_points_[image_index][j]+start_point, 10, cv::Scalar(0,0,255)); + } + + image_index++; + } + + if ( (output_image.cols == 0) || (output_image.rows == 0) ){ + return -1; + } + else { + return 0; + } + } + //} + + +private: + std::string _uav_name_; + bool initialized_ = false; + + std::vector sub_images_; + std::vector sub_blinkers_seen_; + + unsigned int _camera_count_; + using image_callback_t = boost::function; + std::vector cals_image_; + + + using blinkers_seen_callback_t = boost::function; + std::vector cals_blinkers_seen_; + + + + + bool _publish_sun_points_ = false; + + std::vector pub_sun_points_; + std::vector pub_candidate_points_; + std::vector pub_adaptive_logging_; + + ros::Publisher image_pub_; + + + bool _debug_; + + std::vector images_current_; + std::vector> detected_points_; + std::vector> sun_points_; + + bool _gui_; + bool _publish_visualization_; + std::unique_ptr pub_visualization_; + /* std::vector> mutex_camera_image_; */ + std::mutex mutex_camera_image_; + ros::Timer timer_visualization_; + ros::Timer timer_gui_visualization_; + ros::Timer timer_publish_visualization_; + cv::Mat image_visualization_; + std::mutex mutex_visualization_; + + std::vector camera_image_sizes_; + + bool all_cameras_detected_ = false; + + int _threshold_; + bool _adaptive_threshold_; + std::string _adaptive_method_; + bool _adaptive_debug_; + + double _initial_delay_ = 5.0; + + bool _use_masks_; + std::vector _mask_file_names_; + std::vector _masks_; + + /* std::vector> uvdf_; */ + std::unique_ptr uvdf_; + std::mutex mutex_pub_; + std::vector timer_process_; + + bool uvdf_was_initialized_ = false; + bool initial_delay_started_ = false; + ros::Time initial_delay_start_; + + + + std::vector timer_process_tracking_; + + + std::vector trackingPoints; + std::vector> trackingPointsPerCamera; + std::vector> uvda_; + std::vector> adaptive_detected_points_; + std::vector> combinedPoints_; + + bool received_tracking_points_ = false; + + std::vector ROI_data; +}; + + +} //namespace uvdar + +#include +PLUGINLIB_EXPORT_CLASS(uvdar::UVDARDetector, nodelet::Nodelet) diff --git a/thesis_attachments/uv_led_detect_adaptive.cpp b/thesis_attachments/uv_led_detect_adaptive.cpp new file mode 100644 index 0000000..e2ff1ea --- /dev/null +++ b/thesis_attachments/uv_led_detect_adaptive.cpp @@ -0,0 +1,764 @@ +#include "uv_led_detect_adaptive.h" +#include +#include +#include // C++17 and above +#include + +#include +#include +#include // Include for std::fixed and std::setprecision +#include // Include for std::tuple +#include +#include +#include + + + + +namespace fs = std::filesystem; + + +struct PointComparator { + bool operator() (const cv::Point& a, const cv::Point& b) const { + return (a.x < b.x) || (a.x == b.x && a.y < b.y); + } +}; + +namespace uvdar { + +UVDARLedDetectAdaptive::UVDARLedDetectAdaptive(int neighborhoodSize, 
double point_similarity_threshold, std::string adaptive_method, bool adaptive_debug ) : neighborhoodSize_(neighborhoodSize),
+point_similarity_threshold_(point_similarity_threshold), adaptive_method_(adaptive_method), adaptive_debug_(adaptive_debug){}
+
+UVDARLedDetectAdaptive::~UVDARLedDetectAdaptive() {}
+
+/* processImageAdaptive //{ */
+bool UVDARLedDetectAdaptive::processImageAdaptive(const cv::Mat& inputImage, const std::vector<cv::Point>& trackingPoints, std::vector<cv::Point>& detectedPoints,
+const std::vector<cv::Point>& standardPoints) {
+
+  /**
+   * @brief: This function processes the input image to detect UV-LEDs using adaptive thresholding
+   *
+   * Args:
+   * inputImage: The input image
+   * trackingPoints: The points around which the adaptive thresholding is applied
+   * detectedPoints: The points detected by adaptive thresholding
+   * standardPoints: The points from the standard (fixed-threshold) detector, merged with the adaptive detections
+   *
+   * @returns:
+   * True if the process is successful, False otherwise
+   */
+
+  // Set for unique points (not used by the merged-ROI code path below)
+  std::set<cv::Point, PointComparator> uniquePoints;
+
+  // Reset detected points
+  detectedPoints.clear();
+  lastProcessedBinaryROIs_.clear();
+  lastProcessedROIs_.clear();
+
+  //Reset the number of ROIs
+  numRois = 0;
+  numberDetectedPoints.clear();
+  thresholdValue.clear();
+  klDivergence.clear();
+  validRoi.clear();
+
+  if (trackingPoints.size() == 0 || trackingPoints.size() > 50) {
+    std::cout << "[UVDARLedDetectAdaptive]: INVALID NUMBER OF TRACKING POINTS" << std::endl;
+    return false;
+  }
+
+  std::vector<cv::Rect> rois;
+  for (const auto& point : trackingPoints) {
+    cv::Rect roi = calculateROI(inputImage, point, neighborhoodSize_);
+    rois.push_back(roi);
+  }
+
+  if (adaptive_debug_){
+    //Print the size of the rois
+    std::cout << "[UVDARLedDetectAdaptive]: SIZE OF ROIS: " << rois.size() << std::endl;
+  }
+
+  std::vector<cv::Rect> mergedROIs = mergeOverlappingROIs(rois, inputImage.size(), 0.05); // IoU overlap threshold of 0.05 (5 %)
+
+  if (adaptive_debug_){
+    //Print number of merged ROIs
+    std::cout << "[UVDARLedDetectAdaptive]: NUMBER OF MERGED ROIS: " << mergedROIs.size() << std::endl;
+  }
+
+  for (const auto& roi : mergedROIs) {
+    std::vector<cv::Point> roiDetectedPoints = applyAdaptiveThreshold(inputImage, roi);
+    detectedPoints.insert(detectedPoints.end(), roiDetectedPoints.begin(), roiDetectedPoints.end());
+  }
+
+  std::vector<cv::Point> final_detected_points = mergePoints(detectedPoints, standardPoints, point_similarity_threshold_);
+
+  detectedPoints = final_detected_points;
+
+  return true;
+}
+//}
+
+/* applyAdaptiveThreshold //{ */
+std::vector<cv::Point> UVDARLedDetectAdaptive::applyAdaptiveThreshold(const cv::Mat& inputImage, const cv::Rect& roi) {
+
+  /**
+   * @brief: This function applies adaptive thresholding to the given region of interest and detects the bright points within it
+   *
+   * Args:
+   * inputImage: The full input image
+   * roi: The (merged, image-bounded) region of interest
+   *
+   * @returns:
+   * roiDetectedPoints: The points detected within the region of interest
+   */
+
+  cv::Mat grayImage;
+  if (inputImage.channels() == 3) {
+    cv::cvtColor(inputImage(roi), grayImage, cv::COLOR_BGR2GRAY);
+  } else {
+    grayImage = inputImage(roi).clone();
+  }
+
+  int MAX_AREA = 50;
+
+  // Compute histogram plots (debugging only)
+  cv::Mat histOriginal = plotHistogram(grayImage);
+
+  cv::Mat blurred;
+  double sigmaX = 6.0;
+  double sigmaY = 6.0;
+  cv::GaussianBlur(grayImage, blurred, cv::Size(0, 0), sigmaX, sigmaY);
+
+  // Create unsharp mask by subtracting the blurred version from the original image
+  cv::Mat unsharpMask = grayImage - blurred;
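+
+  // Unsharp masking: unsharpMask = image - blur(image) keeps only the
+  // high-frequency content of the ROI (on 8-bit Mats the subtraction
+  // saturates negative values to zero). The weighted sum below computes
+  // enhanced ~= 0.25*image + 1.75*unsharpMask, so the smooth background is
+  // damped to a quarter of its brightness while a small bright marker,
+  // whose blurred value is low, is boosted roughly 2x (saturating at 255).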
+ + // Apply the unsharp mask to the original image to enhance edges + cv::Mat enhancedImage; + cv::addWeighted(grayImage, 0.25, unsharpMask, 1.75, 0, enhancedImage); + + + cv::Mat histEnhanced = plotHistogram(enhancedImage); + + // Specify the output directory + std::string outputDir = "/home/rivermar/Desktop/MRS_Master_Project/roi_images"; + + // Ensure the output directory exists + fs::create_directories(outputDir); + + + cv::Mat binaryRoi; + + cv::Mat binaryRoiOriginal; + if( adaptive_method_ == "Otsu" || adaptive_method_ == "otsu"){ + //Apply Otsu's thresholding with the enhanced ROI + int thresholdValue= cv::threshold(enhancedImage, binaryRoi, 0, 255, cv::THRESH_BINARY | cv::THRESH_OTSU); // Apply Otsu's thresholding + thresholdValue_ = thresholdValue; + + //Apply Otsu's thresholding with the original ROI + + int thresholdValueOriginal = cv::threshold(grayImage, binaryRoiOriginal, 0, 255, cv::THRESH_BINARY | cv::THRESH_OTSU); // Apply Otsu's thresholding + } + else{ + //std::cout << "[UVDARLedDetectAdaptive]: APPLYING KL DIVERGENCE" << std::endl; + auto [thresholdValue, minKLDivergence] = findOptimalThresholdUsingKL(enhancedImage); + thresholdValue_ = thresholdValue; + minKLDivergence_ = minKLDivergence; + cv::threshold(enhancedImage,binaryRoi, thresholdValue_, 255, cv::THRESH_BINARY); + } + + + // Find contours in the binary ROI + std::vector> contours; + cv::findContours(binaryRoi, contours, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE); + // Create a mask to draw the filtered contours + cv::Mat mask = cv::Mat::zeros(binaryRoi.size(), CV_8UC1); + + + if (adaptive_debug_){ + //Print the contours size + std::cout << "[UVDARLedDetectAdaptive]: NUMBER OF CONTOURS: " << contours.size() << std::endl; + } + + if (contours.size() >= 4){ + //Return empty roiDetectedPoints + std::vector empty_roiDetectedPoints = {}; + return empty_roiDetectedPoints; + + } + + for (const auto& contour : contours) { + // Calculate the area of the contour + double area = cv::contourArea(contour); + // Filter based on area + if (area < MAX_AREA) { + + if (adaptive_debug_){ + std::cout << "[UVDARLedDetectAdaptive]: IN AREA: " << area << std::endl; + } + // Draw the contour on the mask + cv::drawContours(mask, std::vector>{contour}, -1, cv::Scalar(255), cv::FILLED); + }else{ + if (adaptive_debug_){ + std::cout << "[UVDARLedDetectAdaptive]: OUT OF AREA: " << area << std::endl; + } + } + } + + // Apply the mask to the binary ROI + binaryRoi &= mask; + + // Detect points within this ROI + std::vector roiDetectedPoints = detectPointsFromRoi(binaryRoi, roi); + + if (adaptive_debug_){ + //Print the number of detected points + std::cout << "[UVDARLedDetectAdaptive]: ROI NUMBER OF DETECTED POINTS: " << roiDetectedPoints.size() << std::endl; + } + + if (roiDetectedPoints.size() > 5){ + + if (adaptive_debug_){ + //Print the number of detected points + std::cout << "[UVDARLedDetectAdaptive]: NOISY ROI: " << roiDetectedPoints.size() << std::endl; + } + //saveRoiImage(binaryRoi, point, roiIndex_++, thresholdValue, -1.0); + + numberDetectedPoints.push_back(roiDetectedPoints.size()); + thresholdValue.push_back(thresholdValue_); + if(adaptive_method_ == "Otsu" || adaptive_method_ == "otsu"){ + klDivergence.push_back(0.0); + } + else{ + klDivergence.push_back(minKLDivergence_); + } + validRoi.push_back(0); + + //Return empty roiDetectedPoints + std::vector empty_roiDetectedPoints = {}; + return empty_roiDetectedPoints; + //Clear the lastProcessedROIs_ and lastProcessedBinaryROIs_ vectors + lastProcessedROIs_.clear(); + 
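+ // NOTE: this cleanup is unreachable; it follows the early return above, so for
+ // noisy ROIs the previously stored visualization ROIs are in fact kept. If they
+ // should be dropped, the two clear() calls must be moved before the return.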
lastProcessedBinaryROIs_.clear(); + } + else{ + lastProcessedROIs_.push_back(roi); // Store the ROI for visualization + // Store the binary ROI (For debugging/visualization) + lastProcessedBinaryROIs_.push_back(binaryRoi); + + + numberDetectedPoints.push_back(roiDetectedPoints.size()); + thresholdValue.push_back(thresholdValue_); + if(adaptive_method_ == "Otsu" || adaptive_method_ == "otsu"){ + klDivergence.push_back(0.0); + } + else{ + klDivergence.push_back(minKLDivergence_); + } + validRoi.push_back(1); + + return roiDetectedPoints; + } + +} +//} + +cv::Rect UVDARLedDetectAdaptive::adjustROI(const cv::Rect& inputROI, const cv::Size& imageSize) { + int x = std::max(0, inputROI.x); + int y = std::max(0, inputROI.y); + int width = std::min(inputROI.width, imageSize.width - x); + int height = std::min(inputROI.height, imageSize.height - y); + + return cv::Rect(x, y, width, height); +} + +cv::Rect UVDARLedDetectAdaptive::calculateROI(const cv::Mat& image, const cv::Point& point, int neighborhoodSize) { + int x = std::max(0, point.x - neighborhoodSize); + int y = std::max(0, point.y - neighborhoodSize); + int width = std::min(neighborhoodSize * 2, image.cols - x); + int height = std::min(neighborhoodSize * 2, image.rows - y); + return cv::Rect(x, y, width, height); +} + +std::vector UVDARLedDetectAdaptive::mergeOverlappingROIs(const std::vector& rois, const cv::Size& imageSize, double overlapThreshold) { + std::vector mergedROIs; + std::vector merged(rois.size(), false); + + for (int i = 0; i < rois.size(); i++) { + if (merged[i]) continue; + cv::Rect currentROI = rois[i]; + for (int j = i + 1; j < rois.size(); j++) { + if (merged[j]) continue; + if (isOverlapping(currentROI, rois[j], overlapThreshold)) { + std::vector corners; + corners.push_back(cv::Point(currentROI.x, currentROI.y)); + corners.push_back(cv::Point(currentROI.x + currentROI.width, currentROI.y + currentROI.height)); + corners.push_back(cv::Point(rois[j].x, rois[j].y)); + corners.push_back(cv::Point(rois[j].x + rois[j].width, rois[j].y + rois[j].height)); + // Calculate the bounding rectangle of these corners + currentROI = cv::boundingRect(corners); + merged[j] = true; + } + } + + currentROI = adjustROI(currentROI, imageSize); + mergedROIs.push_back(currentROI); + merged[i] = true; + } + return mergedROIs; +} + +bool UVDARLedDetectAdaptive::isOverlapping(const cv::Rect& roi1, const cv::Rect& roi2, double overlapThreshold) { + cv::Rect intersection = roi1 & roi2; + double iou = double(intersection.area()) / (roi1.area() + roi2.area() - intersection.area()); + return iou > overlapThreshold; +} + + +/* adjustNeighborhoodSizeBasedOnArea //{ */ +int UVDARLedDetectAdaptive::adjustNeighborhoodSizeBasedOnArea(const cv::Mat& roiImage, const std::vector>& contours, int currentSize) { + double totalArea = 0; + for (const auto& contour : contours) { + totalArea += cv::contourArea(contour); + } + + // Print the total area + std::cout << "[UVDARLedDetectAdaptive]: TOTAL AREA: " << totalArea << std::endl; + + // Logic to adjust size based on the area + // This is an example; adjust thresholds and sizes as needed + double areaRatio = totalArea / (roiImage.size().width * roiImage.size().height); + + // Print the area ratio + std::cout << "[UVDARLedDetectAdaptive]: AREA RATIO: " << areaRatio << std::endl; + + const double targetAreaRatioSparse = 0.1, targetAreaRatioDense = 0.5; + double adjustmentFactor = 0.5; // Proportion of currentSize to adjust by, needs tuning + + if (areaRatio < targetAreaRatioSparse) { + // For sparse features, 
decrease size + + return std::max(static_cast(currentSize * (1 - adjustmentFactor)), MIN_SIZE); + } else if (areaRatio > targetAreaRatioDense) { + + // For dense features, increase size + return std::min(static_cast(currentSize * (1 + adjustmentFactor)), MAX_SIZE); + } + return currentSize; +} +//} + +/* detectPointsFromRoi //{ */ +std::vector UVDARLedDetectAdaptive::detectPointsFromRoi(const cv::Mat& mask, const cv::Rect& roi) { + /** + * @brief: This function detects points from the mask and returns the centroid of the detected points + * + * Args: + * mask: The binary mask of the ROI, where the blobs are expected to be detected. + * roi: The rectangle of interest, used to adjust the coordinates of the detected points to the full image coordinate system. + * + * @returns: + * points: The centroid of the detected points + */ + + std::vector points; + + // Use connected components to find blobs in the mask + cv::Mat labels; + int numComponents = cv::connectedComponents(mask, labels, 8, CV_32S); + + // Iterate through components to calculate centroid + for (int i = 1; i < numComponents; i++) { // Start from 1 to ignore the background + cv::Mat componentMask = (labels == i); + // Calculate the centroid of the component + cv::Moments m = cv::moments(componentMask, true); + cv::Point centroid(static_cast(m.m10 / m.m00) + roi.x, static_cast(m.m01 / m.m00) + roi.y); + points.push_back(centroid); + } + + return points; +} +//} + +/* isClose //{ */ +bool UVDARLedDetectAdaptive::isClose(const cv::Point& p1, const cv::Point& p2, double threshold) { +/** +* @brief: This function checks if two points are close to each other +* +* Args: +* p1: The first point +* p2: The second point +* threshold: The maximum distance between the two points for them to be considered close +* +* @returns: +* True if the points are close, False otherwise +*/ + + + double dist = cv::norm(p1 - p2); + return dist < threshold; +} +//} + +/* mergePoints //{ */ +std::vector UVDARLedDetectAdaptive::mergePoints(const std::vector& adaptivePoints, + const std::vector& standardPoints, double threshold) { + + /** + * @brief: This function merges the adaptive points with the standard points, ensuring that no two points are too close to each other + * + * Args: + * adaptivePoints: The points detected using adaptive thresholding + * standardPoints: The standard points + * threshold: The maximum distance between two points for them to be considered close + * + * @returns: + * combinedPoints: The merged points + */ + + + std::vector combinedPoints; + // Add all adaptive points + combinedPoints.insert(combinedPoints.end(), adaptivePoints.begin(), adaptivePoints.end()); + + // Add standard points if they are not close to any adaptive point + for (const auto& standardPoint : standardPoints) { + bool isOverlap = false; + for (const auto& adaptivePoint : adaptivePoints) { + if (isClose(standardPoint, adaptivePoint, threshold)) { + isOverlap = true; + break; + } + } + if (!isOverlap) { + combinedPoints.push_back(standardPoint); + } + } + + + return combinedPoints; +} +//} + + + +/* calculateKLDivergence //{ */ + // Function to calculate KL divergence + double UVDARLedDetectAdaptive::calculateKLDivergence( + const std::vector& segmentHist, + const std::vector& overallHist, + size_t limit) { // Add a 'limit' parameter to restrict calculations + + /** + * @brief: This function calculates the Kullback-Leibler divergence between two distributions + * + * Args: + * segmentHist: The histogram of the segment + * overallHist: The histogram of the 
overall image
+   * limit: The index up to which the KL divergence should be calculated
+   *
+   * @returns:
+   * klDivergence: The Kullback-Leibler divergence
+   */
+
+  double klDivergence = 0.0;
+  double epsilon = 1e-10; // Small value to avoid log(0)
+  // Compare only the first 'limit' bins: segmentHist holds just that many
+  // entries, while overallHist always spans all 256 bins.
+  for (size_t i = 0; i < limit && i < segmentHist.size() && i < overallHist.size(); ++i) {
+    // Make sure we only calculate where both distributions have positive values
+    if (segmentHist[i] > 0 && overallHist[i] > 0) {
+      klDivergence += overallHist[i] * log((overallHist[i] + epsilon) / (segmentHist[i] + epsilon));
+    }
+  }
+  return klDivergence;
+}
+
+double UVDARLedDetectAdaptive::calculateEntropy(const std::vector<double>& histSegment) {
+  double entropy = 0.0;
+  for (double p : histSegment) {
+    if (p > 0) { // Only calculate for positive probabilities to avoid log(0)
+      entropy -= p * log(p); // Using natural log
+    }
+  }
+  return entropy;
+}
+
+/* findOptimalThresholdUsingKL */
+std::tuple<int, double> UVDARLedDetectAdaptive::findOptimalThresholdUsingKL(const cv::Mat& roiImage) {
+  /**
+   * @brief: This function finds the optimal threshold for adaptive thresholding using Kullback-Leibler divergence
+   *
+   * Args:
+   * roiImage: The region of interest
+   *
+   * @returns:
+   * A tuple (optimalThreshold, minKLDivergence)
+   */
+
+  // Pixel counts, kept for debugging (note: cv::countNonZero counts the non-zero pixels)
+  int countNonZeroPixels = cv::countNonZero(roiImage);
+  int totalPixels = roiImage.total();
+  int countZeroPixels = totalPixels - countNonZeroPixels;
+
+  // Calculate the histogram of the ROI image
+  int histSize = 256;
+  float range[] = {0, 256};
+  const float* histRange = {range};
+  cv::Mat hist;
+  cv::calcHist(&roiImage, 1, 0, cv::Mat(), hist, 1, &histSize, &histRange, true, false);
+  cv::normalize(hist, hist, 1, 0, cv::NORM_L1, -1, cv::Mat());
+
+  double minKLDivergence = std::numeric_limits<double>::max();
+  int optimalThreshold = 0;
+
+  std::vector<double> Q(histSize);
+  for (int i = 0; i < histSize; ++i) {
+    Q[i] = hist.at<float>(i);
+  }
+
+  std::vector<double> P_below(histSize, 0.0), P_above(Q); // Use the full Q for P_above initially
+  double sumBelow = 0.0, sumAbove = std::accumulate(Q.begin(), Q.end(), 0.0);
+  for (int t = 1; t < histSize - 1; ++t) {
+    sumBelow += Q[t - 1];
+    sumAbove -= Q[t - 1];
+    P_below[t] = sumBelow; // Track cumulative sum below the threshold
+
+    if (sumBelow == 0 || sumAbove == 0) continue; // Skip invalid cases
+
+    // Normalize probabilities up to the current threshold
+    std::vector<double> normalizedP_below(P_below.begin(), P_below.begin() + t + 1);
+    std::vector<double> normalizedP_above(P_above.begin() + t + 1, P_above.end());
+
+    for (auto& val : normalizedP_below) val /= sumBelow;
+    for (auto& val : normalizedP_above) val /= sumAbove;
+
+    double klDivBelow = calculateKLDivergence(normalizedP_below, Q, t + 1);
+    double klDivAbove = calculateKLDivergence(normalizedP_above, Q, histSize - t - 1);
+
+    double totalKLDiv = klDivBelow + klDivAbove;
+
+    if (totalKLDiv < minKLDivergence && totalKLDiv > 0.0) {
+      minKLDivergence = totalKLDiv;
+      optimalThreshold = t;
+    }
+  }
+
+  return std::make_tuple(optimalThreshold, minKLDivergence);
+}
+//}
+
+std::tuple<int, double> UVDARLedDetectAdaptive::findOptimalThresholdUsingEntropy(const cv::Mat& roiImage) {
+  if (roiImage.empty()) {
+    throw std::runtime_error("Input image is empty.");
+  }
+
+  // Calculate the histogram as before
+  int histSize = 256;
+  float range[] = {0, 256};
+  const float* histRange = {range};
+  cv::Mat hist;
+  cv::calcHist(&roiImage, 1, 0, cv::Mat(), hist, 1, &histSize, &histRange, true, false);
+  cv::normalize(hist, hist, 1, 0, cv::NORM_L1, -1, cv::Mat());
+
+  if (cv::sum(hist)[0] == 0) {
+    throw std::runtime_error("Histogram normalization failed.");
+/* findOptimalThresholdUsingEntropy //{ */
+std::tuple<int, double> UVDARLedDetectAdaptive::findOptimalThresholdUsingEntropy(const cv::Mat& roiImage) {
+  if (roiImage.empty()) {
+    throw std::runtime_error("Input image is empty.");
+  }
+
+  // Calculate the normalized histogram, as in the KL-divergence variant
+  int histSize = 256;
+  float range[] = {0, 256};
+  const float* histRange = {range};
+  cv::Mat hist;
+  cv::calcHist(&roiImage, 1, 0, cv::Mat(), hist, 1, &histSize, &histRange, true, false);
+  cv::normalize(hist, hist, 1, 0, cv::NORM_L1, -1, cv::Mat());
+
+  if (cv::sum(hist)[0] == 0) {
+    throw std::runtime_error("Histogram normalization failed.");
+  }
+
+  // lowest() (not min(), which is the smallest positive double) so any entropy can beat the initial value
+  double maxEntropy = std::numeric_limits<double>::lowest();
+  int optimalThreshold = 0;
+  std::vector<double> Q(histSize);
+  for (int i = 0; i < histSize; ++i) {
+    Q[i] = hist.at<float>(i);
+  }
+
+  for (int t = 1; t < histSize - 1; ++t) {
+    std::vector<double> P_below(Q.begin(), Q.begin() + t);
+    std::vector<double> P_above(Q.begin() + t, Q.end());
+
+    double entropyBelow = calculateEntropy(P_below);
+    double entropyAbove = calculateEntropy(P_above);
+    double totalEntropy = entropyBelow + entropyAbove;
+
+    if (totalEntropy > maxEntropy) {
+      maxEntropy = totalEntropy;
+      optimalThreshold = t;
+    }
+  }
+
+  return std::make_tuple(optimalThreshold, maxEntropy);
+}
+//}
+
+/* generateVisualizationAdaptive //{ */
+void UVDARLedDetectAdaptive::generateVisualizationAdaptive(const cv::Mat& inputImage, cv::Mat& visualization_image, const std::vector<cv::Point>& detectedPoints) {
+  /**
+   * @brief: This function generates a visualization of the detected points
+   *
+   * Args:
+   * inputImage: The input image
+   * visualization_image: The output visualization image
+   *
+   * @returns:
+   * None
+   */
+
+  // Create a copy of the current image for visualization
+  // (alternatively, a black background of the same size as the input image could be used)
+  visualization_image = inputImage.clone();
+
+  // Check that the image is not empty
+  if (visualization_image.empty()) {
+    return;
+  }
+
+  // Overlay the binary ROIs
+  for (size_t i = 0; i < lastProcessedBinaryROIs_.size(); i++) {
+    const auto& binaryRoi = lastProcessedBinaryROIs_[i];
+    const auto& roi = lastProcessedROIs_[i];
+
+    // Ensure the ROI lies fully within the bounds of the image
+    // (rectangle intersection; cv::Rect::contains would wrongly reject ROIs touching the right/bottom edge)
+    cv::Rect imageBounds(0, 0, visualization_image.cols, visualization_image.rows);
+    if ((imageBounds & roi) == roi) {
+
+      // Overlay the binary ROI
+      binaryRoi.copyTo(visualization_image(roi));
+
+      // Optionally draw a rectangle around the ROI
+      cv::rectangle(visualization_image, roi, cv::Scalar(0, 255, 0)); // Green rectangle
+    }
+  }
+}
+//}
+
+/* plotHistogram //{ */
+cv::Mat UVDARLedDetectAdaptive::plotHistogram(const cv::Mat& image) {
+  // Compute the histogram
+  int histSize = 256;
+  float range[] = {0, 256};
+  const float* histRange = {range};
+  cv::Mat hist;
+  cv::calcHist(&image, 1, 0, cv::Mat(), hist, 1, &histSize, &histRange, true, false);
+
+  // Apply logarithmic scaling to the histogram values
+  hist += 1; // Avoid log(0)
+  cv::log(hist, hist);
+
+  // Create an image to display the histogram
+  int hist_w = 512;
+  int hist_h = 400;
+  int bin_w = std::round((double)hist_w / histSize);
+  cv::Mat histImage(hist_h, hist_w, CV_8UC1, cv::Scalar(0));
+
+  // Find the maximum value of the histogram for scaling
+  double maxVal;
+  cv::minMaxLoc(hist, 0, &maxVal);
+
+  // Draw the histogram bins
+  for (int i = 0; i < histSize; i++) {
+    int binVal = std::round(hist.at<float>(i) * hist_h / maxVal);
+    if (binVal > hist_h) binVal = hist_h; // Cap the bin value to fit within the image height
+    cv::line(histImage,
+             cv::Point(bin_w * i, hist_h),
+             cv::Point(bin_w * i, hist_h - binVal),
+             cv::Scalar(255), 2, 8, 0);
+  }
+
+  return histImage;
+}
+//}
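Since the class treats Otsu as the baseline method, the following standalone sketch shows how an Otsu threshold can be obtained for comparison with the KL-based one; `"roi.png"` is a hypothetical 8-bit grayscale ROI crop, not a file from this repository:

```cpp
#include <opencv2/opencv.hpp>
#include <iostream>

int main() {
  cv::Mat roi = cv::imread("roi.png", cv::IMREAD_GRAYSCALE);
  if (roi.empty()) return 1;

  // Otsu baseline: cv::threshold returns the threshold value it selected
  cv::Mat binary;
  double otsuT = cv::threshold(roi, binary, 0, 255, cv::THRESH_BINARY | cv::THRESH_OTSU);
  std::cout << "Otsu threshold: " << otsuT << std::endl;

  // The KL-based value would come from findOptimalThresholdUsingKL(roi) inside
  // the detector and be applied with a plain binary threshold:
  //   cv::threshold(roi, binary, thresholdValue, 255, cv::THRESH_BINARY);
  return 0;
}
```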
+/* saveRoiImage //{ */
+void UVDARLedDetectAdaptive::saveRoiImage(const cv::Mat& binaryRoi, const cv::Point& center, int index, int thresholdValue = 0, double minKLDivergence = 0.0) {
+  /**
+   * @brief: This function saves the binary ROI as an image
+   *
+   * Args:
+   * binaryRoi: The binary ROI
+   * center: The center of the ROI
+   * index: The index of the ROI
+   * thresholdValue: The threshold value used for adaptive thresholding
+   * minKLDivergence: The minimum KL divergence associated with the threshold
+   *
+   * @returns:
+   * None
+   */
+
+  // Specify the output directory
+  std::string outputDir = "/home/rivermar/Desktop/MRS_Master_Project/roi_images";
+
+  // Ensure the output directory exists
+  fs::create_directories(outputDir);
+
+  // Convert minKLDivergence to a string with fixed decimal places
+  std::stringstream minKLDivStream;
+  minKLDivStream << std::fixed << std::setprecision(2) << minKLDivergence; // Adjust precision as needed
+
+  // Format the filename
+  std::string filename = "roi_" + std::to_string(center.x) + "_" + std::to_string(center.y) + "_" + std::to_string(index) + "_" + std::to_string(thresholdValue) + "_" + minKLDivStream.str() + ".png";
+  std::string fullPath = (fs::path(outputDir) / filename).string();
+
+  // Save the image
+  cv::imwrite(fullPath, binaryRoi);
+}
+//}
+
+/* prepareAdaptiveDataForLogging //{ */
+ROIData UVDARLedDetectAdaptive::prepareAdaptiveDataForLogging() {
+  /**
+   * @brief: This function prepares the adaptive data for logging
+   *
+   * Args:
+   * None
+   *
+   * @returns:
+   * An ROIData struct containing the number of ROIs and, per ROI, the number of detected points, the threshold value, the KL divergence, and the validity flag
+   */
+
+  ROIData adaptiveData;
+  adaptiveData.numRois = numRois;
+  adaptiveData.numberDetectedPoints = numberDetectedPoints;
+  adaptiveData.thresholdValue = thresholdValue;
+  adaptiveData.klDivergence = klDivergence;
+  adaptiveData.validRoi = validRoi;
+
+  return adaptiveData;
+}
+//}
+
 } // namespace uvdar
\ No newline at end of file
diff --git a/thesis_attachments/uv_led_detect_adaptive.h b/thesis_attachments/uv_led_detect_adaptive.h
new file mode 100644
index 0000000..581e687
--- /dev/null
+++ b/thesis_attachments/uv_led_detect_adaptive.h
@@ -0,0 +1,104 @@
+#ifndef UV_LED_DETECT_ADAPTIVE_H
+#define UV_LED_DETECT_ADAPTIVE_H
+
+#include <opencv2/opencv.hpp>
+#include <opencv2/imgproc.hpp>
+#include <opencv2/highgui.hpp>
+
+#include <vector>
+#include <string>
+#include <cmath>
+#include <limits>
+#include <iomanip> // Include for std::fixed and std::setprecision
+#include <tuple>   // Include for std::tuple
+#include <numeric>
+#include <filesystem>
+
+struct ROIData{
+  int numRois;
+  std::vector<int> numberDetectedPoints;
+  std::vector<int> thresholdValue;
+  std::vector<double> klDivergence;
+  std::vector<int> validRoi;
+};
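The per-ROI vectors in `ROIData` are parallel arrays indexed by ROI. A minimal sketch of how a caller might flatten them for logging (illustrative only; the struct is mirrored here so the snippet is self-contained, and the CSV layout is an assumption, not part of the patch):

```cpp
#include <iostream>
#include <vector>

struct ROIData { // mirrors the struct declared in uv_led_detect_adaptive.h
  int numRois;
  std::vector<int> numberDetectedPoints;
  std::vector<int> thresholdValue;
  std::vector<double> klDivergence;
  std::vector<int> validRoi;
};

void logAdaptiveData(const ROIData& d) {
  std::cout << "rois=" << d.numRois << "\n";
  // One CSV row per ROI: index, points, threshold, KL divergence, validity
  for (size_t i = 0; i < d.numberDetectedPoints.size(); ++i) {
    std::cout << i << "," << d.numberDetectedPoints[i] << ","
              << d.thresholdValue[i] << "," << d.klDivergence[i] << ","
              << d.validRoi[i] << "\n";
  }
}
```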
+
+namespace uvdar {
+
+class UVDARLedDetectAdaptive{
+public:
+  UVDARLedDetectAdaptive(int neighborhoodSize = 25, double point_similarity_threshold = 5.0, std::string adaptive_method = "Otsu", bool adaptive_debug = false);
+  ~UVDARLedDetectAdaptive();
+
+  bool processImageAdaptive(const cv::Mat& inputImage, const std::vector<cv::Point>& trackingPoints, std::vector<cv::Point>& detectedPoints, const std::vector<cv::Point>& standardPoints);
+
+  void generateVisualizationAdaptive(const cv::Mat& inputImage, cv::Mat& visualization_image, const std::vector<cv::Point>& detectedPoints);
+
+  ROIData prepareAdaptiveDataForLogging();
+
+  const std::vector<cv::Mat>& getLastProcessedBinaryROIs() const {
+    return lastProcessedBinaryROIs_;
+  }
+
+  const std::vector<cv::Rect>& getLastProcessedROIs() const {
+    return lastProcessedROIs_;
+  }
+
+private:
+  std::vector<cv::Point> applyAdaptiveThreshold(const cv::Mat& inputImage, const cv::Rect& roi);
+
+  cv::Rect adjustROI(const cv::Rect& inputROI, const cv::Size& imageSize);
+
+  cv::Rect calculateROI(const cv::Mat& image, const cv::Point& point, int neighborhoodSize);
+
+  std::vector<cv::Rect> mergeOverlappingROIs(const std::vector<cv::Rect>& rois, const cv::Size& imageSize, double overlapThreshold);
+  bool isOverlapping(const cv::Rect& roi1, const cv::Rect& roi2, double overlapThreshold);
+
+  std::vector<cv::Point> detectPointsFromRoi(const cv::Mat& mask, const cv::Rect& roi);
+
+  bool isClose(const cv::Point& p1, const cv::Point& p2, double threshold);
+
+  int adjustNeighborhoodSizeBasedOnArea(const cv::Mat& roiImage, const std::vector<std::vector<cv::Point>>& contours, int currentSize);
+
+  std::vector<cv::Point> mergePoints(const std::vector<cv::Point>& adaptivePoints, const std::vector<cv::Point>& standardPoints, double threshold);
+
+  double calculateKLDivergence(const std::vector<double>& segmentHist, const std::vector<double>& overallHist, size_t limit);
+  double calculateEntropy(const std::vector<double>& histSegment);
+  double calculateKLDivergence2(const cv::Mat& hist, const std::vector<double>& Q, int start, int end);
+
+  std::tuple<int, double> findOptimalThresholdUsingKL(const cv::Mat& roiImage);
+  std::tuple<int, double> findOptimalThresholdUsingEntropy(const cv::Mat& roiImage);
+
+  cv::Mat plotHistogram(const cv::Mat& image);
+  void saveRoiImage(const cv::Mat& binaryRoi, const cv::Point& center, int index, int thresholdValue, double klDivergence);
+
+  int neighborhoodSize_;
+  int thresholdValue_;
+  double minKLDivergence_;
+
+  double point_similarity_threshold_;
+  std::string adaptive_method_;
+  bool adaptive_debug_;
+  int roiIndex_;
+  int MAX_SIZE = 50;
+  int MIN_SIZE = 5;
+  std::vector<cv::Mat> lastProcessedBinaryROIs_;
+  std::vector<cv::Rect> lastProcessedROIs_; // For storing ROI positions
+  std::vector<cv::Point> allRoiDetectedPoints; // For storing the points detected across all ROIs
+
+  int numRois;
+  std::vector<int> numberDetectedPoints;
+  std::vector<int> thresholdValue;
+  std::vector<double> klDivergence;
+  std::vector<int> validRoi;
+  int index = 0;
+};
+
+} // namespace uvdar
+
+#endif // UV_LED_DETECT_ADAPTIVE_H
\ No newline at end of file
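For completeness, a minimal sketch of how the public API declared above could be driven. The file names, point coordinates, and the `"KL"` method string are hypothetical stand-ins for values that would normally come from the UVDAR pipeline and its configuration:

```cpp
#include <opencv2/opencv.hpp>
#include <vector>
#include "uv_led_detect_adaptive.h"

int main() {
  // Hypothetical inputs: a grayscale camera frame plus tracking/standard points
  cv::Mat frame = cv::imread("frame.png", cv::IMREAD_GRAYSCALE);
  if (frame.empty()) return 1;
  std::vector<cv::Point> trackingPoints = {{320, 240}};
  std::vector<cv::Point> standardPoints = {{322, 241}};
  std::vector<cv::Point> detectedPoints;

  // neighborhoodSize = 25 px, similarity threshold = 5 px, KL-based thresholding
  uvdar::UVDARLedDetectAdaptive detector(25, 5.0, "KL", false);
  if (detector.processImageAdaptive(frame, trackingPoints, detectedPoints, standardPoints)) {
    cv::Mat vis;
    detector.generateVisualizationAdaptive(frame, vis, detectedPoints);
    cv::imwrite("vis.png", vis); // binary ROIs overlaid on the input frame
  }
  return 0;
}
```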