diff --git a/README.md b/README.md
index f8b8afe..2847bd1 100644
--- a/README.md
+++ b/README.md
@@ -2,12 +2,9 @@
 This is the README for the EU_Robot of Group D.
 
 // ...existing content...
-## Calibration Instructions
+## Complete Calibration and Detection Procedure
 
-### Webcam Calibration
+### Step 1: Setup Configuration Files
 
-To calibrate the webcam using a chessboard pattern:
-
-1. Ensure you have a printed chessboard pattern with 9x6 inner corners.
-2. Run the `calibrationWebcam` node:
-
\ No newline at end of file
+1. Create `arena_config.yml` with fixed marker positions:
+
\ No newline at end of file
diff --git a/test/src/QRcode.cpp b/test/src/QRcode.cpp
index 5bcd453..a8e7181 100644
--- a/test/src/QRcode.cpp
+++ b/test/src/QRcode.cpp
@@ -41,8 +41,60 @@ void loadTransformationMatrix(const std::string& filename)
     fs.release();
 }
 
+cv::Mat upscaleImage(const cv::Mat& image, double scaleFactor)
+{
+    cv::Mat upscaled;
+    cv::resize(image, upscaled, cv::Size(), scaleFactor, scaleFactor, cv::INTER_LINEAR);
+    return upscaled;
+}
+
+cv::Mat downscaleImage(const cv::Mat& image, double scaleFactor)
+{
+    if (scaleFactor <= 0)
+    {
+        throw std::invalid_argument("Scale factor must be greater than 0.");
+    }
+    cv::Mat downscaled;
+    cv::resize(image, downscaled, cv::Size(), scaleFactor, scaleFactor, cv::INTER_LINEAR);
+    return downscaled;
+}
+
+cv::Mat sharpenImage(const cv::Mat& image)
+{
+    cv::Mat sharpened;
+    cv::Mat sharpeningKernel = (cv::Mat_<float>(3, 3) << 0, -1, 0, -1, 5, -1, 0, -1, 0);
+    cv::filter2D(image, sharpened, -1, sharpeningKernel);
+    return sharpened;
+}
+
+cv::Mat reduceNoise(const cv::Mat& image)
+{
+    cv::Mat denoised;
+    cv::GaussianBlur(image, denoised, cv::Size(5, 5), 0);
+    return denoised;
+}
+
+cv::Mat gammaCorrection(const cv::Mat& image, double gamma)
+{
+    cv::Mat adjusted;
+    cv::Mat lookupTable(1, 256, CV_8U);
+    uchar* p = lookupTable.ptr();
+    for (int i = 0; i < 256; ++i)
+    {
+        p[i] = cv::saturate_cast<uchar>(pow(i / 255.0, gamma) * 255.0);
+    }
+    cv::LUT(image, lookupTable, adjusted);
+    return adjusted;
+}
+
 void processFrame(const cv::Mat& frame)
 {
+    // Apply image processing steps
+    cv::Mat processedFrame = gammaCorrection(frame, 0.4);
+    processedFrame = sharpenImage(processedFrame);
+    processedFrame = upscaleImage(processedFrame, 3.0);
+    processedFrame = reduceNoise(processedFrame);
+
     // Create ArUco marker detector
     cv::Ptr<cv::aruco::Dictionary> dictionary = cv::aruco::getPredefinedDictionary(cv::aruco::DICT_4X4_50);
 
@@ -51,18 +103,18 @@ void processFrame(const cv::Mat& frame)
     std::vector<cv::Vec3d> rvecs, tvecs;
 
     // Detect markers in the image
-    cv::aruco::detectMarkers(frame, dictionary, markerCorners, markerIds);
+    cv::aruco::detectMarkers(processedFrame, dictionary, markerCorners, markerIds);
 
     // Draw detected markers
     if (!markerIds.empty())
     {
-        cv::aruco::drawDetectedMarkers(frame, markerCorners, markerIds);
+        cv::aruco::drawDetectedMarkers(processedFrame, markerCorners, markerIds);
         cv::aruco::estimatePoseSingleMarkers(markerCorners, 0.05, cameraMatrix, distCoeffs, rvecs, tvecs);
         ROS_INFO("Detected %zu ArUco markers", markerIds.size());
         for (size_t i = 0; i < markerIds.size(); ++i)
         {
             ROS_INFO("Marker ID: %d", markerIds[i]);
-            cv::aruco::drawAxis(frame, cameraMatrix, distCoeffs, rvecs[i], tvecs[i], 0.1);
+            cv::aruco::drawAxis(processedFrame, cameraMatrix, distCoeffs, rvecs[i], tvecs[i], 0.1);
 
             // Print the rotation and translation vectors
             ROS_INFO_STREAM("Rotation Vector: " << rvecs[i]);
@@ -88,19 +140,11 @@ void processFrame(const cv::Mat& frame)
             cv::Mat markerPosition = cv::Mat(tvecs[i]);
             ROS_INFO_STREAM("Marker Position in Arena: " << markerPosition);
 
-            // Retrieve the marker position from the configuration
-            if (markerPositions.find(markerIds[i]) != markerPositions.end())
-            {
-                cv::Point2f cameraPosition = markerPositions[markerIds[i]];
-                std::vector<cv::Point2f> cameraPoints = {cameraPosition};
-                std::vector<cv::Point2f> arenaPoints;
-                cv::perspectiveTransform(cameraPoints, arenaPoints, transformationMatrix);
-                ROS_INFO_STREAM("Marker Position in Arena: " << arenaPoints[0]);
-            }
-            else
-            {
-                ROS_WARN("Marker ID %d not found in configuration", markerIds[i]);
-            }
+            // Transform the marker position to the arena frame
+            std::vector<cv::Point2f> cameraPoints = {cv::Point2f(tvecs[i][0], tvecs[i][1])};
+            std::vector<cv::Point2f> arenaPoints;
+            cv::perspectiveTransform(cameraPoints, arenaPoints, transformationMatrix);
+            ROS_INFO_STREAM("Marker Position in Arena Frame: " << arenaPoints[0]);
         }
     }
     else
@@ -109,7 +153,7 @@ void processFrame(const cv::Mat& frame)
     }
 
     // Display the result
-    cv::imshow("ArUco Detection", frame);
+    cv::imshow("ArUco Detection", processedFrame);
     cv::waitKey(1);
 }
 
diff --git a/test/src/arenaCalibration.cpp b/test/src/arenaCalibration.cpp
index eb90500..a69cc01 100644
--- a/test/src/arenaCalibration.cpp
+++ b/test/src/arenaCalibration.cpp
@@ -59,6 +59,13 @@ void calibrateArena()
     std::map<int, cv::Point2f> markerPositions;
     cv::Mat transformationMatrix;
 
+    std::map<int, cv::Point2f> fixedMarkerPositions = {
+        {1, cv::Point2f(0, 0)},
+        {2, cv::Point2f(0, 1)},
+        {3, cv::Point2f(1, 0)},
+        {4, cv::Point2f(1, 1)}
+    };
+
     while (ros::ok())
     {
         cap >> frame; // Capture a new frame
@@ -108,15 +115,21 @@ void calibrateArena()
             }
 
             // Compute the transformation matrix (example using homography)
-            if (markerIds.size() >= 4) // Need at least 4 points to compute homography
+            if (markerIds.size() >= 3) // Need at least 3 points to compute homography
             {
                 std::vector<cv::Point2f> srcPoints, dstPoints;
                 for (const auto& marker : markerPositions)
                 {
-                    srcPoints.push_back(marker.second);
-                    dstPoints.push_back(cv::Point2f(marker.second.x, marker.second.y)); // Replace with actual arena coordinates
+                    if (fixedMarkerPositions.find(marker.first) != fixedMarkerPositions.end())
+                    {
+                        srcPoints.push_back(marker.second);
+                        dstPoints.push_back(fixedMarkerPositions[marker.first]);
+                    }
+                }
+                if (srcPoints.size() >= 3)
+                {
+                    transformationMatrix = cv::findHomography(srcPoints, dstPoints);
                 }
-                transformationMatrix = cv::findHomography(srcPoints, dstPoints);
             }
         }
         else