This commit is contained in:
Thomas BONNIER 2024-12-13 08:10:07 +01:00
parent ca7ede96f0
commit 239bf96d33
3 changed files with 82 additions and 28 deletions

View File

@ -2,12 +2,9 @@ This is the README for the EU_Robot of Group D.
// ...existing content...
## Calibration Instructions
## Complete Calibration and Detection Procedure
### Webcam Calibration
### Step 1: Setup Configuration Files
To calibrate the webcam using a chessboard pattern:
1. Ensure you have a printed chessboard pattern with 9x6 inner corners.
2. Run the `calibrationWebcam` node:
1. Create `arena_config.yml` with fixed marker positions:

View File

@ -41,8 +41,60 @@ void loadTransformationMatrix(const std::string& filename)
fs.release();
}
// Enlarge an image uniformly by scaleFactor using bilinear interpolation.
//
// @param image       source image (any OpenCV type supported by cv::resize)
// @param scaleFactor multiplier applied to both width and height; must be > 0
// @return the resized copy of the input
// @throws std::invalid_argument if scaleFactor is not strictly positive
//         (same guard as downscaleImage; without it cv::resize fails with a
//         cryptic internal assertion)
cv::Mat upscaleImage(const cv::Mat& image, double scaleFactor)
{
    if (scaleFactor <= 0)
    {
        throw std::invalid_argument("Scale factor must be greater than 0.");
    }
    cv::Mat upscaled;
    // Empty cv::Size() tells resize to derive the output size from the factors.
    cv::resize(image, upscaled, cv::Size(), scaleFactor, scaleFactor, cv::INTER_LINEAR);
    return upscaled;
}
// Shrink an image uniformly by scaleFactor (bilinear interpolation).
//
// @param image       source image
// @param scaleFactor per-axis multiplier; rejected unless strictly positive
// @return the resized copy of the input
// @throws std::invalid_argument for a non-positive scale factor
cv::Mat downscaleImage(const cv::Mat& image, double scaleFactor)
{
    if (scaleFactor <= 0)
    {
        throw std::invalid_argument("Scale factor must be greater than 0.");
    }
    // An empty target size makes cv::resize compute it from the scale factors.
    cv::Mat result;
    cv::resize(image, result, cv::Size(), scaleFactor, scaleFactor, cv::INTER_LINEAR);
    return result;
}
// Sharpen an image by convolving it with the classic 3x3 kernel
//   [  0 -1  0 ]
//   [ -1  5 -1 ]
//   [  0 -1  0 ]
// (center-weighted Laplacian-style filter; output depth matches the input).
cv::Mat sharpenImage(const cv::Mat& image)
{
    const cv::Mat kernel = (cv::Mat_<float>(3, 3) <<
         0, -1,  0,
        -1,  5, -1,
         0, -1,  0);
    cv::Mat result;
    cv::filter2D(image, result, /*ddepth=*/-1, kernel);
    return result;
}
// Suppress high-frequency noise with a 5x5 Gaussian blur.
// sigmaX = 0 lets OpenCV derive the standard deviation from the kernel size.
cv::Mat reduceNoise(const cv::Mat& image)
{
    const cv::Size kernelSize(5, 5);
    cv::Mat blurred;
    cv::GaussianBlur(image, blurred, kernelSize, /*sigmaX=*/0);
    return blurred;
}
// Apply power-law (gamma) correction via a precomputed 256-entry lookup table.
//
// Every intensity i is mapped to saturate(255 * (i/255)^gamma); gamma < 1
// brightens the image, gamma > 1 darkens it. cv::LUT applies the table per
// channel, so grayscale and BGR inputs both work (assumes 8-bit depth —
// required for a 256-entry LUT; TODO confirm callers never pass 16-bit frames).
//
// @param image source image (8-bit depth)
// @param gamma exponent of the power-law transform
// @return gamma-corrected copy of the input
cv::Mat gammaCorrection(const cv::Mat& image, double gamma)
{
    cv::Mat adjusted;
    cv::Mat lookupTable(1, 256, CV_8U);
    uchar* p = lookupTable.ptr();
    for (int i = 0; i < 256; ++i)
    {
        // std::pow rather than the unqualified C pow (idiomatic C++).
        p[i] = cv::saturate_cast<uchar>(std::pow(i / 255.0, gamma) * 255.0);
    }
    cv::LUT(image, lookupTable, adjusted);
    return adjusted;
}
void processFrame(const cv::Mat& frame)
{
// Apply image processing steps
cv::Mat processedFrame = gammaCorrection(frame, 0.4);
processedFrame = sharpenImage(processedFrame);
processedFrame = upscaleImage(processedFrame, 3.0);
processedFrame = reduceNoise(processedFrame);
// Create ArUco marker detector
cv::Ptr<cv::aruco::Dictionary> dictionary =
cv::aruco::getPredefinedDictionary(cv::aruco::DICT_4X4_50);
@ -51,18 +103,18 @@ void processFrame(const cv::Mat& frame)
std::vector<cv::Vec3d> rvecs, tvecs;
// Detect markers in the image
cv::aruco::detectMarkers(frame, dictionary, markerCorners, markerIds);
cv::aruco::detectMarkers(processedFrame, dictionary, markerCorners, markerIds);
// Draw detected markers
if (!markerIds.empty())
{
cv::aruco::drawDetectedMarkers(frame, markerCorners, markerIds);
cv::aruco::drawDetectedMarkers(processedFrame, markerCorners, markerIds);
cv::aruco::estimatePoseSingleMarkers(markerCorners, 0.05, cameraMatrix, distCoeffs, rvecs, tvecs);
ROS_INFO("Detected %zu ArUco markers", markerIds.size());
for (size_t i = 0; i < markerIds.size(); ++i)
{
ROS_INFO("Marker ID: %d", markerIds[i]);
cv::aruco::drawAxis(frame, cameraMatrix, distCoeffs, rvecs[i], tvecs[i], 0.1);
cv::aruco::drawAxis(processedFrame, cameraMatrix, distCoeffs, rvecs[i], tvecs[i], 0.1);
// Print the rotation and translation vectors
ROS_INFO_STREAM("Rotation Vector: " << rvecs[i]);
@ -88,19 +140,11 @@ void processFrame(const cv::Mat& frame)
cv::Mat markerPosition = cv::Mat(tvecs[i]);
ROS_INFO_STREAM("Marker Position in Arena: " << markerPosition);
// Retrieve the marker position from the configuration
if (markerPositions.find(markerIds[i]) != markerPositions.end())
{
cv::Point2f cameraPosition = markerPositions[markerIds[i]];
std::vector<cv::Point2f> cameraPoints = {cameraPosition};
std::vector<cv::Point2f> arenaPoints;
cv::perspectiveTransform(cameraPoints, arenaPoints, transformationMatrix);
ROS_INFO_STREAM("Marker Position in Arena: " << arenaPoints[0]);
}
else
{
ROS_WARN("Marker ID %d not found in configuration", markerIds[i]);
}
// Transform the marker position to the arena frame
std::vector<cv::Point2f> cameraPoints = {cv::Point2f(tvecs[i][0], tvecs[i][1])};
std::vector<cv::Point2f> arenaPoints;
cv::perspectiveTransform(cameraPoints, arenaPoints, transformationMatrix);
ROS_INFO_STREAM("Marker Position in Arena Frame: " << arenaPoints[0]);
}
}
else
@ -109,7 +153,7 @@ void processFrame(const cv::Mat& frame)
}
// Display the result
cv::imshow("ArUco Detection", frame);
cv::imshow("ArUco Detection", processedFrame);
cv::waitKey(1);
}

View File

@ -59,6 +59,13 @@ void calibrateArena()
std::map<int, cv::Point2f> markerPositions;
cv::Mat transformationMatrix;
std::map<int, cv::Point2f> fixedMarkerPositions = {
{1, cv::Point2f(0, 0)},
{2, cv::Point2f(0, 1)},
{3, cv::Point2f(1, 0)},
{4, cv::Point2f(1, 1)}
};
while (ros::ok())
{
cap >> frame; // Capture a new frame
@ -108,15 +115,21 @@ void calibrateArena()
}
// Compute the transformation matrix (example using homography)
if (markerIds.size() >= 4) // Need at least 4 points to compute homography
if (markerIds.size() >= 3) // Need at least 3 points to compute homography
{
std::vector<cv::Point2f> srcPoints, dstPoints;
for (const auto& marker : markerPositions)
{
srcPoints.push_back(marker.second);
dstPoints.push_back(cv::Point2f(marker.second.x, marker.second.y)); // Replace with actual arena coordinates
if (fixedMarkerPositions.find(marker.first) != fixedMarkerPositions.end())
{
srcPoints.push_back(marker.second);
dstPoints.push_back(fixedMarkerPositions[marker.first]);
}
}
if (srcPoints.size() >= 3)
{
transformationMatrix = cv::findHomography(srcPoints, dstPoints);
}
transformationMatrix = cv::findHomography(srcPoints, dstPoints);
}
}
else