Thomas BONNIER 2024-12-13 09:03:08 +01:00
parent 239bf96d33
commit 61be3dd2f8
4 changed files with 133 additions and 12 deletions

View File

@ -1,10 +1,52 @@
This is the README for the EU_Robot of Group D.
// ...existing content...
## Complete Calibration and Detection Procedure
### Step 1: Setup Configuration Files
1. Create `arena_config.yml` with fixed marker positions:
```yaml
markers:
  - id: 1
    position: [0, 0]
  - id: 2
    position: [0, 1]
  - id: 3
    position: [0, 2]
```
2. Create `arena_transformation.yml` with an initial transformation matrix, for example the identity matrix (a minimal C++ sketch for reading both files follows this list):
```yaml
transformation_matrix: !!opencv-matrix
  rows: 3
  cols: 3
  dt: d
  data: [1, 0, 0, 0, 1, 0, 0, 0, 1]
```
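For reference, here is a minimal sketch of how these two files can be read in C++, assuming yaml-cpp for the marker list (as the arena calibration code in this commit does) and `cv::FileStorage` for the matrix file; note that files written by `cv::FileStorage` itself start with a `%YAML:1.0` directive:
```cpp
// Minimal loading sketch (assumes yaml-cpp and OpenCV are available; the
// actual nodes may structure this differently).
#include <yaml-cpp/yaml.h>
#include <opencv2/core.hpp>
#include <map>
#include <vector>

int main()
{
    // Fixed marker positions from arena_config.yml
    std::map<int, cv::Point2f> fixedMarkerPositions;
    YAML::Node config = YAML::LoadFile("arena_config.yml");
    for (const auto& marker : config["markers"])
    {
        int id = marker["id"].as<int>();
        std::vector<float> pos = marker["position"].as<std::vector<float>>();
        fixedMarkerPositions[id] = cv::Point2f(pos[0], pos[1]);
    }

    // Initial transformation matrix from arena_transformation.yml
    cv::Mat transformationMatrix;
    cv::FileStorage fs("arena_transformation.yml", cv::FileStorage::READ);
    fs["transformation_matrix"] >> transformationMatrix;
    fs.release();
    return 0;
}
```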
### Step 2: Calibrate the Camera
1. Run the `calibration_webcam` node to capture frames and calibrate the camera:
```bash
rosrun your_package calibration_webcam
```
2. Follow the on-screen instructions to capture at least 10 frames of the chessboard pattern from different angles and positions.
3. The calibration parameters will be saved to `camera_parameters.yml` (a sketch of the underlying calibration flow is shown below).
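For context, this is roughly the standard OpenCV chessboard calibration flow; the board size, square size, helper name, and the key names written to `camera_parameters.yml` are assumptions rather than details taken from the node:
```cpp
// Sketch: standard OpenCV chessboard calibration (details are assumptions;
// the calibration_webcam node may differ).
#include <opencv2/opencv.hpp>
#include <vector>

void calibrateFromChessboards(const std::vector<cv::Mat>& frames,
                              cv::Size boardSize = cv::Size(9, 6),
                              float squareSize = 0.025f)  // board square edge, assumed in metres
{
    if (frames.empty())
        return;

    std::vector<std::vector<cv::Point3f>> objectPoints;
    std::vector<std::vector<cv::Point2f>> imagePoints;

    // Physical corner layout of one board, reused for every accepted frame
    std::vector<cv::Point3f> board;
    for (int y = 0; y < boardSize.height; ++y)
        for (int x = 0; x < boardSize.width; ++x)
            board.emplace_back(x * squareSize, y * squareSize, 0.0f);

    for (const cv::Mat& frame : frames)
    {
        std::vector<cv::Point2f> corners;
        if (!cv::findChessboardCorners(frame, boardSize, corners))
            continue;  // skip frames where the full board is not visible
        imagePoints.push_back(corners);
        objectPoints.push_back(board);
    }

    cv::Mat cameraMatrix, distCoeffs;
    std::vector<cv::Mat> rvecs, tvecs;
    cv::calibrateCamera(objectPoints, imagePoints, frames[0].size(),
                        cameraMatrix, distCoeffs, rvecs, tvecs);

    // Write intrinsics; the key names here are illustrative
    cv::FileStorage fs("camera_parameters.yml", cv::FileStorage::WRITE);
    fs << "camera_matrix" << cameraMatrix << "distortion_coefficients" << distCoeffs;
    fs.release();
}
```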
### Step 3: Calibrate the Arena
1. Run the `arena_calibration` node to detect ArUco markers and calibrate the arena:
```bash
rosrun your_package arena_calibration
```
2. The node loads the fixed marker positions from `arena_config.yml`, matches them against the detected markers, and saves the resulting transformation matrix to `arena_transformation.yml` (see the estimation sketch below).
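One plausible way to obtain such a 3x3 matrix is to match the detected marker centres against the fixed positions from `arena_config.yml` and estimate a homography; this is a sketch under that assumption, not necessarily what `arena_calibration` does internally (the helper name `estimateArenaTransform` is illustrative):
```cpp
// Sketch: estimate the camera-to-arena transformation from matched markers.
// cv::findHomography needs at least four correspondences.
#include <opencv2/opencv.hpp>
#include <map>
#include <vector>

cv::Mat estimateArenaTransform(const std::map<int, cv::Point2f>& detected,
                               const std::map<int, cv::Point2f>& fixedPositions)
{
    std::vector<cv::Point2f> src, dst;
    for (const auto& kv : detected)
    {
        auto it = fixedPositions.find(kv.first);
        if (it == fixedPositions.end())
            continue;                 // marker is not part of the arena layout
        src.push_back(kv.second);     // marker centre as seen by the camera
        dst.push_back(it->second);    // known position in the arena frame
    }
    if (src.size() < 4)
        return cv::Mat();             // not enough correspondences yet
    return cv::findHomography(src, dst);
}
```
With fewer than four usable markers (such as the three-marker example from Step 1), a 2x3 affine estimate from `cv::estimateAffine2D` padded with a `[0, 0, 1]` row would be an alternative source for the 3x3 matrix.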
### Step 4: Run the Main Node
1. Run the `aruco_detector` node to detect ArUco markers and display their positions in the arena:
```bash
rosrun your_package aruco_detector
```
2. The node will process frames from the camera, detect ArUco markers, and display their positions in the arena frame (the camera-to-arena mapping is sketched below).
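The mapping itself mirrors the logging code added to `aruco_detector` in this commit: the marker's translation (x, y) is wrapped in a point vector and passed through `cv::perspectiveTransform` with the calibrated 3x3 matrix. A condensed sketch (the helper name `toArenaFrame` is illustrative):
```cpp
// Sketch: project a marker's camera-frame position into the arena frame
// using the 3x3 matrix loaded from arena_transformation.yml.
#include <opencv2/opencv.hpp>
#include <vector>

cv::Point2f toArenaFrame(const cv::Point2f& cameraPoint, const cv::Mat& transformationMatrix)
{
    std::vector<cv::Point2f> cameraPoints = {cameraPoint};
    std::vector<cv::Point2f> arenaPoints;
    cv::perspectiveTransform(cameraPoints, arenaPoints, transformationMatrix);
    return arenaPoints[0];
}
```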

View File

@ -87,6 +87,15 @@ cv::Mat gammaCorrection(const cv::Mat& image, double gamma)
return adjusted;
}
cv::Mat enhanceImage(const cv::Mat& image)
{
    cv::Mat enhanced;
    // 3x3 kernel with positive surround and a strongly negative centre (coefficients sum to 1)
    cv::Mat kernel = (cv::Mat_<float>(3, 3) << 1, 1, 1, 1, -7, 1, 1, 1, 1);
    cv::filter2D(image, enhanced, CV_32F, kernel);
    // Rescale the 32-bit filter response back to an 8-bit image
    cv::normalize(enhanced, enhanced, 0, 255, cv::NORM_MINMAX, CV_8UC1);
    return enhanced;
}
void processFrame(const cv::Mat& frame)
{
// Apply image processing steps
@ -94,6 +103,7 @@ void processFrame(const cv::Mat& frame)
processedFrame = sharpenImage(processedFrame);
processedFrame = upscaleImage(processedFrame, 3.0);
processedFrame = reduceNoise(processedFrame);
processedFrame = enhanceImage(processedFrame);
// Create ArUco marker detector
cv::Ptr<cv::aruco::Dictionary> dictionary =
@ -144,7 +154,7 @@ void processFrame(const cv::Mat& frame)
std::vector<cv::Point2f> cameraPoints = {cv::Point2f(tvecs[i][0], tvecs[i][1])};
std::vector<cv::Point2f> arenaPoints;
cv::perspectiveTransform(cameraPoints, arenaPoints, transformationMatrix);
ROS_INFO_STREAM("Marker Position in Arena Frame: " << arenaPoints[0]);
ROS_INFO_STREAM("Marker ID: " << markerIds[i] << " Position in Arena Frame: [" << arenaPoints[0].x << ", " << arenaPoints[0].y << "]");
}
}
else

View File

@ -32,6 +32,70 @@ void saveTransformationMatrix(const std::string& filename, const cv::Mat& transf
fs.release();
}
cv::Mat upscaleImage(const cv::Mat& image, double scaleFactor)
{
    // Enlarge the frame so small or distant markers cover more pixels
    cv::Mat upscaled;
    cv::resize(image, upscaled, cv::Size(), scaleFactor, scaleFactor, cv::INTER_LINEAR);
    return upscaled;
}
cv::Mat sharpenImage(const cv::Mat& image)
{
    // Standard 3x3 sharpening kernel (centre 5, 4-neighbours -1)
    cv::Mat sharpened;
    cv::Mat sharpeningKernel = (cv::Mat_<float>(3, 3) << 0, -1, 0, -1, 5, -1, 0, -1, 0);
    cv::filter2D(image, sharpened, -1, sharpeningKernel);
    return sharpened;
}
cv::Mat reduceNoise(const cv::Mat& image)
{
    // 5x5 Gaussian blur to suppress sensor noise before detection
    cv::Mat denoised;
    cv::GaussianBlur(image, denoised, cv::Size(5, 5), 0);
    return denoised;
}
cv::Mat gammaCorrection(const cv::Mat& image, double gamma)
{
    // Apply a gamma curve via a 256-entry lookup table (gamma < 1 brightens the image)
    cv::Mat adjusted;
    cv::Mat lookupTable(1, 256, CV_8U);
    uchar* p = lookupTable.ptr();
    for (int i = 0; i < 256; ++i)
    {
        p[i] = cv::saturate_cast<uchar>(pow(i / 255.0, gamma) * 255.0);
    }
    cv::LUT(image, lookupTable, adjusted);
    return adjusted;
}
cv::Mat enhanceImage(const cv::Mat& image)
{
    cv::Mat enhanced;
    // 3x3 kernel with positive surround and a strongly negative centre (coefficients sum to 1)
    cv::Mat kernel = (cv::Mat_<float>(3, 3) << 1, 1, 1, 1, -7, 1, 1, 1, 1);
    cv::filter2D(image, enhanced, CV_32F, kernel);
    // Rescale the 32-bit filter response back to an 8-bit image
    cv::normalize(enhanced, enhanced, 0, 255, cv::NORM_MINMAX, CV_8UC1);
    return enhanced;
}
// Fixed marker positions in the arena frame, keyed by ArUco id (loaded from arena_config.yml)
std::map<int, cv::Point2f> fixedMarkerPositions;
void loadMarkerPositions(const std::string& filename)
{
    try
    {
        YAML::Node config = YAML::LoadFile(filename);
        for (const auto& marker : config["markers"])
        {
            int id = marker["id"].as<int>();
            std::vector<float> pos = marker["position"].as<std::vector<float>>();
            fixedMarkerPositions[id] = cv::Point2f(pos[0], pos[1]);
        }
    }
    catch (const YAML::Exception& e)  // covers missing files as well as parse/conversion errors
    {
        ROS_ERROR("Failed to load marker positions: %s", e.what());
    }
}
void calibrateArena()
{
// Create ArUco marker detector
@ -59,12 +123,7 @@ void calibrateArena()
std::map<int, cv::Point2f> markerPositions;
cv::Mat transformationMatrix;
std::map<int, cv::Point2f> fixedMarkerPositions = {
{1, cv::Point2f(0, 0)},
{2, cv::Point2f(0, 1)},
{3, cv::Point2f(1, 0)},
{4, cv::Point2f(1, 1)}
};
loadMarkerPositions("arena_config.yml");
while (ros::ok())
{
@ -75,6 +134,13 @@ void calibrateArena()
continue;
}
// Apply image processing steps
frame = gammaCorrection(frame, 0.4);
frame = sharpenImage(frame);
frame = upscaleImage(frame, 3.0);
frame = reduceNoise(frame);
frame = enhanceImage(frame);
// Detect markers in the image
cv::aruco::detectMarkers(frame, dictionary, markerCorners, markerIds);

View File

@ -26,6 +26,7 @@ int main(int argc, char** argv)
cv::Mat frame, gray;
int frameCount = 0;
ros::Rate rate(1); // Capture 1 frame per second
while (ros::ok())
{
cap >> frame;
@ -53,6 +54,8 @@ int main(int argc, char** argv)
cv::imshow("Calibration", frame);
if (cv::waitKey(30) == 27) break; // Exit on ESC key
rate.sleep(); // Sleep to maintain 1 frame per second
}
if (imagePoints.size() < 10)