Compare commits

..

25 Commits

Author SHA1 Message Date
Thomas BONNIER 32a0ba5fd5 work nice with a larger area + add comments on QRcode.cpp 2024-12-19 11:55:56 +01:00
Thomas BONNIER 8dbd79d5a4 test end of session 2024-12-19 11:38:24 +01:00
Thomas BONNIER ebf1f7480f test last session 2024-12-19 11:25:19 +01:00
Thomas BONNIER 20841f7d60 bug with arcuo tag 7 2024-12-13 11:58:05 +01:00
Thomas BONNIER 3b7ba64aa9 bug 2024-12-13 09:54:40 +01:00
Thomas BONNIER b1d50517c6 tt 2024-12-13 09:45:56 +01:00
Thomas BONNIER 0f80eccfd3 bug correction 2024-12-13 09:35:59 +01:00
Thomas BONNIER ddcf979dd6 add a break when calibration is done 2024-12-13 09:16:13 +01:00
Thomas BONNIER 61be3dd2f8 test 2024-12-13 09:03:08 +01:00
Thomas BONNIER 239bf96d33 to test 2024-12-13 08:10:07 +01:00
Thomas BONNIER ca7ede96f0 test3 2024-12-04 11:49:26 +01:00
Thomas BONNIER f8786a3b91 aa 2024-12-04 11:30:24 +01:00
Thomas BONNIER 7255eff715 test2 2024-12-04 11:14:22 +01:00
Thomas BONNIER 71a761d1c0 test 2024-12-04 11:06:07 +01:00
Thomas BONNIER 17afe3378f test for next session 2024-11-29 17:29:35 +01:00
Thomas BONNIER b9313d40c0 improve resolution 2024-11-29 16:46:08 +01:00
Thomas BONNIER 0f2cfce8f8 fix missing library 2024-11-29 16:01:31 +01:00
Thomas BONNIER e45145988e enhancement 2024-11-29 15:19:12 +01:00
Thomas BONNIER ec30c914e8 working 2024-11-29 14:46:01 +01:00
Thomas BONNIER ee764361f5 calib2 2024-11-29 14:23:10 +01:00
Thomas BONNIER 55939f7f9c calibration 2024-11-29 13:53:09 +01:00
Thomas BONNIER 04814bca27 enhancing with intrinsic matrix camera. 2024-11-19 11:51:59 +01:00
Thomas BONNIER 18cc0f789c functional node using OpenCV 2024-11-19 11:02:02 +01:00
Thomas BONNIER e15a7a9579 first test 2024-11-19 09:32:54 +01:00
Thomas BONNIER bd992d70b8 first commit for feature 2024-11-19 09:05:43 +01:00
8 changed files with 989 additions and 1 deletions

130
README.md
View File

@ -1 +1,129 @@
This is the README for the EU_Robot of Group D.
## Complete Calibration and Detection Procedure
### Step 1: Setup Configuration Files
1. Create `arena_config.yml` with fixed marker positions:
```yaml
markers:
  - id: 5
    position: [0, 0, 0]
  - id: 6
    position: [1, 0, 0]
  - id: 7
    position: [1, 1, 0]
  - id: 8
    position: [0, 1, 0]
```
2. Create `arena_transformation.yml` with an initial transformation matrix (example identity matrix):
```yaml
transformation_matrix: !!opencv-matrix
rows: 3
cols: 3
dt: d
data: [1, 0, 0, 0, 1, 0, 0, 0, 1]
```
### Step 2: Calibrate the Camera
1. Run the `calibration_webcam` node to capture frames and calibrate the camera:
```bash
rosrun test calibrationWebcam
```
2. Follow the on-screen instructions to capture at least 10 frames of the chessboard pattern from different angles and positions.
3. The calibration parameters will be saved to `camera_parameters.yml`.
### Step 3: Calibrate the Arena
1. Run the `arena_calibration` node to detect ArUco markers and calibrate the arena:
```bash
rosrun test arena_calibration
```
2. The detected marker positions and transformation matrix will be saved to `arena_config.yml` and `arena_transformation.yml` respectively.
### Step 4: Run the Main Node
1. Run the `aruco_detector` node to detect ArUco markers and display their positions in the arena:
```bash
rosrun test aruco_detector
```
2. The node will process frames from the camera, detect ArUco markers, and display their positions in the arena frame.
### Additional Information
#### Dependencies
Ensure you have the following dependencies installed:
- ROS
- OpenCV
- yaml-cpp
#### Building the Package
1. Navigate to your catkin workspace:
```bash
cd ~/catkin_ws
```
2. Build the package:
```bash
catkin build test
```
3. Source the workspace:
```bash
source devel/setup.bash
```
#### Running the Nodes
1. Start the ROS master node:
```bash
roscore
```
2. In a new terminal, run the desired node as described in the steps above.
#### Troubleshooting
- If the camera is not detected, ensure it is properly connected and recognized by the system.
- If not enough markers are detected, try adjusting the lighting conditions or the camera position.
- Ensure all configuration files (`camera_parameters.yml`, `arena_config.yml`, `arena_transformation.yml`) are correctly formatted and located in the appropriate directory.
#### Example Configuration Files
`camera_parameters.yml`:
```yaml
camera_matrix: !!opencv-matrix
rows: 3
cols: 3
dt: d
data: [fx, 0, cx, 0, fy, cy, 0, 0, 1]
distortion_coefficients: !!opencv-matrix
rows: 1
cols: 5
dt: d
data: [k1, k2, p1, p2, k3]
```
`arena_config.yml`:
```yaml
markers:
  - id: 5
    position: [0, 0, 0]
  - id: 6
    position: [1, 0, 0]
  - id: 7
    position: [1, 1, 0]
  - id: 8
    position: [0, 1, 0]
```
`arena_transformation.yml`:
```yaml
transformation_matrix: !!opencv-matrix
rows: 3
cols: 3
dt: d
data: [1, 0, 0, 0, 1, 0, 0, 0, 1]
```

96
test/CMakeLists.txt Normal file
View File

@ -0,0 +1,96 @@
cmake_minimum_required(VERSION 3.0.2)
project(test)

## Compile as C++11, supported in ROS Kinetic and newer
add_compile_options(-std=c++11)

## Find catkin macros and libraries
find_package(catkin REQUIRED COMPONENTS
  roscpp
  rospy
  std_msgs
  sensor_msgs
  image_transport
  cv_bridge
)

## System dependencies are found with CMake's conventions
find_package(OpenCV REQUIRED)
find_package(yaml-cpp REQUIRED)

###################################
## catkin specific configuration ##
###################################
catkin_package(
  CATKIN_DEPENDS roscpp rospy std_msgs sensor_msgs image_transport cv_bridge
)

###########
## Build ##
###########
## Specify additional locations of header files
include_directories(
  ${catkin_INCLUDE_DIRS}
  ${OpenCV_INCLUDE_DIRS}
  ${YAML_CPP_INCLUDE_DIRS}
)

## Declare C++ executables.
## NOTE(review): qr_code_detector/aruco_detector and
## arenaCalibration/arena_calibration are duplicate targets built from the
## same source files; kept for backward compatibility with existing launch
## scripts — consider removing one of each pair.
add_executable(qr_code_detector src/QRcode.cpp)
add_executable(calibrationWebcam src/calibrationWebcam.cpp)
add_executable(arenaCalibration src/arenaCalibration.cpp)
add_executable(aruco_detector src/QRcode.cpp)
add_executable(arena_calibration src/arenaCalibration.cpp)

## Add cmake target dependencies of every executable (previously only two of
## the five targets declared them, so parallel builds could race against
## generated headers/exported targets).
add_dependencies(qr_code_detector ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
add_dependencies(calibrationWebcam ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
add_dependencies(arenaCalibration ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
add_dependencies(aruco_detector ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
add_dependencies(arena_calibration ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})

## Specify libraries to link each executable target against
target_link_libraries(qr_code_detector
  ${catkin_LIBRARIES}
  ${OpenCV_LIBRARIES}
  yaml-cpp
)
target_link_libraries(calibrationWebcam
  ${catkin_LIBRARIES}
  ${OpenCV_LIBRARIES}
)
target_link_libraries(arenaCalibration
  ${catkin_LIBRARIES}
  ${OpenCV_LIBRARIES}
  yaml-cpp
)
target_link_libraries(aruco_detector
  ${catkin_LIBRARIES}
  ${OpenCV_LIBRARIES}
  yaml-cpp
)
target_link_libraries(arena_calibration
  ${catkin_LIBRARIES}
  ${OpenCV_LIBRARIES}
  yaml-cpp
)

#############
## Install ##
#############
## Mark executables for installation
install(TARGETS qr_code_detector
  RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
)

#############
## Testing ##
#############
## Add gtest based cpp test target and link libraries
# catkin_add_gtest(${PROJECT_NAME}-test test/test_test.cpp)
# if(TARGET ${PROJECT_NAME}-test)
#   target_link_libraries(${PROJECT_NAME}-test ${catkin_LIBRARIES})
# endif()

## Add folders to be run by python nosetests
# catkin_add_nosetests(test)

10
test/arena_config.yml Normal file
View File

@ -0,0 +1,10 @@
# filepath: /catkin_ws/arena_config.yml
# Fixed ArUco marker layout of the arena. Each entry maps a marker id to its
# known [x, y, z] position in the arena frame; arenaCalibration reads all
# three components (pos[0..2]), so every position must have exactly three
# values. These correspondences are used to compute the camera->arena
# transformation saved to arena_transformation.yml.
markers:
  - id: 5
    position: [0, 0, 0]
  - id: 6
    position: [1, 0, 0]
  - id: 7
    position: [1, 1, 0]
  - id: 8
    position: [0, 1, 0]

View File

@ -0,0 +1,5 @@
# 3x3 camera-plane -> arena-frame transformation, stored in cv::FileStorage
# format. Written by the arena_calibration node and read by aruco_detector.
transformation_matrix: !!opencv-matrix
  rows: 3
  cols: 3
  dt: d
  data: [1, 0, 0, 0, 1, 0, 0, 0, 1] # Example identity matrix, replace with actual data

60
test/package.xml Normal file
View File

@ -0,0 +1,60 @@
<?xml version="1.0"?>
<package format="2">
  <name>test</name>
  <version>0.0.0</version>
  <description>The test package</description>
  <maintainer email="user@example.com">Your Name</maintainer>
  <license>BSD</license>

  <!-- Optional attribute type can be: website, bugtracker, or repository -->
  <!-- Example: -->
  <!-- <url type="website">http://wiki.ros.org/test</url> -->

  <!-- Author tags are optional, multiple are allowed, one per tag -->
  <!-- Authors do not have to be maintainers, but could be -->
  <!-- Example: -->
  <!-- <author email="jane.doe@example.com">Jane Doe</author> -->

  <!-- The *depend tags are used to specify dependencies -->
  <!-- Dependencies can be catkin packages or system dependencies -->
  <!-- Examples: -->
  <!-- Use depend as a shortcut for packages that are both build and exec dependencies -->
  <!-- <depend>roscpp</depend> -->
  <!-- Note that this is equivalent to the following: -->
  <!-- <build_depend>roscpp</build_depend> -->
  <!-- <exec_depend>roscpp</exec_depend> -->
  <!-- Use build_depend for packages you need at compile time: -->
  <!-- <build_depend>message_generation</build_depend> -->
  <!-- Use build_export_depend for packages you need in order to build against this package: -->
  <!-- <build_export_depend>message_generation</build_export_depend> -->
  <!-- Use buildtool_depend for build tool packages: -->
  <!-- <buildtool_depend>catkin</buildtool_depend> -->
  <!-- Use exec_depend for packages you need at runtime: -->
  <!-- <exec_depend>message_runtime</exec_depend> -->
  <!-- Use test_depend for packages you need only for testing: -->
  <!-- <test_depend>gtest</test_depend> -->
  <!-- Use doc_depend for packages you need only for building documentation: -->
  <!-- <doc_depend>doxygen</doc_depend> -->

  <buildtool_depend>catkin</buildtool_depend>
  <build_depend>roscpp</build_depend>
  <build_depend>rospy</build_depend>
  <build_depend>std_msgs</build_depend>
  <build_depend>sensor_msgs</build_depend>
  <build_depend>image_transport</build_depend>
  <build_depend>cv_bridge</build_depend>
  <build_depend>opencv</build_depend>
  <!-- yaml-cpp is linked by CMakeLists.txt (qr_code_detector and the
       arena calibration targets) but was missing from the manifest, so
       rosdep could not resolve it on a clean machine. -->
  <build_depend>yaml-cpp</build_depend>
  <exec_depend>roscpp</exec_depend>
  <exec_depend>rospy</exec_depend>
  <exec_depend>std_msgs</exec_depend>
  <exec_depend>sensor_msgs</exec_depend>
  <exec_depend>image_transport</exec_depend>
  <exec_depend>cv_bridge</exec_depend>
  <exec_depend>opencv</exec_depend>
  <exec_depend>yaml-cpp</exec_depend>

  <export>
    <!-- Other tools can request additional information be placed here -->
  </export>
</package>

268
test/src/QRcode.cpp Normal file
View File

@ -0,0 +1,268 @@
#include <ros/ros.h>
#include <cv_bridge/cv_bridge.h>
#include <opencv2/aruco.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/calib3d.hpp>
#include <thread>
#include <yaml-cpp/yaml.h>
// Global variables for camera parameters and marker positions
cv::Mat cameraMatrix, distCoeffs;
std::map<int, cv::Point2f> markerPositions;
cv::Mat transformationMatrix;
// Function to load marker positions from a YAML file
void loadMarkerPositions(const std::string& filename)
{
try
{
YAML::Node config = YAML::LoadFile(filename);
for (const auto& marker : config["markers"])
{
int id = marker["id"].as<int>();
std::vector<float> pos = marker["position"].as<std::vector<float>>();
markerPositions[id] = cv::Point2f(pos[0], pos[1]);
}
}
catch (const YAML::BadFile& e)
{
ROS_ERROR("Failed to load marker positions: %s", e.what());
}
}
// Function to load the transformation matrix from a YAML file
void loadTransformationMatrix(const std::string& filename)
{
cv::FileStorage fs(filename, cv::FileStorage::READ);
if (!fs.isOpened())
{
ROS_ERROR("Unable to open transformation matrix file");
return;
}
fs["transformation_matrix"] >> transformationMatrix;
fs.release();
}
// Function to apply gamma correction to an image
// Apply gamma correction via a 256-entry lookup table:
// out = (in / 255)^(1/gamma) * 255. The default gamma < 1 brightens
// dark regions of the frame.
cv::Mat gammaCorrection(const cv::Mat& image, double gamma = 0.4)
{
    cv::Mat table(1, 256, CV_8U);
    uchar* entries = table.ptr();
    const double inverseGamma = 1.0 / gamma;
    for (int value = 0; value < 256; ++value)
    {
        entries[value] = cv::saturate_cast<uchar>(pow(value / 255.0, inverseGamma) * 255.0);
    }
    cv::Mat result;
    cv::LUT(image, table, result);
    return result;
}
// Function to upscale an image
// Enlarge an image by scaleFactor in both dimensions using bilinear
// interpolation.
cv::Mat upscaleImage(const cv::Mat& image, double scaleFactor = 3.0)
{
    cv::Mat enlarged;
    cv::resize(image, enlarged, cv::Size(), scaleFactor, scaleFactor, cv::INTER_LINEAR);
    return enlarged;
}
// Function to sharpen an image
// Sharpen an image with the classic 4-neighbour Laplacian kernel
// (centre 5, cross -1).
cv::Mat sharpenImage(const cv::Mat& image)
{
    const cv::Mat kernel = (cv::Mat_<float>(3, 3) <<
         0, -1,  0,
        -1,  5, -1,
         0, -1,  0);
    cv::Mat result;
    cv::filter2D(image, result, -1, kernel);
    return result;
}
// Function to reduce noise in an image
// Smooth sensor noise with a 5x5 Gaussian blur (sigma derived from the
// kernel size).
cv::Mat reduceNoise(const cv::Mat& image)
{
    cv::Mat smoothed;
    cv::GaussianBlur(image, smoothed, cv::Size(5, 5), 0);
    return smoothed;
}
// Function to enhance an image by applying various processing steps
// Full pre-detection enhancement pipeline:
// gamma correction -> sharpen -> 3x upscale -> sharpen -> Gaussian denoise.
// Note the output is 3x the input resolution.
cv::Mat enhanceImage(const cv::Mat& image)
{
    cv::Mat stage = sharpenImage(gammaCorrection(image));
    stage = sharpenImage(upscaleImage(stage, 3.0));
    return reduceNoise(stage);
}
// Function to process a frame and detect ArUco markers
// Detect ArUco markers (DICT_4X4_1000) in a single camera frame, estimate
// each marker's pose from the loaded intrinsics, and log its position mapped
// into the arena frame via the global transformationMatrix. Detections are
// drawn onto `frame`, which is shown in the "ArUco Detection" window.
void processFrame(cv::Mat& frame)
{
    // Apply image processing steps (gamma, sharpen, 3x upscale, denoise)
    cv::Mat processedFrame = enhanceImage(frame);
    // Create ArUco marker detector with parameters relaxed to accept very
    // small / low-contrast markers, with sub-pixel corner refinement
    cv::Ptr<cv::aruco::DetectorParameters> parameters = cv::aruco::DetectorParameters::create();
    parameters->adaptiveThreshWinSizeMin = 3;
    parameters->adaptiveThreshWinSizeMax = 23;
    parameters->adaptiveThreshWinSizeStep = 5;
    parameters->minMarkerPerimeterRate = 0.005; // accept very small markers
    parameters->maxMarkerPerimeterRate = 3.0;
    parameters->perspectiveRemoveIgnoredMarginPerCell = 0.05;
    parameters->cornerRefinementMethod = cv::aruco::CORNER_REFINE_SUBPIX;
    parameters->polygonalApproxAccuracyRate = 0.025;
    cv::Ptr<cv::aruco::Dictionary> dictionary =
        cv::aruco::getPredefinedDictionary(cv::aruco::DICT_4X4_1000);
    std::vector<int> markerIds;
    std::vector<std::vector<cv::Point2f>> markerCorners;
    std::vector<cv::Vec3d> rvecs, tvecs;
    // Detect markers in the enhanced image.
    // NOTE(review): enhanceImage upscales 3x, so these corners are in
    // processedFrame coordinates, yet they are drawn on the original `frame`
    // and passed to estimatePoseSingleMarkers with intrinsics calibrated at
    // the original resolution — verify whether they should be divided by the
    // upscale factor first.
    cv::aruco::detectMarkers(processedFrame, dictionary, markerCorners, markerIds, parameters);
    // If no markers are detected, retry detection at several extra scales
    if (markerIds.empty())
    {
        for (double scale = 0.5; scale <= 2.0; scale += 0.5)
        {
            cv::Mat scaledFrame;
            cv::resize(processedFrame, scaledFrame, cv::Size(), scale, scale);
            cv::aruco::detectMarkers(scaledFrame, dictionary, markerCorners, markerIds, parameters);
            if (!markerIds.empty())
            {
                // Undo the extra scaling applied for this detection pass
                for (auto& corners : markerCorners)
                {
                    for (auto& corner : corners)
                    {
                        corner /= scale;
                    }
                }
                break;
            }
        }
    }
    // Draw detected markers and report pose information for each one
    if (!markerIds.empty())
    {
        cv::aruco::drawDetectedMarkers(frame, markerCorners, markerIds);
        // 0.05 = marker side length (presumably meters — confirm against the
        // printed markers)
        cv::aruco::estimatePoseSingleMarkers(markerCorners, 0.05, cameraMatrix, distCoeffs, rvecs, tvecs);
        ROS_INFO("Detected %zu ArUco markers", markerIds.size());
        for (size_t i = 0; i < markerIds.size(); ++i)
        {
            ROS_INFO("Marker ID: %d", markerIds[i]);
            cv::aruco::drawAxis(frame, cameraMatrix, distCoeffs, rvecs[i], tvecs[i], 0.1);
            // Print the rotation and translation vectors
            ROS_INFO_STREAM("Rotation Vector: " << rvecs[i]);
            ROS_INFO_STREAM("Translation Vector: " << tvecs[i]);
            // Calculate the distance from the camera to the marker
            double distance = cv::norm(tvecs[i]);
            ROS_INFO("Distance from camera: %f meters", distance);
            // Convert rotation vector to rotation matrix
            cv::Mat rotationMatrix;
            cv::Rodrigues(rvecs[i], rotationMatrix);
            // Extract Euler angles from the rotation matrix
            cv::Vec3d eulerAngles;
            eulerAngles[0] = atan2(rotationMatrix.at<double>(2, 1), rotationMatrix.at<double>(2, 2)); // Roll
            eulerAngles[1] = atan2(-rotationMatrix.at<double>(2, 0), sqrt(pow(rotationMatrix.at<double>(2, 1), 2) + pow(rotationMatrix.at<double>(2, 2), 2))); // Pitch
            eulerAngles[2] = atan2(rotationMatrix.at<double>(1, 0), rotationMatrix.at<double>(0, 0)); // Yaw
            ROS_INFO_STREAM("Euler Angles (Roll, Pitch, Yaw): " << eulerAngles);
            // Estimate the coordinates of the marker in the arena
            cv::Mat markerPosition = cv::Mat(tvecs[i]);
            ROS_INFO_STREAM("Marker Position in Arena: " << markerPosition);
            // Map the marker's camera-plane (x, y) into the arena frame using
            // the homography loaded from arena_transformation.yml
            std::vector<cv::Point2f> cameraPoints = {cv::Point2f(tvecs[i][0], tvecs[i][1])};
            std::vector<cv::Point2f> arenaPoints;
            cv::perspectiveTransform(cameraPoints, arenaPoints, transformationMatrix);
            ROS_INFO_STREAM("Marker ID: " << markerIds[i] << " Position in Arena Frame: [" << arenaPoints[0].x << ", " << arenaPoints[0].y << "]");
        }
    }
    else
    {
        ROS_INFO("No ArUco markers detected");
    }
    // Display the annotated frame
    cv::imshow("ArUco Detection", frame);
    cv::waitKey(1);
}
// Thread function to continuously capture and process frames from the camera
void frameProcessingThread()
{
cv::VideoCapture cap(0, cv::CAP_V4L2); // Open the default camera with V4L2 backend
if (!cap.isOpened())
{
ROS_ERROR("Unable to open camera");
return;
}
// Set the desired resolution
cap.set(cv::CAP_PROP_FRAME_WIDTH, 1920); // Set width to 1920 pixels
cap.set(cv::CAP_PROP_FRAME_HEIGHT, 1080); // Set height to 1080 pixels
// Set the desired frame rate
cap.set(cv::CAP_PROP_FPS, 15); // Set frame rate to 15 Hz
while (ros::ok())
{
cv::Mat frame;
cap >> frame; // Capture a new frame
if (frame.empty())
{
ROS_ERROR("Captured empty frame");
continue;
}
processFrame(frame);
}
}
// Main function
// Node entry point: loads the camera intrinsics, the fixed marker layout and
// the arena transformation, then runs the capture/detection loop on a worker
// thread while ros::spin() services ROS callbacks. The worker exits when
// ros::ok() turns false, letting join() return.
int main(int argc, char** argv)
{
    ros::init(argc, argv, "aruco_detector");
    ros::NodeHandle nh;
    // Load camera parameters produced by the calibrationWebcam node
    cv::FileStorage fs("camera_parameters.yml", cv::FileStorage::READ);
    if (!fs.isOpened())
    {
        ROS_ERROR("Unable to open camera parameters file");
        return -1;
    }
    fs["camera_matrix"] >> cameraMatrix;
    fs["distortion_coefficients"] >> distCoeffs;
    fs.release();
    // Load marker positions (fixed arena layout)
    loadMarkerPositions("arena_config.yml");
    // Load transformation matrix computed by the arena_calibration node
    loadTransformationMatrix("arena_transformation.yml");
    // Start frame processing thread
    std::thread processingThread(frameProcessingThread);
    ros::spin();
    processingThread.join();
    return 0;
}

View File

@ -0,0 +1,340 @@
#include <ros/ros.h>
#include <opencv2/aruco.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/calib3d.hpp>
#include <cv_bridge/cv_bridge.h>
#include <vector>
#include <thread>
#include <yaml-cpp/yaml.h>
#include <fstream>
cv::Mat cameraMatrix, distCoeffs;
std::map<int, cv::Point3f> fixedMarkerPositions;
// Persist detected marker positions (id -> [x, y]) to a YAML file under a
// top-level "markers" sequence, matching the format read by
// loadMarkerPositions().
// Fix: the original never checked whether the output stream opened, so a
// write to an unwritable path failed silently; now it logs and returns.
void saveMarkerPositions(const std::string& filename, const std::map<int, cv::Point2f>& markerPositions)
{
    YAML::Node config;
    for (const auto& marker : markerPositions)
    {
        YAML::Node markerNode;
        markerNode["id"] = marker.first;
        markerNode["position"] = std::vector<float>{marker.second.x, marker.second.y};
        config["markers"].push_back(markerNode);
    }
    std::ofstream fout(filename);
    if (!fout)
    {
        ROS_ERROR("Unable to open %s for writing marker positions", filename.c_str());
        return;
    }
    fout << config;
}
// Write the computed camera->arena transformation matrix to an OpenCV YAML
// file under the key "transformation_matrix".
// Fix: the original did not check fs.isOpened(), so writing to an
// unwritable path would fail silently; now it logs and returns.
void saveTransformationMatrix(const std::string& filename, const cv::Mat& transformationMatrix)
{
    cv::FileStorage fs(filename, cv::FileStorage::WRITE);
    if (!fs.isOpened())
    {
        ROS_ERROR("Unable to open %s for writing the transformation matrix", filename.c_str());
        return;
    }
    fs << "transformation_matrix" << transformationMatrix;
    fs.release();
}
// Gamma-correct an 8-bit image through a precomputed 256-entry LUT:
// out = (in / 255)^(1/gamma) * 255. Gamma < 1 brightens shadows.
cv::Mat gammaCorrection(const cv::Mat& image, double gamma = 0.4)
{
    cv::Mat lut(1, 256, CV_8U);
    uchar* mapping = lut.ptr();
    const double exponent = 1.0 / gamma;
    for (int level = 0; level < 256; ++level)
    {
        mapping[level] = cv::saturate_cast<uchar>(pow(level / 255.0, exponent) * 255.0);
    }
    cv::Mat output;
    cv::LUT(image, lut, output);
    return output;
}
// Bilinear enlargement of an image by scaleFactor in both dimensions.
cv::Mat upscaleImage(const cv::Mat& image, double scaleFactor = 3.0)
{
    cv::Mat output;
    cv::resize(image, output, cv::Size(), scaleFactor, scaleFactor, cv::INTER_LINEAR);
    return output;
}
// Resize an image by scaleFactor (bilinear) — despite the name, any positive
// factor is accepted. Throws std::invalid_argument for non-positive factors.
cv::Mat downscaleImage(const cv::Mat& image, double scaleFactor)
{
    if (scaleFactor <= 0)
    {
        throw std::invalid_argument("Scale factor must be greater than 0.");
    }
    cv::Mat output;
    cv::resize(image, output, cv::Size(), scaleFactor, scaleFactor, cv::INTER_LINEAR);
    return output;
}
// Sharpen using the classic 4-neighbour Laplacian kernel (centre 5, cross -1).
cv::Mat sharpenImage(const cv::Mat& image)
{
    const cv::Mat kernel = (cv::Mat_<float>(3, 3) <<
         0, -1,  0,
        -1,  5, -1,
         0, -1,  0);
    cv::Mat output;
    cv::filter2D(image, output, -1, kernel);
    return output;
}
// Attenuate sensor noise with a 5x5 Gaussian blur (sigma auto-derived).
cv::Mat reduceNoise(const cv::Mat& image)
{
    cv::Mat output;
    cv::GaussianBlur(image, output, cv::Size(5, 5), 0);
    return output;
}
// Pre-detection enhancement pipeline: gamma correction, sharpening,
// 3x upscaling, a second sharpening pass, then Gaussian denoising.
// Note the returned image is 3x the input resolution.
cv::Mat enhanceImage(const cv::Mat& image)
{
    cv::Mat stage = gammaCorrection(image);
    stage = sharpenImage(stage);
    stage = sharpenImage(upscaleImage(stage, 3.0));
    return reduceNoise(stage);
}
void loadMarkerPositions(const std::string& filename)
{
try
{
YAML::Node config = YAML::LoadFile(filename);
if (!config["markers"])
{
throw std::runtime_error("No 'markers' node found in the configuration file.");
}
for (const auto& marker : config["markers"])
{
int id = marker["id"].as<int>();
std::vector<float> pos = marker["position"].as<std::vector<float>>();
fixedMarkerPositions[id] = cv::Point3f(pos[0], pos[1], pos[2]);
}
}
catch (const YAML::BadFile& e)
{
ROS_ERROR("Failed to load marker positions: %s", e.what());
}
catch (const std::runtime_error& e)
{
ROS_ERROR("Error in configuration file: %s", e.what());
}
catch (const std::exception& e)
{
ROS_ERROR("Unexpected error: %s", e.what());
}
}
// Detect ArUco markers (DICT_4X4_50) in one frame and accumulate each
// detected marker's camera-plane position in markerPositions. Once every
// marker listed in fixedMarkerPositions (from arena_config.yml) has been
// seen, compute the camera->arena transformation into transformationMatrix
// and set calibrationDone so the capture loop can stop.
void processFrame(cv::Mat& frame, std::map<int, cv::Point2f>& markerPositions, cv::Mat& transformationMatrix, bool& calibrationDone)
{
    // Apply image processing steps (gamma, sharpen, 3x upscale, denoise)
    cv::Mat processedFrame = enhanceImage(frame);
    // Create ArUco marker detector with parameters relaxed to accept very
    // small / low-contrast markers, with sub-pixel corner refinement
    cv::Ptr<cv::aruco::DetectorParameters> parameters = cv::aruco::DetectorParameters::create();
    parameters->adaptiveThreshWinSizeMin = 3;
    parameters->adaptiveThreshWinSizeMax = 23;
    parameters->adaptiveThreshWinSizeStep = 5;
    parameters->minMarkerPerimeterRate = 0.005;
    parameters->maxMarkerPerimeterRate = 3.0;
    parameters->perspectiveRemoveIgnoredMarginPerCell = 0.05;
    parameters->cornerRefinementMethod = cv::aruco::CORNER_REFINE_SUBPIX;
    parameters->polygonalApproxAccuracyRate = 0.025;
    cv::Ptr<cv::aruco::Dictionary> dictionary =
        cv::aruco::getPredefinedDictionary(cv::aruco::DICT_4X4_50);
    std::vector<int> markerIds;
    std::vector<std::vector<cv::Point2f>> markerCorners;
    std::vector<cv::Vec3d> rvecs, tvecs;
    // Detect markers in the enhanced image.
    // NOTE(review): enhanceImage upscales 3x, so these corners are in
    // processedFrame coordinates, yet they are drawn on the original `frame`
    // and fed to pose estimation with intrinsics calibrated at the original
    // resolution — verify whether they should be rescaled first.
    cv::aruco::detectMarkers(processedFrame, dictionary, markerCorners, markerIds, parameters);
    // If no markers are detected, retry detection at several extra scales
    if (markerIds.empty())
    {
        for (double scale = 0.5; scale <= 2.0; scale += 0.5)
        {
            cv::Mat scaledFrame;
            cv::resize(processedFrame, scaledFrame, cv::Size(), scale, scale);
            cv::aruco::detectMarkers(scaledFrame, dictionary, markerCorners, markerIds, parameters);
            if (!markerIds.empty())
            {
                // Undo the extra scaling applied for this detection pass
                for (auto& corners : markerCorners)
                {
                    for (auto& corner : corners)
                    {
                        corner /= scale;
                    }
                }
                break;
            }
        }
    }
    // Draw detected markers and record their positions
    if (!markerIds.empty())
    {
        cv::aruco::drawDetectedMarkers(frame, markerCorners, markerIds);
        // 0.05 = marker side length (presumably meters — confirm)
        cv::aruco::estimatePoseSingleMarkers(markerCorners, 0.05, cameraMatrix, distCoeffs, rvecs, tvecs);
        ROS_INFO("Detected %zu ArUco markers", markerIds.size());
        for (size_t i = 0; i < markerIds.size(); ++i)
        {
            ROS_INFO("Marker ID: %d", markerIds[i]);
            cv::aruco::drawAxis(frame, cameraMatrix, distCoeffs, rvecs[i], tvecs[i], 0.1);
            // Print the rotation and translation vectors
            ROS_INFO_STREAM("Rotation Vector: " << rvecs[i]);
            ROS_INFO_STREAM("Translation Vector: " << tvecs[i]);
            // Calculate the distance from the camera to the marker
            double distance = cv::norm(tvecs[i]);
            ROS_INFO("Distance from camera: %f meters", distance);
            // Convert rotation vector to rotation matrix
            cv::Mat rotationMatrix;
            cv::Rodrigues(rvecs[i], rotationMatrix);
            // Extract Euler angles from the rotation matrix
            cv::Vec3d eulerAngles;
            eulerAngles[0] = atan2(rotationMatrix.at<double>(2, 1), rotationMatrix.at<double>(2, 2)); // Roll
            eulerAngles[1] = atan2(-rotationMatrix.at<double>(2, 0), sqrt(pow(rotationMatrix.at<double>(2, 1), 2) + pow(rotationMatrix.at<double>(2, 2), 2))); // Pitch
            eulerAngles[2] = atan2(rotationMatrix.at<double>(1, 0), rotationMatrix.at<double>(0, 0)); // Yaw
            ROS_INFO_STREAM("Euler Angles (Roll, Pitch, Yaw): " << eulerAngles);
            // Save the marker's camera-plane position (x, y of the tvec)
            markerPositions[markerIds[i]] = cv::Point2f(tvecs[i][0], tvecs[i][1]);
        }
        // Check if all required markers (from arena_config.yml) are detected
        bool allMarkersDetected = true;
        for (const auto& marker : fixedMarkerPositions)
        {
            if (markerPositions.find(marker.first) == markerPositions.end())
            {
                allMarkersDetected = false;
                break;
            }
        }
        // Compute the transformation matrix once all markers are detected
        if (allMarkersDetected)
        {
            // Build matched correspondence lists (camera plane -> arena)
            std::vector<cv::Point2f> srcPoints;
            std::vector<cv::Point3f> dstPoints;
            for (const auto& marker : markerPositions)
            {
                if (fixedMarkerPositions.find(marker.first) != fixedMarkerPositions.end())
                {
                    srcPoints.push_back(marker.second);
                    dstPoints.push_back(fixedMarkerPositions[marker.first]);
                }
            }
            // NOTE(review): cv::findHomography expects planar (2-D) point
            // sets but dstPoints holds Point3f — confirm the z component is
            // handled as intended (all sample-config markers have z = 0).
            // Also, a homography generally needs >= 4 correspondences.
            if (srcPoints.size() >= 3)
            {
                transformationMatrix = cv::findHomography(srcPoints, dstPoints);
                calibrationDone = true;
            }
        }
    }
    else
    {
        ROS_INFO("No ArUco markers detected");
    }
}
// Worker-thread detection loop: process frames until the calibration
// succeeds (calibrationDone set by processFrame), ROS shuts down, or the
// user presses ESC in the debug window.
void frameProcessingThread(cv::VideoCapture& cap, std::map<int, cv::Point2f>& markerPositions, cv::Mat& transformationMatrix, bool& calibrationDone)
{
    cv::Mat frame;
    while (ros::ok() && !calibrationDone)
    {
        cap >> frame; // Grab the next frame
        if (frame.empty())
        {
            ROS_ERROR("Captured empty frame");
            continue;
        }
        processFrame(frame, markerPositions, transformationMatrix, calibrationDone);
        // Show the annotated frame for debugging; ESC aborts calibration
        cv::imshow("Arena Calibration", frame);
        if (cv::waitKey(30) == 27)
        {
            break;
        }
    }
}
// Open the camera, run the detection loop on a worker thread until the
// arena transformation has been computed (or the user aborts), then persist
// the result to arena_transformation.yml.
// Cleanup: removed six unused locals from the original (dictionary,
// markerIds, markerCorners, rvecs, tvecs, frame) — detection state lives
// entirely inside processFrame().
void calibrateArena()
{
    cv::VideoCapture cap(0, cv::CAP_V4L2); // Open the default camera with V4L2 backend
    if (!cap.isOpened())
    {
        ROS_ERROR("Unable to open camera");
        return;
    }
    // Request 1920x1080 at 15 frames per second
    cap.set(cv::CAP_PROP_FRAME_WIDTH, 1920);
    cap.set(cv::CAP_PROP_FRAME_HEIGHT, 1080);
    cap.set(cv::CAP_PROP_FPS, 15);
    std::map<int, cv::Point2f> markerPositions; // id -> detected camera-plane position
    cv::Mat transformationMatrix;               // filled by processFrame on success
    loadMarkerPositions("arena_config.yml");
    bool calibrationDone = false;
    // Start the frame processing thread and wait for it to finish
    std::thread processingThread(frameProcessingThread, std::ref(cap), std::ref(markerPositions), std::ref(transformationMatrix), std::ref(calibrationDone));
    processingThread.join();
    if (calibrationDone)
    {
        // Save the transformation matrix to a configuration file
        saveTransformationMatrix("arena_transformation.yml", transformationMatrix);
        ROS_INFO("Arena calibration completed and transformation saved to arena_transformation.yml");
    }
    else
    {
        ROS_ERROR("Calibration failed. Not enough markers detected.");
    }
}
// Node entry point: loads the camera intrinsics and runs the arena
// calibration procedure.
// Cleanup: removed the redundant loadMarkerPositions("arena_config.yml")
// call — calibrateArena() loads the same file itself, so the config was
// parsed twice.
int main(int argc, char** argv)
{
    ros::init(argc, argv, "arena_calibration");
    ros::NodeHandle nh;
    // Load camera parameters produced by the calibrationWebcam node
    cv::FileStorage fs("camera_parameters.yml", cv::FileStorage::READ);
    if (!fs.isOpened())
    {
        ROS_ERROR("Unable to open camera parameters file");
        return -1;
    }
    fs["camera_matrix"] >> cameraMatrix;
    fs["distortion_coefficients"] >> distCoeffs;
    fs.release();
    calibrateArena();
    return 0;
}

View File

@ -0,0 +1,81 @@
#include <ros/ros.h>
#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <vector>
// Interactive intrinsic calibration node: captures chessboard views (9x6
// inner corners) at ~1 fps until the user presses ESC or ROS shuts down,
// then — if at least 10 good views were collected — solves for the camera
// matrix and distortion coefficients and writes them to
// camera_parameters.yml (read later by the detector/calibration nodes).
int main(int argc, char** argv)
{
    ros::init(argc, argv, "calibration_webcam");
    ros::NodeHandle nh;
    cv::Size boardSize(9, 6); // Chessboard size (inner corners per row/column)
    std::vector<std::vector<cv::Point2f>> imagePoints;  // detected corners, one set per view
    std::vector<std::vector<cv::Point3f>> objectPoints; // board model, repeated per view
    // Planar board model: one point per inner corner, z = 0, unit square
    // spacing (the calibration is defined up to that scale)
    std::vector<cv::Point3f> obj;
    for (int i = 0; i < boardSize.height; ++i)
        for (int j = 0; j < boardSize.width; ++j)
            obj.push_back(cv::Point3f(j, i, 0));
    cv::VideoCapture cap(0, cv::CAP_V4L2);
    if (!cap.isOpened())
    {
        ROS_ERROR("Unable to open camera");
        return -1;
    }
    cv::Mat frame, gray;
    int frameCount = 0;
    ros::Rate rate(1); // Capture 1 frame per second
    while (ros::ok())
    {
        cap >> frame;
        if (frame.empty())
        {
            ROS_ERROR("Captured empty frame");
            continue;
        }
        cv::cvtColor(frame, gray, cv::COLOR_BGR2GRAY);
        std::vector<cv::Point2f> corners;
        bool found = cv::findChessboardCorners(gray, boardSize, corners,
            cv::CALIB_CB_ADAPTIVE_THRESH | cv::CALIB_CB_FAST_CHECK | cv::CALIB_CB_NORMALIZE_IMAGE);
        if (found)
        {
            // Refine corner locations to sub-pixel accuracy before storing
            cv::cornerSubPix(gray, corners, cv::Size(11, 11), cv::Size(-1, -1),
                cv::TermCriteria(cv::TermCriteria::EPS + cv::TermCriteria::COUNT, 30, 0.1));
            cv::drawChessboardCorners(frame, boardSize, corners, found);
            imagePoints.push_back(corners);
            objectPoints.push_back(obj);
            frameCount++;
            ROS_INFO("Frame %d captured", frameCount);
        }
        cv::imshow("Calibration", frame);
        if (cv::waitKey(30) == 27) break; // Exit on ESC key
        rate.sleep(); // Sleep to maintain 1 frame per second
    }
    // Need a minimum number of views for a stable calibration
    if (imagePoints.size() < 10)
    {
        ROS_ERROR("Not enough frames captured for calibration");
        return -1;
    }
    cv::Mat cameraMatrix, distCoeffs;
    std::vector<cv::Mat> rvecs, tvecs;
    // Solve for the intrinsics and distortion from all collected views
    cv::calibrateCamera(objectPoints, imagePoints, gray.size(), cameraMatrix, distCoeffs, rvecs, tvecs);
    ROS_INFO("Calibration successful");
    ROS_INFO_STREAM("Camera Matrix: " << cameraMatrix);
    ROS_INFO_STREAM("Distortion Coefficients: " << distCoeffs);
    // Persist the results for the other nodes
    cv::FileStorage fs("camera_parameters.yml", cv::FileStorage::WRITE);
    fs << "camera_matrix" << cameraMatrix;
    fs << "distortion_coefficients" << distCoeffs;
    fs.release();
    return 0;
}