#include <ros/ros.h>
#include <opencv2/aruco.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/calib3d.hpp>
#include <opencv2/videoio.hpp>
#include <cv_bridge/cv_bridge.h>
#include <vector>
#include <map>
#include <string>
#include <stdexcept>
#include <cmath>
#include <thread>
#include <yaml-cpp/yaml.h>
#include <fstream>

// Camera intrinsics and the known arena marker positions, loaded at startup.
cv::Mat cameraMatrix, distCoeffs;
std::map<int, cv::Point3f> fixedMarkerPositions;

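// Write detected marker positions (id and camera-frame x/y) to a YAML file.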
void saveMarkerPositions(const std::string& filename, const std::map<int, cv::Point2f>& markerPositions)
{
    YAML::Node config;
    for (const auto& marker : markerPositions)
    {
        YAML::Node markerNode;
        markerNode["id"] = marker.first;
        markerNode["position"] = std::vector<float>{marker.second.x, marker.second.y};
        config["markers"].push_back(markerNode);
    }
    std::ofstream fout(filename);
    fout << config;
}

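// Persist the computed arena transformation matrix with cv::FileStorage.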
void saveTransformationMatrix(const std::string& filename, const cv::Mat& transformationMatrix)
{
    cv::FileStorage fs(filename, cv::FileStorage::WRITE);
    fs << "transformation_matrix" << transformationMatrix;
    fs.release();
}

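// Apply gamma correction to the frame through a 256-entry lookup table.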
cv::Mat gammaCorrection(const cv::Mat& image, double gamma = 0.4)
{
    cv::Mat lookUpTable(1, 256, CV_8U);
    uchar* p = lookUpTable.ptr();
    for (int i = 0; i < 256; ++i)
        p[i] = cv::saturate_cast<uchar>(std::pow(i / 255.0, 1.0 / gamma) * 255.0);

    cv::Mat corrected;
    cv::LUT(image, lookUpTable, corrected);
    return corrected;
}

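// Enlarge the frame by scaleFactor using bilinear interpolation.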
cv::Mat upscaleImage(const cv::Mat& image, double scaleFactor = 3.0)
{
    cv::Mat upscaled;
    cv::resize(image, upscaled, cv::Size(), scaleFactor, scaleFactor, cv::INTER_LINEAR);
    return upscaled;
}

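// Shrink the frame by scaleFactor; throws if the factor is not positive. (Currently unused.)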
cv::Mat downscaleImage(const cv::Mat& image, double scaleFactor)
{
    if (scaleFactor <= 0)
        throw std::invalid_argument("Scale factor must be greater than 0.");
    cv::Mat downscaled;
    cv::resize(image, downscaled, cv::Size(), scaleFactor, scaleFactor, cv::INTER_LINEAR);
    return downscaled;
}

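// Sharpen the frame with a 3x3 sharpening kernel.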
cv::Mat sharpenImage(const cv::Mat& image)
{
    cv::Mat sharpeningKernel = (cv::Mat_<float>(3, 3) << 0, -1, 0, -1, 5, -1, 0, -1, 0);
    cv::Mat sharpened;
    cv::filter2D(image, sharpened, -1, sharpeningKernel);
    return sharpened;
}

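// Suppress high-frequency noise with a 5x5 Gaussian blur.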
cv::Mat reduceNoise(const cv::Mat& image)
{
    cv::Mat denoised;
    cv::GaussianBlur(image, denoised, cv::Size(5, 5), 0);
    return denoised;
}

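// Preprocessing pipeline run before marker detection: gamma correction, sharpening,
// 3x upscaling, a second sharpening pass, and denoising.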
cv::Mat enhanceImage(const cv::Mat& image)
{
    // Apply gamma correction
    cv::Mat enhanced = gammaCorrection(image);

    // Sharpen
    enhanced = sharpenImage(enhanced);

    // Upscale
    enhanced = upscaleImage(enhanced, 3.0);

    // Sharpen again after upscaling
    enhanced = sharpenImage(enhanced);

    // Reduce noise
    enhanced = reduceNoise(enhanced);

    return enhanced;
}

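// Load the known arena marker positions (id and x/y/z) from a YAML configuration file
// into the global fixedMarkerPositions map.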
void loadMarkerPositions(const std::string& filename)
{
    try
    {
        YAML::Node config = YAML::LoadFile(filename);
        if (!config["markers"])
        {
            throw std::runtime_error("No 'markers' node found in the configuration file.");
        }
        for (const auto& marker : config["markers"])
        {
            int id = marker["id"].as<int>();
            std::vector<float> pos = marker["position"].as<std::vector<float>>();
            // Guard against malformed entries before reading three coordinates.
            if (pos.size() < 3)
            {
                ROS_WARN("Marker %d has fewer than 3 position values; skipping it", id);
                continue;
            }
            fixedMarkerPositions[id] = cv::Point3f(pos[0], pos[1], pos[2]);
        }
    }
    catch (const YAML::BadFile& e)
    {
        ROS_ERROR("Failed to load marker positions: %s", e.what());
    }
    catch (const std::runtime_error& e)
    {
        ROS_ERROR("Error in configuration file: %s", e.what());
    }
    catch (const std::exception& e)
    {
        ROS_ERROR("Unexpected error: %s", e.what());
    }
}

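// Detect ArUco markers in a single frame, estimate their poses, and, once every marker
// listed in fixedMarkerPositions has been seen, compute the image-to-arena homography.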
void processFrame(cv::Mat& frame, std::map<int, cv::Point2f>& markerPositions, cv::Mat& transformationMatrix, bool& calibrationDone)
{
    // Apply image processing steps
    cv::Mat processedFrame = enhanceImage(frame);

    // Create ArUco marker detector with modified parameters
    cv::Ptr<cv::aruco::DetectorParameters> parameters = cv::aruco::DetectorParameters::create();
    parameters->adaptiveThreshWinSizeMin = 3;
    parameters->adaptiveThreshWinSizeMax = 23;
    parameters->adaptiveThreshWinSizeStep = 5;
    parameters->minMarkerPerimeterRate = 0.005;
    parameters->maxMarkerPerimeterRate = 3.0;
    parameters->perspectiveRemoveIgnoredMarginPerCell = 0.05;
    parameters->cornerRefinementMethod = cv::aruco::CORNER_REFINE_SUBPIX;
    parameters->polygonalApproxAccuracyRate = 0.025;

    cv::Ptr<cv::aruco::Dictionary> dictionary =
        cv::aruco::getPredefinedDictionary(cv::aruco::DICT_4X4_50);
    std::vector<int> markerIds;
    std::vector<std::vector<cv::Point2f>> markerCorners;
    std::vector<cv::Vec3d> rvecs, tvecs;

    // Detect markers in the image
    cv::aruco::detectMarkers(processedFrame, dictionary, markerCorners, markerIds, parameters);

    // If no markers are detected, try multi-scale detection
    if (markerIds.empty())
    {
        for (double scale = 0.5; scale <= 2.0; scale += 0.5)
        {
            cv::Mat scaledFrame;
            cv::resize(processedFrame, scaledFrame, cv::Size(), scale, scale);
            cv::aruco::detectMarkers(scaledFrame, dictionary, markerCorners, markerIds, parameters);
            if (!markerIds.empty())
            {
                // Scale the marker corners back to the processed frame size
                for (auto& corners : markerCorners)
                {
                    for (auto& corner : corners)
                    {
                        corner /= scale;
                    }
                }
                break;
            }
        }
    }

    // Detection ran on the enhanced frame, which enhanceImage() upscales; map the corners
    // back to the original frame's resolution so drawing and pose estimation use
    // coordinates consistent with the camera calibration.
    const double enhanceScale = static_cast<double>(processedFrame.cols) / frame.cols;
    for (auto& corners : markerCorners)
    {
        for (auto& corner : corners)
        {
            corner /= enhanceScale;
        }
    }

    // Draw detected markers
    if (!markerIds.empty())
    {
        cv::aruco::drawDetectedMarkers(frame, markerCorners, markerIds);
        cv::aruco::estimatePoseSingleMarkers(markerCorners, 0.05, cameraMatrix, distCoeffs, rvecs, tvecs);
        ROS_INFO("Detected %zu ArUco markers", markerIds.size());

        for (size_t i = 0; i < markerIds.size(); ++i)
        {
            ROS_INFO("Marker ID: %d", markerIds[i]);
            cv::aruco::drawAxis(frame, cameraMatrix, distCoeffs, rvecs[i], tvecs[i], 0.1);

            // Print the rotation and translation vectors
            ROS_INFO_STREAM("Rotation Vector: " << rvecs[i]);
            ROS_INFO_STREAM("Translation Vector: " << tvecs[i]);

            // Calculate the distance from the camera to the marker
            double distance = cv::norm(tvecs[i]);
            ROS_INFO("Distance from camera: %f meters", distance);

            // Convert rotation vector to rotation matrix
            cv::Mat rotationMatrix;
            cv::Rodrigues(rvecs[i], rotationMatrix);

            // Extract Euler angles from the rotation matrix
            cv::Vec3d eulerAngles;
            eulerAngles[0] = std::atan2(rotationMatrix.at<double>(2, 1), rotationMatrix.at<double>(2, 2)); // Roll
            eulerAngles[1] = std::atan2(-rotationMatrix.at<double>(2, 0), std::sqrt(std::pow(rotationMatrix.at<double>(2, 1), 2) + std::pow(rotationMatrix.at<double>(2, 2), 2))); // Pitch
            eulerAngles[2] = std::atan2(rotationMatrix.at<double>(1, 0), rotationMatrix.at<double>(0, 0)); // Yaw

            ROS_INFO_STREAM("Euler Angles (Roll, Pitch, Yaw): " << eulerAngles);

            // Save the marker position (camera-frame x/y of the translation vector)
            markerPositions[markerIds[i]] = cv::Point2f(tvecs[i][0], tvecs[i][1]);
        }

        // Check if all required markers are detected
        bool allMarkersDetected = true;
        for (const auto& marker : fixedMarkerPositions)
        {
            if (markerPositions.find(marker.first) == markerPositions.end())
            {
                allMarkersDetected = false;
                break;
            }
        }

        // Compute the transformation matrix if all markers are detected
        if (allMarkersDetected)
        {
            std::vector<cv::Point2f> srcPoints;
            // cv::findHomography expects 2D-2D correspondences, so use only the x/y of the
            // configured arena positions as destination points.
            std::vector<cv::Point2f> dstPoints;
            for (const auto& marker : markerPositions)
            {
                if (fixedMarkerPositions.find(marker.first) != fixedMarkerPositions.end())
                {
                    srcPoints.push_back(marker.second);
                    const cv::Point3f& arenaPos = fixedMarkerPositions[marker.first];
                    dstPoints.push_back(cv::Point2f(arenaPos.x, arenaPos.y));
                }
            }
            // A homography requires at least four point correspondences.
            if (srcPoints.size() >= 4)
            {
                transformationMatrix = cv::findHomography(srcPoints, dstPoints);
                calibrationDone = true;
            }
        }
    }
    else
    {
        ROS_INFO("No ArUco markers detected");
    }
}

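// Grab frames from the camera and run processFrame() until calibration succeeds,
// ROS shuts down, or the user presses ESC in the debug window.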
void frameProcessingThread(cv::VideoCapture& cap, std::map<int, cv::Point2f>& markerPositions, cv::Mat& transformationMatrix, bool& calibrationDone)
{
    while (ros::ok() && !calibrationDone)
    {
        cv::Mat frame;
        cap >> frame; // Capture a new frame
        if (frame.empty())
        {
            ROS_ERROR("Captured empty frame");
            continue;
        }

        processFrame(frame, markerPositions, transformationMatrix, calibrationDone);

        // Display the original frame with detected markers for debugging
        cv::imshow("Arena Calibration", frame);
        if (cv::waitKey(30) == 27) break; // Exit on ESC key
    }
}

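// Open the camera, run the frame-processing loop until the arena markers are found,
// and save the resulting transformation matrix to arena_transformation.yml.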
void calibrateArena()
{
    cv::VideoCapture cap(0, cv::CAP_V4L2); // Open the default camera with the V4L2 backend
    if (!cap.isOpened())
    {
        ROS_ERROR("Unable to open camera");
        return;
    }

    // Set the desired resolution
    cap.set(cv::CAP_PROP_FRAME_WIDTH, 1920);  // Set width to 1920 pixels
    cap.set(cv::CAP_PROP_FRAME_HEIGHT, 1080); // Set height to 1080 pixels

    // Set the desired frame rate
    cap.set(cv::CAP_PROP_FPS, 15); // Set frame rate to 15 Hz

    std::map<int, cv::Point2f> markerPositions;
    cv::Mat transformationMatrix;

    loadMarkerPositions("arena_config.yml");

    bool calibrationDone = false;

    // Start the frame processing thread
    std::thread processingThread(frameProcessingThread, std::ref(cap), std::ref(markerPositions), std::ref(transformationMatrix), std::ref(calibrationDone));

    processingThread.join();

    if (calibrationDone)
    {
        // Save the transformation matrix to a configuration file
        saveTransformationMatrix("arena_transformation.yml", transformationMatrix);
        ROS_INFO("Arena calibration completed and transformation saved to arena_transformation.yml");
    }
    else
    {
        ROS_ERROR("Calibration failed. Not enough markers detected.");
    }
}

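// Node entry point: initialize ROS, load the camera intrinsics and arena marker
// configuration, then run the calibration routine.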
int main(int argc, char** argv)
{
    ros::init(argc, argv, "arena_calibration");
    ros::NodeHandle nh;

    // Load camera parameters
    cv::FileStorage fs("camera_parameters.yml", cv::FileStorage::READ);
    if (!fs.isOpened())
    {
        ROS_ERROR("Unable to open camera parameters file");
        return -1;
    }
    fs["camera_matrix"] >> cameraMatrix;
    fs["distortion_coefficients"] >> distCoeffs;
    fs.release();

    // Load marker positions
    loadMarkerPositions("arena_config.yml");

    calibrateArena();

    return 0;
}