updated ppg.cpp and debugged it, both with GIBERT's help and close assistance

This commit is contained in:
Nicolas TRAGLIA 2023-03-01 12:32:57 +01:00
parent 58d4612738
commit dfc3b9b40a
1 changed file with 167 additions and 158 deletions

325
ppg.cpp
View File

@ -1,158 +1,167 @@
//C++
#include <iostream>
//include those opencv2 files in our program
#include "opencv2/opencv.hpp"
#include "opencv2/videoio.hpp"
#include "opencv2/highgui.hpp"

// Capture configuration and acquisition state shared by main().
int FPS = 10;              // framerate of the recording device (frames per second)
int DISCARD_DURATION = 5;  // seconds of frames thrown away at start-up (camera white balancing)
bool isDiscardData = true; // true while start-up frames are still being discarded
int countDiscard = 0;      // number of frames discarded so far

// Sample-buffer state: the PPG signal is analysed once the buffer holds
// BUFFER_DURATION seconds of samples, i.e. FPS*BUFFER_DURATION values.
bool isBufferFull = false; // becomes true once the green-channel buffer is filled
int sampleIdBuffer = 0;    // next write index into the green-channel buffer
int BUFFER_DURATION = 15;  // seconds of signal accumulated before analysis
//Display normalised signal
// plotGraph: render a 1-D signal as a blue polyline on a white canvas,
// one pixel column per sample.
//   vals   - samples to plot (must not be modified; taken by ref as before)
//   YRange - [min,max] pair used to size the image height and scale the curve
// returns a CV_8UC3 image of (YRange[1]-YRange[0]+1) rows x vals.size() cols.
template <typename T>
cv::Mat plotGraph(std::vector<T>& vals, int YRange[2])
{
    int rows = YRange[1] - YRange[0] + 1;
    // Guard: minmax_element on an empty range would dereference end().
    if (vals.empty())
    {
        return cv::Mat(rows, 1, CV_8UC3, cv::Scalar::all(255));
    }
    auto it = std::minmax_element(vals.begin(), vals.end());
    // Guard: for a flat signal ceil(max-min) is 0 and the division below
    // would produce inf; draw a flat line instead.
    float span = std::ceil(*it.second - *it.first);
    float scale = (span != 0.f) ? 1.f / span : 0.f;
    float bias = *it.first;
    // White canvas (the original built 255*ones and then setTo(255) — one
    // initialisation is enough).
    cv::Mat image(rows, (int)vals.size(), CV_8UC3, cv::Scalar::all(255));
    for (int i = 0; i < (int)vals.size() - 1; i++)
    {
        cv::line(image,
                 cv::Point(i,     rows - 1 - (vals[i]     - bias)*scale*YRange[1]),
                 cv::Point(i + 1, rows - 1 - (vals[i + 1] - bias)*scale*YRange[1]),
                 cv::Scalar(255, 0, 0), 1);
    }
    return image;
}
int main(){ int main(){
//Print "PPG algorithm" to terminal //Print "PPG algorithm" to terminal
//Note to self: std::endl; returns to line in terminal; use it everytime when done printing something. //Note to self: std::endl; returns to line in terminal; use it everytime when done printing something.
std::cout << "PPG algorithm"<< std::endl; std::cout << "PPG algorithm"<< std::endl;
cv::VideoCapture cap; cv::VideoCapture cap;
cap.open(0); cap.open(0);
if (!cap.isOpened()) if (!cap.isOpened())
{ {
//Check if we can access the camera //Check if we can access the camera
std::cerr<<"[ERROR] unable to open camera!"<<std::endl; std::cerr<<"[ERROR] unable to open camera!"<<std::endl;
return -2; return -2;
} }
cv::CascadeClassifier faceDetector; cv::CascadeClassifier faceDetector;
if(!faceDetector.load("./haarcascade_frontalface_alt.xml"))//Testing to see if cascade_frontalface.xml is available (necessary for the program to work) if(!faceDetector.load("./haarcascade_frontalface_alt.xml"))//Testing to see if cascade_frontalface.xml is available (necessary for the program to work)
{ {
std::cerr<<"[ERROR] Unable to load face cascade"<<std::endl; std::cerr<<"[ERROR] Unable to load face cascade"<<std::endl;
return -1; return -1;
}; };
while (true) while (true)
{ {
if(isDiscardData) //This function delays the beginning of the analysis of the data to avoid processing frames taken during white balancing. //Create a matrix to store the image from the cam
{ cv::Mat frame;
countDiscard++; //Wait for a new frame from camera and store it into "frame"
if (countDiscard == DISCARD_DURATION*FPS) cap.read(frame);
{
isDiscardData=false; //Check if we succeeded
} if (frame.empty()) //If the camera records a blank frame, returns an error.
} {
else std::cerr<<"[ERROR] Blank frame grabbed"<<std::endl;
{ break;
//Create a matrix to store the image from the cam }
cv::Mat frame;
//Wait for a new frame from camera and store it into "frame" if(isDiscardData) //This function delays the beginning of the analysis of the data to avoid processing frames taken during white balancing.
cap.read(frame); {
countDiscard++;
//Check if we succeeded if (countDiscard == DISCARD_DURATION*FPS)
if (frame.empty()) //If the camera records a blank frame, returns an error. {
{ isDiscardData=false;
std::cerr<<"[ERROR] Blank frame grabbed"<<std::endl; }
break; }
} else
cv::imshow("Color", frame); //shows the colored frame {
if (cv::waitKey(1000.0/FPS)>=0) //Stops after 1000/FPS frames cv::Mat frame_gray;
{ cv::cvtColor(frame, frame_gray, cv::COLOR_BGR2GRAY);
break; //cv::imshow("Gray", frame_gray); //Shows frame in greyscale
} std::vector<cv::Rect> faceRectangles;
faceDetector.detectMultiScale(frame_gray, faceRectangles, 1.1, 3, 0, cv::Size(20,20)); //Detects face
cv::Mat frame_gray;
cv::cvtColor(frame, frame_gray, cv::COLOR_BGR2GRAY); if (faceRectangles.size() > 0)
cv::imshow("Gray", frame_gray); //Shows frame in greyscale cv::rectangle(frame, faceRectangles[0], cv::Scalar(0,0,255),1,1,0);
std::vector<cv::Rect> faceRectangles;
faceDetector.detectMultiScale(frame_gray, faceRectangles, 1.1, 3, 0, cv::Size(20,20)); //Detects face cv::Rect foreheadROI; //create a forehead ROI equal to the face ROI slightly moved upward.
if (faceRectangles.size() > 0)
cv::Rect foreheadROI; //create a forehead ROI equal to the face ROI slightly moved upward. {
foreheadROI = faceRectangles[0]; foreheadROI = faceRectangles[0];
foreheadROI.height *= 0.3; foreheadROI.height *= 0.3;
cv::Mat frame_forehead = frame(foreheadROI); cv::Mat frame_forehead = frame(foreheadROI);
cv::Scalar avg_forehead = mean(frame_forehead); //calculates mean of object frame_forehead cv::Scalar avg_forehead = mean(frame_forehead); //calculates mean of object frame_forehead
//Buffer of average value for the green channel over the forehead ROI
cv::Mat greenSignal(1, FPS*BUFFER_DURATION, CV_64F); //Buffer of average value for the green channel over the forehead ROI
if (!isBufferFull) cv::Mat greenSignal(1, FPS*BUFFER_DURATION, CV_64F);
{ if (!isBufferFull)
greenSignal.at<double>(0, sampleIdBuffer) = avg_forehead[1] ; {
sampleIdBuffer++; std::cout << "sampleIdBuffer= " << sampleIdBuffer << " / " << FPS*BUFFER_DURATION << std::endl;
if (sampleIdBuffer == FPS*BUFFER_DURATION) greenSignal.at<double>(0, sampleIdBuffer) = avg_forehead[1] ;
{ sampleIdBuffer++;
isBufferFull = true; if (sampleIdBuffer == FPS*BUFFER_DURATION)
} {
} isBufferFull = true;
std::cout<<"greenSignal= "<<greenSignal<<std::endl;
//Normalisation of our signal }
std::vector<double> greenSignalNormalized; }
cv::Scalar mean, stddev; else
cv::meanStdDev(greenSignal, mean, stddev); {
for (int l_sample=0; l_sample < FPS*BUFFER_DURATION; l_sample++)
{ //Normalisation of our signal
greenSignalNormalized.push_back((greenSignal.at<double>(0, l_sample)-mean[0])/stddev[0]); std::vector<double> greenSignalNormalized;
} cv::Scalar mean, stddev;
//This is used in the main function to display the signal cv::meanStdDev(greenSignal, mean, stddev);
int range[2] = {0, (int)(FPS*BUFFER_DURATION)}; for (int l_sample=0; l_sample < FPS*BUFFER_DURATION; l_sample++)
cv::imshow("green", plotGraph(greenSignalNormalized, range)); {
greenSignalNormalized.push_back((greenSignal.at<double>(0, l_sample)-mean[0])/stddev[0]);
cv::Mat greenFFT; }
std::vector<double> greenFFTModule; //This is used in the main function to display the signal
cv::dft(greenSignalNormalized,greenFFT,cv::DFT_ROWS|cv::DFT_COMPLEX_OUTPUT); int range[2] = {0, (int)(FPS*BUFFER_DURATION)};
cv::Mat planes[] = {cv::Mat::zeros(greenSignalNormalized.size(),1, CV_64F), cv::imshow("green", plotGraph(greenSignalNormalized, range));
cv::Mat::zeros(greenSignalNormalized.size(),1, CV_64F)};
cv::split(greenFFT, planes); // planes[0] = Re(DFT(I), planes[1] = Im(DFT(I)) cv::Mat greenFFT; //Fast Fourier Transform
greenFFTModule.clear(); std::vector<double> greenFFTModule;
for (int l=0; l < planes[1].cols; l++) cv::dft(greenSignalNormalized,greenFFT,cv::DFT_ROWS|cv::DFT_COMPLEX_OUTPUT);
{ cv::Mat planes[] = {cv::Mat::zeros(greenSignalNormalized.size(),1, CV_64F),
double moduleFFT = pow(planes[1].at<double>(0,l),2) + cv::Mat::zeros(greenSignalNormalized.size(),1, CV_64F)};
pow(planes[0].at<double>(0,l),2); cv::split(greenFFT, planes); // planes[0] = Re(DFT(I), planes[1] = Im(DFT(I))
greenFFTModule.push_back(sqrt(moduleFFT)); greenFFTModule.clear();
} for (int l=0; l < planes[1].cols; l++)
// display green FFT {
cv::imshow("FFT module green", plotGraph(greenFFTModule, range)); double moduleFFT = pow(planes[1].at<double>(0,l),2) +
pow(planes[0].at<double>(0,l),2);
std::vector<double> sampleVector{}; greenFFTModule.push_back(sqrt(moduleFFT));
for (int i=0; i<greenFFTModule.length();++i) }
{ // display green FFT
if (greenFFTModule.at(i)>=0.5 && greenFFTModule.at(i)<=4) cv::imshow("FFT module green", plotGraph(greenFFTModule, range));
{
sampleVector.push_back(greenFFTModule.at(i)); float maxValue=-1;
} int indexValue=0;
} for(auto i=0.5*(BUFFER_DURATION);i<(4*BUFFER_DURATION);i++)
std::cout << "values in interval: "<<"\n"; {
for (auto i: sampleVector()) if(greenFFTModule[i]>maxValue)
{ {
std::cout << i << ' '; // will print vector's content maxValue=greenFFTModule[i];
} indexValue=i;
//get maximum value of sampleVector and print it }
std::cout<<"max frequency: "<<"\n"<<max_element(sampleVector.begin(cloud), sampleVector.end(cloud)); }
return 0; float HRBPM=(indexValue*60.0)/(BUFFER_DURATION);
} std::cout<<HRBPM << std::endl;
} }
} }
}
cv::imshow("Color", frame); //shows the colored frame
if (cv::waitKey(1000.0/FPS)>=0) //Stops after 1000/FPS frames
{
break;
}
}
}