// SignalLab1/ppg.cpp
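//
// Minimal photoplethysmography (PPG) sketch: the average green intensity of
// a forehead region varies slightly with blood volume at each heartbeat, so
// buffering and normalising that average yields a crude pulse signal.
//
// Build sketch (assuming an OpenCV 4 install visible to pkg-config; adjust
// the package name for your setup):
//   g++ ppg.cpp -o ppg $(pkg-config --cflags --libs opencv4)
// haarcascade_frontalface_alt.xml ships with OpenCV (data/haarcascades/)
// and must sit next to the binary.
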
#include <algorithm> // std::minmax_element
#include <cmath>     // std::ceil, std::sqrt
#include <iostream>
#include <vector>

// OpenCV headers used below (opencv.hpp is the umbrella header; the module
// headers are kept explicit for clarity)
#include "opencv2/opencv.hpp"
#include "opencv2/videoio.hpp"   // cv::VideoCapture
#include "opencv2/highgui.hpp"   // cv::imshow, cv::waitKey
#include "opencv2/imgproc.hpp"   // cv::cvtColor, cv::line
#include "opencv2/objdetect.hpp" // cv::CascadeClassifier
const int FPS = 30;             // frame rate of the recording device
const int DISCARD_DURATION = 5; // seconds of frames discarded while the camera white-balances
bool isDiscardData = true;
int countDiscard = 0;
bool isBufferFull = false;      // buffer state, initialised before main()
int sampleIdBuffer = 0;
const int BUFFER_DURATION = 5;  // seconds of samples kept in the green-channel buffer

// Draw vals as a line plot on a white canvas. Defined before main() because
// C++ does not allow a (template) function to be defined inside another
// function, which is where the first draft placed it.
template <typename T>
cv::Mat plotGraph(std::vector<T>& vals, int YRange[2])
{
    auto it = std::minmax_element(vals.begin(), vals.end());
    float range = std::ceil((float)(*it.second - *it.first));
    float scale = (range > 0.f) ? 1.f / range : 1.f; // avoid dividing by zero on a flat signal
    float bias = (float)*it.first;
    int rows = YRange[1] - YRange[0] + 1;
    cv::Mat image(rows, (int)vals.size(), CV_8UC3, cv::Scalar::all(255)); // white canvas
    for (int i = 0; i < (int)vals.size() - 1; i++)
    {
        cv::line(image,
                 cv::Point(i,     cvRound(rows - 1 - (vals[i]     - bias) * scale * YRange[1])),
                 cv::Point(i + 1, cvRound(rows - 1 - (vals[i + 1] - bias) * scale * YRange[1])),
                 cv::Scalar(255, 0, 0), 1); // blue trace (BGR)
    }
    return image;
}

int main()
{
    // Print "PPG algorithm" to the terminal. std::endl ends the line and
    // flushes the stream; use it when a printed message is complete.
    std::cout << "PPG algorithm" << std::endl;

    cv::VideoCapture cap;
    cap.open(0); // device 0 is the default camera
    // Check that we can access the camera
    if (!cap.isOpened())
    {
        std::cerr << "[ERROR] Unable to open camera!" << std::endl;
        return -2;
    }

    cv::CascadeClassifier faceDetector;
    // The Haar cascade file is required for face detection; it must be
    // present in the working directory.
    if (!faceDetector.load("./haarcascade_frontalface_alt.xml"))
    {
        std::cerr << "[ERROR] Unable to load face cascade" << std::endl;
        return -1;
    }

    // Buffer of green-channel averages over the forehead ROI. Declared once,
    // before the loop: declared inside the loop (as in the first draft) it
    // would be recreated empty on every frame and never fill up.
    cv::Mat greenSignal(1, FPS * BUFFER_DURATION, CV_64F);

    while (true)
    {
        // Grab a new frame from the camera and store it in "frame"
        cv::Mat frame;
        cap.read(frame);
        if (frame.empty()) // a blank frame usually means the capture failed
        {
            std::cerr << "[ERROR] Blank frame grabbed" << std::endl;
            break;
        }

        // Delay the start of the analysis to avoid processing frames taken
        // while the camera is still white-balancing. The frame is still read
        // above so that this branch counts real frames instead of spinning
        // through empty loop iterations.
        if (isDiscardData)
        {
            countDiscard++;
            if (countDiscard == DISCARD_DURATION * FPS)
            {
                isDiscardData = false;
            }
            continue;
        }

        cv::imshow("Color", frame); // show the colour frame
        // waitKey pauses ~one frame period (1000/FPS ms) and returns the code
        // of any key pressed, so pressing any key stops the loop.
        if (cv::waitKey(1000 / FPS) >= 0)
        {
            break;
        }

        cv::Mat frame_gray;
        cv::cvtColor(frame, frame_gray, cv::COLOR_BGR2GRAY); // the Haar detector works on greyscale
        cv::imshow("Gray", frame_gray); // show the frame in greyscale

        // Detect faces: scale factor 1.1, at least 3 neighbours per
        // detection, minimum face size 20x20 pixels
        std::vector<cv::Rect> faceRectangles;
        faceDetector.detectMultiScale(frame_gray, faceRectangles, 1.1, 3, 0, cv::Size(20, 20));
        if (faceRectangles.empty()) // no face this frame: skip it rather than index an empty vector
        {
            continue;
        }

        // Forehead ROI: keep the top 30% of the first detected face rectangle
        cv::Rect foreheadROI = faceRectangles[0];
        foreheadROI.height *= 0.3;
        cv::Mat frame_forehead = frame(foreheadROI);
        cv::Scalar avg_forehead = cv::mean(frame_forehead); // per-channel (BGR) mean over the ROI

        // Fill the buffer with the average green value over the forehead ROI
        // (greenSignal is declared once, before the loop)
        if (!isBufferFull)
        {
            greenSignal.at<double>(0, sampleIdBuffer) = avg_forehead[1]; // channel 1 = green in BGR
            sampleIdBuffer++;
            if (sampleIdBuffer == FPS * BUFFER_DURATION)
            {
                isBufferFull = true;
            }
            continue; // do not normalise until the buffer is full
        }
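
        // Possible extension, not in the original code: once full, the
        // buffer above never slides, so the plot freezes on the first
        // BUFFER_DURATION seconds. A rolling buffer would shift every
        // sample left and append the newest green average, e.g.:
        //
        //   for (int s = 1; s < FPS * BUFFER_DURATION; s++)
        //       greenSignal.at<double>(0, s - 1) = greenSignal.at<double>(0, s);
        //   greenSignal.at<double>(0, FPS * BUFFER_DURATION - 1) = avg_forehead[1];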

        // Normalise the signal to zero mean and unit variance:
        // z[i] = (x[i] - mean) / stddev
        std::vector<double> greenSignalNormalized;
        cv::Scalar mean, stddev;
        cv::meanStdDev(greenSignal, mean, stddev);
        for (int l_sample = 0; l_sample < FPS * BUFFER_DURATION; l_sample++)
        {
            greenSignalNormalized.push_back((greenSignal.at<double>(0, l_sample) - mean[0]) / stddev[0]);
        }

        // Display the normalised signal. plotGraph is defined above main();
        // the first draft declared the template inside the loop, which C++
        // does not allow.
        int yRange[2] = {0, 100}; // plot height in pixels
        cv::imshow("Normalised green signal", plotGraph(greenSignalNormalized, yRange));
    }
    return 0;
}
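
// ---------------------------------------------------------------------------
// Sketch, not called above: a common next step is to turn the normalised
// green signal into a heart-rate estimate by locating the dominant DFT
// frequency in a plausible pulse band. The function name, the 0.7-4.0 Hz
// band (42-240 bpm), and its use here are assumptions, not part of the
// original lab code.
static double estimateHeartRateBpm(const std::vector<double>& signal, int fps)
{
    int n = (int)signal.size();
    cv::Mat src(signal, true);                      // n x 1 CV_64F copy of the samples
    cv::Mat spectrum;
    cv::dft(src, spectrum, cv::DFT_COMPLEX_OUTPUT); // complex spectrum, one bin per row

    double bestMag = 0.0, bestFreq = 0.0;
    for (int k = 1; k < n / 2; k++)                 // skip DC, keep positive frequencies
    {
        double freq = (double)k * fps / n;          // bin k corresponds to k*fps/n Hz
        if (freq < 0.7 || freq > 4.0)               // assumed human pulse band (42-240 bpm)
        {
            continue;
        }
        cv::Vec2d c = spectrum.at<cv::Vec2d>(k, 0);
        double mag = std::sqrt(c[0] * c[0] + c[1] * c[1]);
        if (mag > bestMag)
        {
            bestMag = mag;
            bestFreq = freq;
        }
    }
    return 60.0 * bestFreq;                         // Hz -> beats per minute
}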