# SignalProjectFaceDetection/CompletedCode.py
# (Original listing metadata: 75 lines, 1.8 KiB, Python)
import cv2
import numpy as np
import matplotlib.pyplot as plt
# Load video using OpenCV.
# VideoCapture does not raise on a missing/unreadable file, so check explicitly.
video = cv2.VideoCapture("PPG_Programming.mp4")
if not video.isOpened():
    raise IOError("Could not open video file 'PPG_Programming.mp4'")

# Get video information: native frame rate and total frame count
fps = video.get(cv2.CAP_PROP_FPS)
num_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))

# Load the Haar-cascade face detection model.
# CascadeClassifier also fails silently on a bad path; empty() detects that.
face_detector = cv2.CascadeClassifier("Haar_Cascade.xml")
if face_detector.empty():
    raise IOError("Could not load cascade file 'Haar_Cascade.xml'")

# Per-frame storage: cropped face images and their normalized channel means
frame_matrices = []
average_rgb = []
# Loop through frames, sampling down to roughly 15 frames per second.
# max(1, ...) guards against modulo-by-zero when the source fps is < 15
# (or when the fps metadata is missing and reads as 0).
frame_step = max(1, int(fps / 15)) if fps else 1
for i in range(num_frames):
    ret, frame = video.read()
    # Stop early if a frame could not be retrieved (end of stream / decode error)
    if not ret:
        break
    # Skip frames so roughly 15 per second are processed
    if i % frame_step != 0:
        continue
    # Haar cascades operate on single-channel (grayscale) images
    gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Detect faces (scaleFactor=1.3, minNeighbors=5)
    faces = face_detector.detectMultiScale(gray_frame, 1.3, 5)
    # If at least one face is detected, record the first one
    if len(faces) > 0:
        x, y, w, h = faces[0]
        face = frame[y:y+h, x:x+w]
        # OpenCV frames are stored in BGR channel order
        b, g, r = cv2.split(face)
        # Per-channel means normalized to [0, 1]
        avg_b = np.mean(b) / 255
        avg_g = np.mean(g) / 255
        avg_r = np.mean(r) / 255
        # Stored as [blue, green, red]; downstream plotting relies on this order
        average_rgb.append([avg_b, avg_g, avg_r])
        frame_matrices.append(face)
# Release the capture handle now that all frames have been consumed
video.release()
# Convert the collected per-frame means to an (N, 3) numpy array
average_rgb = np.array(average_rgb)

if average_rgb.size == 0:
    # No face was detected in any sampled frame: np.array([]) is 1-D and
    # average_rgb[:, 0] would raise IndexError, so skip plotting entirely.
    print("No faces detected; nothing to plot.")
else:
    # Get number of retained (face-bearing) frames
    num_frames = average_rgb.shape[0]
    # Create an array of frame numbers for the x-axis
    frame_numbers = np.arange(num_frames)
    # Plot one line per channel; columns are stored in BGR order,
    # so column 0 is blue, 1 is green, 2 is red.
    plt.plot(frame_numbers, average_rgb[:, 0], 'b', label='Blue')
    plt.plot(frame_numbers, average_rgb[:, 1], 'g', label='Green')
    plt.plot(frame_numbers, average_rgb[:, 2], 'r', label='Red')
    # Add labels and legend
    plt.xlabel('Frame Number')
    plt.ylabel('Normalized Average RGB Value')
    plt.legend(loc='upper right')
    # Show the plot
    plt.show()