From 85f955320c46ecb63ede3eac86f6c9a05bb52674 Mon Sep 17 00:00:00 2001 From: Alexandre Date: Sun, 19 Feb 2023 17:42:56 +0100 Subject: [PATCH] Change in the Program --- Normalization.m | 0 Numerization2.m | 34 ++++++++++++++ ProgrammingForFaceDetection.py | 26 ----------- TotalCode.py | 84 ++++++++++++++++++++++++++++++++++ 4 files changed, 118 insertions(+), 26 deletions(-) delete mode 100644 Normalization.m create mode 100644 Numerization2.m delete mode 100644 ProgrammingForFaceDetection.py create mode 100644 TotalCode.py diff --git a/Normalization.m b/Normalization.m deleted file mode 100644 index e69de29..0000000 diff --git a/Numerization2.m b/Numerization2.m new file mode 100644 index 0000000..170a315 --- /dev/null +++ b/Numerization2.m @@ -0,0 +1,34 @@ +clear; +close all; +clc; +pkg load io; + +% sample frequency (Hz) +fps = 30; + +%Load the data inside Octave from our first code +data = load("dataRGB"); +greenchannel = data(:,2); +n = data(:,2); + +%Normalization (z-score) of the values +greenchannel_avg = mean(greenchannel); +greenchannel_std = std(greenchannel); +greenchannel_normalized = (greenchannel - greenchannel_avg)/greenchannel_std; + +%Fast Fourier Transform +y = fft(greenchannel_normalized); + +%number of samples +n = length(greenchannel_normalized); + +% frequency range + +fr = (0:n-1)*(fps/n); +power = abs(y).^2/n; + +figure(2) +plot(fr, power,'linewidth',3); +xlabel('Frequency') +ylabel('Power') +xlim([0.75 4]) \ No newline at end of file diff --git a/ProgrammingForFaceDetection.py b/ProgrammingForFaceDetection.py deleted file mode 100644 index 83e9afc..0000000 --- a/ProgrammingForFaceDetection.py +++ /dev/null @@ -1,26 +0,0 @@ -# Programmin for Face detection -# Haar Cascade -import cv2 - -# Charger le classificateur Haar Cascade -face_cascade = cv2.CascadeClassifier("Haar_Cascade.xml") - -# Charger l'image dans OpenCV -# Convertir l'image en niveaux de gris -img = cv2.imread("Image.jpg") -gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) -# Just to check 
if there is a photo -if gray_img.shape[0] == 0 or gray_img.shape[1] == 0: - print("Error: input image is empty") - -# Détection des visages dans l'image -faces = face_cascade.detectMultiScale( - gray_img, scaleFactor=1.1, minNeighbors=5) - -# Dessiner un rectangle autour de chaque visage détecté -for x, y, w, h in faces: - img = cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 3) - -# Afficher l'image -cv2.imshow("Faces", img) -cv2.waitKey(0) diff --git a/TotalCode.py b/TotalCode.py new file mode 100644 index 0000000..88e6849 --- /dev/null +++ b/TotalCode.py @@ -0,0 +1,84 @@ +import cv2 +import numpy as np +import matplotlib.pyplot as plt + +# Load video using OpenCV +video = cv2.VideoCapture("Sophia.mp4") + +# Get video information +fps = video.get(cv2.CAP_PROP_FPS) +num_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT)) + +# Load face detection model +face_detector = cv2.CascadeClassifier("Haar_Cascade.xml") + +# Lists to store information for each frame +frame_matrices = [] +average_rgb = [] + +# Loop through frames +for i in range(num_frames): + ret, frame = video.read() + + # Check if frame was successfully retrieved + if not ret: + break + + # Skip frames to get 15 frames per second + if i % int(fps/15) != 0: + continue + + # Convert to grayscale + gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) + + # Detect faces + face = face_detector.detectMultiScale( + gray_frame, scaleFactor=1.1, minNeighbors=5) + # print(face) + + # Draw a rectangle on each detected face and if face is detected store information + if len(face) > 0: + x, y, w, h = face[0] + face = frame[y:y+h, x:x+w] + frame = cv2.rectangle(frame, (int(x*1.15), int(y*1)), + (x + int(w*0.7), y + int(h*0.2)), (0, 255, 0), 3) + # Show image to check if the rectangle is well positioned on the forehead + # cv2.imshow("Faces", frame) + # cv2.waitKey(0) + + # Split into RGB channels + b, g, r = cv2.split(face) + + # Calculate average on each channel + avg_b = np.mean(b) / 255 + avg_g = np.mean(g) / 
255 + avg_r = np.mean(r) / 255 + + # Add to list + average_rgb.append([avg_b, avg_g, avg_r]) + # frame_matrices.append(face) + print(average_rgb) + # print(frame_matrices) + +# Convert to numpy array +average_rgb = np.array(average_rgb) +print(average_rgb) +np.savetxt("dataRGB", average_rgb) +# Get number of frames +num_frames = average_rgb.shape[0] + +# Create an array of frame numbers +frame_numbers = np.arange(num_frames) + +# Plot the line graph +plt.plot(frame_numbers, average_rgb[:, 0], 'b', label='Blue') +plt.plot(frame_numbers, average_rgb[:, 1], 'g', label='Green') +plt.plot(frame_numbers, average_rgb[:, 2], 'r', label='Red') + +# Add labels and legend +plt.xlabel('Frame Number') +plt.ylabel('Normalized Average RGB Value') +plt.legend(loc='upper right') + +# Show the plot +plt.show()