diff --git a/.idea/Identikit-Robot.iml b/.idea/Identikit-Robot.iml
index 8d90bee..7c91e7f 100644
--- a/.idea/Identikit-Robot.iml
+++ b/.idea/Identikit-Robot.iml
diff --git a/.idea/misc.xml b/.idea/misc.xml
index 933f283..1260ee9 100644
--- a/.idea/misc.xml
+++ b/.idea/misc.xml
diff --git a/Identikit-Robot/.idea/workspace.xml b/Identikit-Robot/.idea/workspace.xml
deleted file mode 100644
index d3fcf3b..0000000
--- a/Identikit-Robot/.idea/workspace.xml
+++ /dev/null
diff --git a/ROI_face.py b/ROI_face.py
new file mode 100644
index 0000000..01cdb8b
--- /dev/null
+++ b/ROI_face.py
@@ -0,0 +1,33 @@
+import cv2
+import numpy as np
+
+def roi(frame):
+    # Mask everything outside an elliptical region around the detected face
+    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+    blurred = cv2.GaussianBlur(gray, (3, 3), 0)
+    height, width = blurred.shape
+    face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
+    faces = face_cascade.detectMultiScale(blurred, scaleFactor=1.3, minNeighbors=5, minSize=(30, 30))
+
+    result = frame  # Fallback: return the frame unchanged if no face is detected
+    for (x, y, w, h) in faces:
+        # Centre of the detected face box
+        c_x = int(x + w / 2)
+        c_y = int(y + h / 2)
+
+        # Ellipse centre, shifted upwards so the mask also covers the hair
+        c_x2 = int((int(c_x - w / 2 * 0.8) + int(c_x + w / 2 * 0.8)) / 2)
+        c_y2 = int((int(c_y - h / 2 * 1.4) + int(c_y + h / 2 * 1.1)) / 2)
+
+        center_ellipse = (c_x2, c_y2)
+        axes = (int(w / 2), int(h / 2 * 1.4))  # Semi-axis along x, semi-axis along y
+        angle = 0  # Rotation angle
+        # Draw a filled white ellipse on a black mask, then keep only that region
+        mask = np.zeros((height, width), dtype=np.uint8)
+        cv2.ellipse(mask, center_ellipse, axes, angle, 0, 360, 255, -1)  # 255 for a white oval
+        result = cv2.bitwise_and(frame, frame, mask=mask)
+
+    return result
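+
+# Usage sketch, assuming a webcam at index 0 and the Haar cascade XML in the
+# working directory:
+if __name__ == '__main__':
+    cap = cv2.VideoCapture(0)
+    ok, frame = cap.read()
+    cap.release()
+    if ok:
+        cv2.imshow('Face ROI', roi(frame))
+        cv2.waitKey(0)
+        cv2.destroyAllWindows()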
diff --git a/conditions.png b/conditions.png
new file mode 100644
index 0000000..f763b65
Binary files /dev/null and b/conditions.png differ
diff --git a/get_head.py b/get_head.py
index bf061e1..1ad0ab8 100644
--- a/get_head.py
+++ b/get_head.py
@@ -1,47 +1,37 @@
import cv2
-import numpy as np
-
-# Start video capture from the webcam
+cap = cv2.VideoCapture(0)
+
def get_image():
- cap = cv2.VideoCapture(0)
- count = 0
- condition = False
- while condition == False:
+    # Set the window name
+    window_name = 'Camera Capture'
+
+    # Set the countdown duration in seconds
+    countdown_duration = 10
+    # Start the countdown from 10 down to 1
+    for countdown in range(countdown_duration, 0, -1):
ret, frame = cap.read()
- gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
- blurred = cv2.GaussianBlur(gray, (3, 3), 0)
- height, width = blurred.shape
- face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
- faces = face_cascade.detectMultiScale(blurred, scaleFactor=1.3, minNeighbors=5, minSize=(30, 30))
- color_cicle = (0, 0, 255)
- for (x, y, w, h) in faces:
- if (x + w // 2 ) - int(width / 2) <= 10 and (x + w // 2 ) - int(width / 2) >= -10:
- if (y + h // 2) - int(height / 2) <= 10 and (y + h // 2) - int(height / 2) >= -10:
- if (w + h)//4 >= 140 and (w + h)//4 <= 160:
- color_cicle = (0, 255, 0)
- count += 1
- else:
- count = 0
- else:
- count = 0
- else:
- count = 0
+        # Overlay the remaining seconds on the live frame
+        font = cv2.FONT_HERSHEY_SIMPLEX
+        cv2.putText(frame, str(countdown), (10, 30), font, 1, (0, 255, 0), 2, cv2.LINE_AA)
+        # Display the camera feed
+        cv2.imshow(window_name, frame)
- if count == 5:
- print("take picture here")
- image = gray
- condition = True
- return image
- cv2.circle(frame, (x + w // 2, y + h // 2), (w + h) // 4, color_cicle, 5)
- cv2.circle(frame, (int(width / 2), int(height / 2)), 150, (255, 255, 255), 2)
-
- cv2.imshow('Head Detection', frame)
- if cv2.waitKey(1) & 0xFF == ord('q'):
+        # Wait for 1 second and check for a key press
+        if cv2.waitKey(1000) & 0xFF == 27:  # 27 is the ASCII code for the 'ESC' key
break
- cap.release()
- cv2.destroyAllWindows()
+    # Take a picture at the end of the countdown
+    ret, frame = cap.read()
+    # cv2.imwrite("image.png", frame)
+    # Release the camera and close the OpenCV window
+    cap.release()
+    cv2.destroyAllWindows()
+    return frame
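+
+# Usage sketch (main.py normally imports this module; assumes a webcam at index 0):
+if __name__ == '__main__':
+    captured = get_image()
+    cv2.imshow('Captured Frame', captured)
+    cv2.waitKey(0)
+    cv2.destroyAllWindows()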
diff --git a/image.jpg b/image.jpg
new file mode 100644
index 0000000..1289e65
Binary files /dev/null and b/image.jpg differ
diff --git a/image.png b/image.png
new file mode 100644
index 0000000..0ca0111
Binary files /dev/null and b/image.png differ
diff --git a/img.jpg b/img.jpg
deleted file mode 100644
index d9839ad..0000000
Binary files a/img.jpg and /dev/null differ
diff --git a/main.py b/main.py
index 4aa3afe..42190c8 100644
--- a/main.py
+++ b/main.py
@@ -1,12 +1,14 @@
import cv2
-
+import show_img_conditions
+import ROI_face
import get_head
+# show_img_conditions.show()
+frame = get_head.get_image()
-
cv2.imshow('Captured Frame', frame)
-
-edges = cv2.Canny(gray, 10, 100)
+face = ROI_face.roi(frame)
+cv2.imshow('Face', face)
+# Canny expects a single-channel image, so convert the masked face to grayscale
+edges = cv2.Canny(cv2.cvtColor(face, cv2.COLOR_BGR2GRAY), 10, 100)
cv2.imshow('Image with Face Contours', edges)
cv2.waitKey(0)
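+# Close the display windows once a key is pressed
+cv2.destroyAllWindows()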
diff --git a/show_img_conditions.py b/show_img_conditions.py
new file mode 100644
index 0000000..91738cf
--- /dev/null
+++ b/show_img_conditions.py
@@ -0,0 +1,11 @@
+import cv2
+
+# Load an image
+image_path = "conditions.png" # Replace with the path to your image
+img = cv2.imread(image_path)
+
+def show():
+ cv2.imshow('Image', img)
+ cv2.waitKey(5000)
+ cv2.destroyAllWindows()
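+
+# Usage sketch (assumes conditions.png exists next to this script):
+if __name__ == '__main__':
+    show()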
diff --git a/test b/test
deleted file mode 100644
index 5d3bb78..0000000
--- a/test
+++ /dev/null
@@ -1,145 +0,0 @@
-#include <ros/ros.h>
-#include <std_msgs/String.h>
-
-void ChatterCallback(const std_msgs::String::ConstPtr& msg)
-{
-    ROS_INFO("I heard: [%s]", msg->data.c_str());
-}
-
-int main(int argc, char **argv)
-{
-    ros::init(argc, argv, "subscriber");
-    ros::NodeHandle nh;
-    ros::Subscriber subscriber = nh.subscribe("chatter", 1, ChatterCallback);
-    ros::spin();
-    return 0;
-}
-
-
-#include <ros/ros.h>
-#include <std_msgs/String.h>
-
-int main(int argc, char **argv)
-{
-    ros::init(argc, argv, "Publisher");
-    ros::NodeHandle nh;
-    ros::Publisher publisher = nh.advertise<std_msgs::String>("chatter", 1);
-    ros::Rate loopRate(10);
-    int count = 0;
-    while (ros::ok())
-    {
-        std_msgs::String message;
-        message.data = "hello world " + std::to_string(count);
-        ROS_INFO_STREAM(message.data);
-        publisher.publish(message);
-        ros::spinOnce();
-        loopRate.sleep();
-        count++;
-    }
-    return 0;
-}
-
-
-
-
-
-#include <ros/ros.h>
-#include <std_msgs/String.h>
-
-void ChatterCallback(const std_msgs::String::ConstPtr& msg)
-{
-    ROS_INFO("I heard [%s]", msg->data.c_str());
-}
-
-int main(int argc, char **argv)
-{
-    ros::init(argc, argv, "Subscriber");
-    ros::NodeHandle nh;
-    ros::Subscriber subscriber = nh.subscribe("chatter", 10, ChatterCallback);
-    ros::spin();
-    return 0;
-}
-
-
-#include <ros/ros.h>
-#include <std_msgs/String.h>
-
-int main(int argc, char **argv)
-{
-    ros::init(argc, argv, "publisher");
-    ros::NodeHandle nh;
-    ros::Publisher publisher = nh.advertise<std_msgs::String>("chatter", 1);
-    ros::Rate loopRate(10);
-    int count = 0;
-    while (ros::ok()) {
-        std_msgs::String message;
-        message.data = "Hello World " + std::to_string(count);
-        ROS_INFO_STREAM(message.data);
-        publisher.publish(message);
-        ros::spinOnce();
-        loopRate.sleep();
-        count++;
-    }
-    return 0;
-}
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-#include <ros/ros.h>
-#include <std_msgs/String.h>
-
-void ChatterCallback(const std_msgs::String::ConstPtr& msg)
-{
-    ROS_INFO("I hear [%s]", msg->data.c_str());
-}
-
-int main(int argc, char **argv)
-{
-    ros::init(argc, argv, "listener_subscriber");
-    ros::NodeHandle nh;
-    ros::Subscriber subscriber = nh.subscribe("chatter", 10, ChatterCallback);
-    ros::spin();
-    return 0;
-}
-
-#include <ros/ros.h>
-#include <std_msgs/String.h>
-
-int main(int argc, char **argv)
-{
-    ros::init(argc, argv, "talker_publisher");
-    ros::NodeHandle nh;
-    ros::Publisher publisher = nh.advertise<std_msgs::String>("chatter", 1);
-    ros::Rate loopRate(10);
-    int count = 0;
-    while (ros::ok())
-    {
-        std_msgs::String message;
-        message.data = "Hello World " + std::to_string(count);
-        ROS_INFO_STREAM(message.data);
-        publisher.publish(message);
-        ros::spinOnce();
-        loopRate.sleep();
-        count++;
-    }
-    return 0;
-}
-
-
-
-
-
-
diff --git a/test.py b/test.py
index d4bda3f..d5a516f 100644
--- a/test.py
+++ b/test.py
@@ -1,46 +1,57 @@
import cv2
-import dlib
import numpy as np
-from imutils import face_utils
-from deeplabv3 import DeepLabV3
-# Load face detector model from Dlib
-detector = dlib.get_frontal_face_detector()
+# Open the webcam and mask everything outside an ellipse around the detected face
+cap = cv2.VideoCapture(0)
+face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
+while True:
+    ret, frame = cap.read()
+    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+    blurred = cv2.GaussianBlur(gray, (3, 3), 0)
+    height, width = blurred.shape
+    faces = face_cascade.detectMultiScale(blurred, scaleFactor=1.3, minNeighbors=5, minSize=(30, 30))
+    result = frame  # Fallback: show the raw frame when no face is detected
-# Load DeepLabV3 model for hair segmentation
-deeplab_model = DeepLabV3(weights='pascal_voc', input_shape=(None, None, 3))
+    for (x, y, w, h) in faces:
+        # Centre of the detected face box
+        c_x = int(x + w / 2)
+        c_y = int(y + h / 2)
-# Load the input image
-image_path = "img.jpg"
-image = cv2.imread(image_path)
-original_image = image.copy()
+        # Ellipse centre, shifted so the mask also covers the hair and chin
+        c_x2 = int((int(c_x - w / 2 * 0.8) + int(c_x + w / 2 * 0.8)) / 2)
+        c_y2 = int((int(c_y - h / 2 * 1.4) + int(c_y + h / 2 * 1.1)) / 2)
+
+        center_ellipse = (c_x2, c_y2)
+        axes = (int(w / 2 * 0.9), int(h / 2 * 1.4))  # Semi-axis along x, semi-axis along y
+        angle = 0  # Rotation angle
-# Convert image to RGB (DeepLabV3 model requires RGB format)
-image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+        # Build a filled elliptical mask and keep only the pixels inside it
-# Detect face using Dlib
-faces = detector(image_rgb)
-for face in faces:
- (x, y, w, h) = face_utils.rect_to_bb(face)
- cv2.rectangle(image, (x, y), (x + w, y + h), (255, 0, 0), 2)
+        mask = np.zeros((height, width), dtype=np.uint8)
+        cv2.ellipse(mask, center_ellipse, axes, angle, 0, 360, 255, -1)
+        result = cv2.bitwise_and(frame, frame, mask=mask)
- # Perform hair segmentation using DeepLabV3
- face_roi = original_image[y:y + h, x:x + w]
- resized_face_roi = cv2.resize(face_roi, (512, 512)) # Resize for DeepLabV3 model input size
- hair_mask = deeplab_model.predict(resized_face_roi)
- hair_mask = (hair_mask.squeeze() == 15).astype(np.uint8) * 255 # Hair class in Pascal VOC dataset is 15
+ cv2.imshow('Head Detection', result)
+ if cv2.waitKey(1) & 0xFF == ord('q'):
+ break
- # Apply hair mask to the original image
- hair_segmented = cv2.bitwise_and(face_roi, face_roi, mask=hair_mask)
-
- # Replace hair in the original image with the segmented hair
- original_image[y:y + h, x:x + w] = cv2.bitwise_and(original_image[y:y + h, x:x + w], original_image[y:y + h, x:x + w], mask=cv2.bitwise_not(hair_mask))
- original_image[y:y + h, x:x + w] += hair_segmented
-
-# Save the segmented image
-cv2.imwrite("segmented_face_and_hair.jpg", original_image)
-
-# Display the segmented image
-cv2.imshow("Segmented Face and Hair", original_image)
-cv2.waitKey(0)
+cap.release()
cv2.destroyAllWindows()
+
diff --git a/test2.py b/test2.py
new file mode 100644
index 0000000..41330b2
--- /dev/null
+++ b/test2.py
@@ -0,0 +1,35 @@
+import cv2
+import numpy as np
+
+# Load an image
+image = cv2.imread('image.jpg')
+
+# Convert the image to grayscale
+gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
+
+# Apply thresholding to create a binary image
+_, thresh = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
+
+# Find contours in the binary image
+contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
+
+# Choose the contour to remove (here the first one returned; findContours does
+# not sort contours, so in practice pick one by area or position)
+contour_to_remove = contours[0]
+
+# Create a mask with the same shape as the image
+mask = np.zeros_like(image)
+
+# Fill the contour with a background color (white in this case)
+cv2.fillPoly(mask, [contour_to_remove], (255, 255, 255))
+
+# Invert the mask to keep the rest of the image
+mask_inv = cv2.bitwise_not(mask)
+
+# Bitwise AND the original image with the inverted mask
+result = cv2.bitwise_and(image, mask_inv)
+
+# Display the original and processed images
+cv2.imshow('Original Image', image)
+cv2.imshow('Result', result)
+cv2.waitKey(0)
+cv2.destroyAllWindows()
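+
+# Variant sketch: instead of removing the arbitrary contours[0], remove the
+# largest blob by area. Assumes at least one contour was found above.
+largest = max(contours, key=cv2.contourArea)
+mask2 = np.zeros_like(image)
+cv2.fillPoly(mask2, [largest], (255, 255, 255))
+result2 = cv2.bitwise_and(image, cv2.bitwise_not(mask2))
+cv2.imshow('Largest Contour Removed', result2)
+cv2.waitKey(0)
+cv2.destroyAllWindows()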