This commit is contained in:
Rémi BUSSIERE 2023-11-15 16:55:29 +01:00
parent b33875167d
commit c3d23ae2e1
14 changed files with 161 additions and 460 deletions

View File

@ -2,7 +2,7 @@
<module type="PYTHON_MODULE" version="4"> <module type="PYTHON_MODULE" version="4">
<component name="NewModuleRootManager"> <component name="NewModuleRootManager">
<content url="file://$MODULE_DIR$" /> <content url="file://$MODULE_DIR$" />
<orderEntry type="jdk" jdkName="Python 3.9 (3)" jdkType="Python SDK" /> <orderEntry type="jdk" jdkName="Python 3.9 (2)" jdkType="Python SDK" />
<orderEntry type="sourceFolder" forTests="false" /> <orderEntry type="sourceFolder" forTests="false" />
</component> </component>
</module> </module>

View File

@ -1,4 +1,4 @@
<?xml version="1.0" encoding="UTF-8"?> <?xml version="1.0" encoding="UTF-8"?>
<project version="4"> <project version="4">
<component name="ProjectRootManager" version="2" project-jdk-name="Python 3.9 (3)" project-jdk-type="Python SDK" /> <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.9 (2)" project-jdk-type="Python SDK" />
</project> </project>

View File

@ -1,236 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="AutoImportSettings">
<option name="autoReloadType" value="SELECTIVE" />
</component>
<component name="BranchesTreeState">
<expand>
<path>
<item name="ROOT" type="e8cecc67:BranchNodeDescriptor" />
<item name="LOCAL_ROOT" type="e8cecc67:BranchNodeDescriptor" />
</path>
<path>
<item name="ROOT" type="e8cecc67:BranchNodeDescriptor" />
<item name="REMOTE_ROOT" type="e8cecc67:BranchNodeDescriptor" />
</path>
<path>
<item name="ROOT" type="e8cecc67:BranchNodeDescriptor" />
<item name="REMOTE_ROOT" type="e8cecc67:BranchNodeDescriptor" />
<item name="GROUP_NODE:origin" type="e8cecc67:BranchNodeDescriptor" />
</path>
</expand>
<select />
</component>
<component name="ChangeListManager">
<list default="true" id="3b0d5ef2-6301-45ac-b9be-73a17800d1e1" name="Default Changelist" comment="">
<change afterPath="$PROJECT_DIR$/.idea/.gitignore" afterDir="false" />
<change afterPath="$PROJECT_DIR$/.idea/Identikit-Robot.iml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/.idea/inspectionProfiles/Project_Default.xml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/.idea/inspectionProfiles/profiles_settings.xml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/.idea/misc.xml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/.idea/modules.xml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/.idea/vcs.xml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/get_head.py" afterDir="false" />
<change afterPath="$PROJECT_DIR$/haarcascade_frontalface_default.xml" afterDir="false" />
<change afterPath="$PROJECT_DIR$/img.jpg" afterDir="false" />
<change afterPath="$PROJECT_DIR$/main.py" afterDir="false" />
<change afterPath="$PROJECT_DIR$/test" afterDir="false" />
<change afterPath="$PROJECT_DIR$/test.py" afterDir="false" />
</list>
<option name="SHOW_DIALOG" value="false" />
<option name="HIGHLIGHT_CONFLICTS" value="true" />
<option name="HIGHLIGHT_NON_ACTIVE_CHANGELIST" value="false" />
<option name="LAST_RESOLUTION" value="IGNORE" />
</component>
<component name="FileTemplateManagerImpl">
<option name="RECENT_TEMPLATES">
<list>
<option value="Python Script" />
</list>
</option>
</component>
<component name="Git.Settings">
<option name="RECENT_GIT_ROOT_PATH" value="$PROJECT_DIR$" />
</component>
<component name="ProjectId" id="2WtQ9d3jW2CaYzd1wkVhiAep3yM" />
<component name="ProjectLevelVcsManager">
<ConfirmationsSetting value="2" id="Add" />
</component>
<component name="ProjectViewState">
<option name="hideEmptyMiddlePackages" value="true" />
<option name="showLibraryContents" value="true" />
</component>
<component name="PropertiesComponent">
<property name="ASKED_ADD_EXTERNAL_FILES" value="true" />
<property name="RunOnceActivity.OpenProjectViewOnStart" value="true" />
<property name="com.intellij.ide.scratch.LRUPopupBuilder$1/New Scratch File" value="Python" />
<property name="last_opened_file_path" value="$PROJECT_DIR$" />
<property name="settings.editor.selected.configurable" value="com.jetbrains.python.configuration.PyActiveSdkModuleConfigurable" />
</component>
<component name="RunManager" selected="Python.main">
<configuration name="get_head" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true">
<module name="Identikit-Robot" />
<option name="INTERPRETER_OPTIONS" value="" />
<option name="PARENT_ENVS" value="true" />
<envs>
<env name="PYTHONUNBUFFERED" value="1" />
</envs>
<option name="SDK_HOME" value="" />
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$" />
<option name="IS_MODULE_SDK" value="true" />
<option name="ADD_CONTENT_ROOTS" value="true" />
<option name="ADD_SOURCE_ROOTS" value="true" />
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/get_head.py" />
<option name="PARAMETERS" value="" />
<option name="SHOW_COMMAND_LINE" value="false" />
<option name="EMULATE_TERMINAL" value="false" />
<option name="MODULE_MODE" value="false" />
<option name="REDIRECT_INPUT" value="false" />
<option name="INPUT_FILE" value="" />
<method v="2" />
</configuration>
<configuration name="get_visage" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true">
<module name="Identikit-Robot" />
<option name="INTERPRETER_OPTIONS" value="" />
<option name="PARENT_ENVS" value="true" />
<envs>
<env name="PYTHONUNBUFFERED" value="1" />
</envs>
<option name="SDK_HOME" value="" />
<option name="WORKING_DIRECTORY" value="$APPLICATION_CONFIG_DIR$/scratches" />
<option name="IS_MODULE_SDK" value="false" />
<option name="ADD_CONTENT_ROOTS" value="true" />
<option name="ADD_SOURCE_ROOTS" value="true" />
<option name="SCRIPT_NAME" value="$APPLICATION_CONFIG_DIR$/scratches/get_visage.py" />
<option name="PARAMETERS" value="" />
<option name="SHOW_COMMAND_LINE" value="false" />
<option name="EMULATE_TERMINAL" value="false" />
<option name="MODULE_MODE" value="false" />
<option name="REDIRECT_INPUT" value="false" />
<option name="INPUT_FILE" value="" />
<method v="2" />
</configuration>
<configuration name="main" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true">
<module name="Identikit-Robot" />
<option name="INTERPRETER_OPTIONS" value="" />
<option name="PARENT_ENVS" value="true" />
<envs>
<env name="PYTHONUNBUFFERED" value="1" />
</envs>
<option name="SDK_HOME" value="" />
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$" />
<option name="IS_MODULE_SDK" value="true" />
<option name="ADD_CONTENT_ROOTS" value="true" />
<option name="ADD_SOURCE_ROOTS" value="true" />
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/main.py" />
<option name="PARAMETERS" value="" />
<option name="SHOW_COMMAND_LINE" value="false" />
<option name="EMULATE_TERMINAL" value="false" />
<option name="MODULE_MODE" value="false" />
<option name="REDIRECT_INPUT" value="false" />
<option name="INPUT_FILE" value="" />
<method v="2" />
</configuration>
<configuration name="test" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true">
<module name="Identikit-Robot" />
<option name="INTERPRETER_OPTIONS" value="" />
<option name="PARENT_ENVS" value="true" />
<envs>
<env name="PYTHONUNBUFFERED" value="1" />
</envs>
<option name="SDK_HOME" value="" />
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$" />
<option name="IS_MODULE_SDK" value="true" />
<option name="ADD_CONTENT_ROOTS" value="true" />
<option name="ADD_SOURCE_ROOTS" value="true" />
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/test.py" />
<option name="PARAMETERS" value="" />
<option name="SHOW_COMMAND_LINE" value="false" />
<option name="EMULATE_TERMINAL" value="false" />
<option name="MODULE_MODE" value="false" />
<option name="REDIRECT_INPUT" value="false" />
<option name="INPUT_FILE" value="" />
<method v="2" />
</configuration>
<recent_temporary>
<list>
<item itemvalue="Python.main" />
<item itemvalue="Python.get_head" />
<item itemvalue="Python.test" />
<item itemvalue="Python.get_visage" />
</list>
</recent_temporary>
</component>
<component name="SpellCheckerSettings" RuntimeDictionaries="0" Folders="0" CustomDictionaries="0" DefaultDictionary="application-level" UseSingleDictionary="true" transferred="true" />
<component name="TaskManager">
<task active="true" id="Default" summary="Default task">
<changelist id="3b0d5ef2-6301-45ac-b9be-73a17800d1e1" name="Default Changelist" comment="" />
<created>1697549245784</created>
<option name="number" value="Default" />
<option name="presentableId" value="Default" />
<updated>1697549245784</updated>
</task>
<servers />
</component>
<component name="Vcs.Log.Tabs.Properties">
<option name="TAB_STATES">
<map>
<entry key="MAIN">
<value>
<State />
</value>
</entry>
</map>
</option>
<option name="oldMeFiltersMigrated" value="true" />
</component>
<component name="VcsManagerConfiguration">
<option name="ADD_EXTERNAL_FILES_SILENTLY" value="true" />
</component>
<component name="WindowStateProjectService">
<state x="543" y="210" key="#com.intellij.fileTypes.FileTypeChooser" timestamp="1699896592711">
<screen x="0" y="0" width="1536" height="816" />
</state>
<state x="543" y="210" key="#com.intellij.fileTypes.FileTypeChooser/0.0.1536.816@0.0.1536.816" timestamp="1699896592711" />
<state x="596" y="253" key="FileChooserDialogImpl" timestamp="1697549719587">
<screen x="0" y="0" width="1536" height="816" />
</state>
<state x="596" y="253" key="FileChooserDialogImpl/0.0.1536.816@0.0.1536.816" timestamp="1697549719587" />
<state width="1515" height="162" key="GridCell.Tab.0.bottom" timestamp="1700052760623">
<screen x="0" y="0" width="1536" height="816" />
</state>
<state width="1515" height="162" key="GridCell.Tab.0.bottom/0.0.1536.816@0.0.1536.816" timestamp="1700052760623" />
<state width="1515" height="162" key="GridCell.Tab.0.center" timestamp="1700052760623">
<screen x="0" y="0" width="1536" height="816" />
</state>
<state width="1515" height="162" key="GridCell.Tab.0.center/0.0.1536.816@0.0.1536.816" timestamp="1700052760623" />
<state width="1515" height="162" key="GridCell.Tab.0.left" timestamp="1700052760623">
<screen x="0" y="0" width="1536" height="816" />
</state>
<state width="1515" height="162" key="GridCell.Tab.0.left/0.0.1536.816@0.0.1536.816" timestamp="1700052760623" />
<state width="1515" height="162" key="GridCell.Tab.0.right" timestamp="1700052760623">
<screen x="0" y="0" width="1536" height="816" />
</state>
<state width="1515" height="162" key="GridCell.Tab.0.right/0.0.1536.816@0.0.1536.816" timestamp="1700052760623" />
<state width="1493" height="258" key="GridCell.Tab.1.bottom" timestamp="1698034237886">
<screen x="0" y="0" width="1536" height="816" />
</state>
<state width="1493" height="258" key="GridCell.Tab.1.bottom/0.0.1536.816@0.0.1536.816" timestamp="1698034237886" />
<state width="1493" height="258" key="GridCell.Tab.1.center" timestamp="1698034237886">
<screen x="0" y="0" width="1536" height="816" />
</state>
<state width="1493" height="258" key="GridCell.Tab.1.center/0.0.1536.816@0.0.1536.816" timestamp="1698034237886" />
<state width="1493" height="258" key="GridCell.Tab.1.left" timestamp="1698034237886">
<screen x="0" y="0" width="1536" height="816" />
</state>
<state width="1493" height="258" key="GridCell.Tab.1.left/0.0.1536.816@0.0.1536.816" timestamp="1698034237886" />
<state width="1493" height="258" key="GridCell.Tab.1.right" timestamp="1698034237886">
<screen x="0" y="0" width="1536" height="816" />
</state>
<state width="1493" height="258" key="GridCell.Tab.1.right/0.0.1536.816@0.0.1536.816" timestamp="1698034237886" />
<state x="277" y="53" key="SettingsEditor" timestamp="1697549721753">
<screen x="0" y="0" width="1536" height="816" />
</state>
<state x="277" y="53" key="SettingsEditor/0.0.1536.816@0.0.1536.816" timestamp="1697549721753" />
</component>
</project>

33
ROI_face.py Normal file
View File

@ -0,0 +1,33 @@
import cv2
import numpy as np
def roi(frame):
    """Extract an elliptical region of interest around a detected face.

    Runs a Haar-cascade face detector on a blurred grayscale copy of
    *frame*, then masks the original BGR frame with a filled ellipse
    shifted slightly upward from the face centre (1.4 half-heights above
    vs 1.1 below), presumably to include the forehead/hair -- TODO confirm.

    Parameters
    ----------
    frame : numpy.ndarray
        BGR image, e.g. from ``cv2.VideoCapture`` or ``cv2.imread``.

    Returns
    -------
    numpy.ndarray
        Image with the same shape as *frame*; pixels outside the face
        ellipse are black. If several faces are detected the mask of the
        last one wins (original behaviour preserved). If no face is
        detected an all-black image is returned.
    """
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (3, 3), 0)
    height, width = blurred.shape
    face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    faces = face_cascade.detectMultiScale(blurred, scaleFactor=1.3,
                                          minNeighbors=5, minSize=(30, 30))
    # BUG FIX: `result` was only assigned inside the loop, so an empty
    # `faces` made the final `return result` raise UnboundLocalError.
    result = np.zeros_like(frame)
    for (x, y, w, h) in faces:
        # Centre of the detected face rectangle.
        c_x = int(x + w / 2)
        c_y = int(y + h / 2)
        # Ellipse centre: midpoint of a horizontally narrowed (0.8) and
        # vertically asymmetric (1.4 up / 1.1 down) bounding box.
        c_x2 = int((int(c_x - w / 2 * 0.8) + int(c_x + w / 2 * 0.8)) / 2)
        c_y2 = int((int(c_y - h / 2 * 1.4) + int(c_y + h / 2 * 1.1)) / 2)
        center_ellipse = (c_x2, c_y2)
        axes = (int(w / 2), int(h / 2 * 1.4))  # semi-major, semi-minor axis
        angle = 0  # no rotation
        mask = np.zeros((height, width), dtype=np.uint8)
        # Filled (thickness -1) white (255) ellipse on the mask.
        cv2.ellipse(mask, center_ellipse, axes, angle, 0, 360, 255, -1)
        # Keep only the pixels inside the ellipse.
        result = cv2.bitwise_and(frame, frame, mask=mask)
    return result

BIN
conditions.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 391 KiB

View File

@ -1,47 +1,37 @@
import cv2 import cv2
import numpy as np import time
# Démarrez la capture vidéo depuis la webcam
cap = cv2.VideoCapture(0)
def get_image(): def get_image():
cap = cv2.VideoCapture(0) # Set the window name
count = 0 window_name = 'Camera Capture'
condition = False
while condition == False: # Get the default frames per second (fps) of the camera
fps = int(cap.get(cv2.CAP_PROP_FPS))
# Set the countdown duration in seconds
countdown_duration = 10
# Start the countdown
for countdown in range(countdown_duration, 10, -1):
ret, frame = cap.read() ret, frame = cap.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (3, 3), 0)
height, width = blurred.shape
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
faces = face_cascade.detectMultiScale(blurred, scaleFactor=1.3, minNeighbors=5, minSize=(30, 30))
color_cicle = (0, 0, 255) # Display the camera feed
for (x, y, w, h) in faces: cv2.imshow(window_name, frame)
if (x + w // 2 ) - int(width / 2) <= 10 and (x + w // 2 ) - int(width / 2) >= -10: # Display the countdown on the image
if (y + h // 2) - int(height / 2) <= 10 and (y + h // 2) - int(height / 2) >= -10: font = cv2.FONT_HERSHEY_SIMPLEX
if (w + h)//4 >= 140 and (w + h)//4 <= 160: cv2.putText(frame, str(countdown)[0], (10, 30), font, 1, (0, 255, 0), 2, cv2.LINE_AA)
color_cicle = (0, 255, 0) cv2.imshow(window_name, frame)
count += 1
else:
count = 0
else:
count = 0
else:
count = 0
if count == 5: # Wait for 1 second and check for key press
print("take picture here") if cv2.waitKey(100) & 0xFF == 27: # 27 is the ASCII code for the 'ESC' key
image = gray
condition = True
return image
cv2.circle(frame, (x + w // 2, y + h // 2), (w + h) // 4, color_cicle, 5)
cv2.circle(frame, (int(width / 2), int(height / 2)), 150, (255, 255, 255), 2)
cv2.imshow('Head Detection', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break break
cap.release() # Take a picture at the end of the countdown
cv2.destroyAllWindows() ret, frame = cap.read()
#cv2.imwrite("image.png", frame)
# Release the camera and close the OpenCV window
cap.release()
cv2
return frame

BIN
image.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 58 KiB

BIN
image.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 295 KiB

BIN
img.jpg

Binary file not shown.

Before

Width:  |  Height:  |  Size: 38 KiB

10
main.py
View File

@ -1,12 +1,14 @@
import cv2 import cv2
import show_img_conditions
import ROI_face
import get_head import get_head
#show_img_conditions.show()
gray = get_head.get_image() gray = get_head.get_image()
cv2.imshow('Grey Image', gray) cv2.imshow('Grey Image', gray)
face = ROI_face.roi(gray)
edges = cv2.Canny(gray, 10, 100) cv2.imshow('Face', face)
edges = cv2.Canny(face, 10, 100)
cv2.imshow('Image avec Contours de Visage', edges) cv2.imshow('Image avec Contours de Visage', edges)
cv2.waitKey(0) cv2.waitKey(0)

11
show_img_conditions.py Normal file
View File

@ -0,0 +1,11 @@
import cv2
import time
# Load an image
image_path = "conditions.png" # Replace with the path to your image
img = cv2.imread(image_path)
def show():
    """Display the loaded conditions image for five seconds, then close all windows."""
    window_title = 'Image'
    cv2.imshow(window_title, img)
    cv2.waitKey(5000)  # block for 5000 ms before closing
    cv2.destroyAllWindows()

145
test
View File

@ -1,145 +0,0 @@
#include <ros/ros.h>
#include <std_msgs/String.h>

// Print every std_msgs/String message received on the subscribed topic.
// FIX: the callback must take the incoming message as a parameter;
// `msg` was undeclared in the original (compile error).
void ChatterCallback(const std_msgs::String::ConstPtr& msg)
{
    // FIX: msg is a shared pointer, so member access uses `->`.
    ROS_INFO("I heard : [%s]", msg->data.c_str());
}

int main(int argc, char **argv)
{
    ros::init(argc, argv, "subcriber");  // node name kept as-is (runtime string)
    ros::NodeHandle nh;
    // FIX: `ros::Subsciber` was a typo for `ros::Subscriber`.
    ros::Subscriber subscriber = nh.subscribe("chatter", 1, ChatterCallback);
    ros::spin();  // hand control to ROS until shutdown
    return 0;
}
#include <ros/ros.h>
#include <std_msgs/String.h>

// Publish "hello world<N>" on the "chatter" topic at 10 Hz.
int main(int argc, char **argv)
{
    ros::init(argc, argv, "Publisher");
    ros::NodeHandle nh;
    ros::Publisher publisher = nh.advertise<std_msgs::String>("chatter", 1);
    ros::Rate loopRate(10);
    int count = 0;  // FIX: `count` was never declared in the original
    while (ros::ok())
    {
        std_msgs::String message;
        message.data = "hello world" + std::to_string(count);
        ROS_INFO_STREAM(message.data);
        publisher.publish(message);
        ros::spinOnce();
        loopRate.sleep();
        // FIX: `count==;` was a syntax error (comparison, not increment).
        count++;
    }
    return 0;
}
#include <ros/ros.h>
#include <std_msgs/String.h>

// FIX: the callback needs the message parameter; `msg` was undeclared.
void ChatterCallback(const std_msgs::String::ConstPtr& msg)
{
    // FIX: ROS_INFO takes printf-style varargs; the original added a
    // const char* to a string literal, which is pointer arithmetic,
    // not concatenation.
    ROS_INFO("I heard [%s]", msg->data.c_str());
}

int main(int argc, char **argv)
{
    ros::init(argc, argv, "Subscriber");
    ros::NodeHandle nh;
    // FIX: `ros::Subsctiber` was a typo for `ros::Subscriber`.
    ros::Subscriber subscriber = nh.subscribe("chatter", 10, ChatterCallback);
    ros::spin();
    return 0;
}
#include <ros/ros.h>
#include <std_msgs/String.h>

// Publish "Hello World<N>" on the "chatter" topic at 10 Hz.
int main(int argc, char **argv)
{
    ros::init(argc, argv, "publisher");
    ros::NodeHandle nh;
    // FIX: `ros::Punlisher` and `advretise` were typos.
    ros::Publisher publisher = nh.advertise<std_msgs::String>("chatter", 1);
    ros::Rate loopRate(10);
    int count = 0;  // FIX: `count` was never declared in the original
    while (ros::ok()) {
        std_msgs::String message;
        message.data = "Hello World" + std::to_string(count);
        ROS_INFO_STREAM(message.data);
        // FIX: `chatter_Publisher` was undeclared; the publisher created
        // above is the only one in scope.
        publisher.publish(message);
        ros::spinOnce();  // FIX: `ros::spinOnce;` named the function without calling it
        loopRate.sleep();
        count++;
    }
    return 0;
}
#include <ros/ros.h>  // FIX: `#inclulde` was a typo
#include <std_msgs/String.h>

// FIX: the callback needs the message parameter; it was undeclared.
void ChatterCallback(const std_msgs::String::ConstPtr& msg)
{
    // FIX: a comma was missing between the format string and the argument.
    ROS_INFO("I hear [%s]", msg->data.c_str());
}

int main(int argc, char **argv)
{
    // FIX: the closing parenthesis of ros::init was missing.
    ros::init(argc, argv, "listener/subscriber");
    ros::NodeHandle nh;
    ros::Subscriber subscriber = nh.subscribe("chatter", 10, ChatterCallback);
    ros::spin();  // FIX: `ros::spin;` named the function without calling it
    return 0;
}
#include <ros/ros.h>
#include <std_msgs/String.h>

// Publish "Hello World <N>" on the "chatter" topic at 10 Hz.
int main(int argc, char **argv)
{
    // FIX: the closing parenthesis of ros::init was missing.
    ros::init(argc, argv, "talker/publisher");
    ros::NodeHandle nh;
    ros::Publisher publisher = nh.advertise<std_msgs::String>("chatter", 1);
    ros::Rate loopRate(10);
    int count = 0;  // FIX: `count` was never declared in the original
    while (ros::ok())
    {
        std_msgs::String message;
        message.data = "Hello World " + std::to_string(count);
        ROS_INFO_STREAM(message.data);
        // FIX: `Chatter_Publisher` was undeclared; use the publisher above.
        publisher.publish(message);
        ros::spinOnce();
        loopRate.sleep();
        count++;
    }
    return 0;
}

81
test.py
View File

@ -1,46 +1,57 @@
import cv2 import cv2
import dlib
import numpy as np import numpy as np
from imutils import face_utils
from deeplabv3 import DeepLabV3
# Load face detector model from Dlib cap = cv2.VideoCapture(0)
detector = dlib.get_frontal_face_detector() count = 0
condition = False
while condition == False:
ret, frame = cap.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (3, 3), 0)
height, width = blurred.shape
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
faces = face_cascade.detectMultiScale(blurred, scaleFactor=1.3, minNeighbors=5, minSize=(30, 30))
# Load DeepLabV3 model for hair segmentation for (x, y, w, h) in faces:
deeplab_model = DeepLabV3(weights='pascal_voc', input_shape=(None, None, 3)) c_x = int(x + w/2)
c_y = int(y + h/2)
center = (c_x, c_y)
top = (c_x, int(c_y-h/2*1.4))
bottom = (c_x, int(c_y+h/2*1.1))
left = (int(c_x - w / 2 * 0.8), c_y)
right = (int(c_x + w / 2 * 0.8), c_y)
# Load the input image c_x2 = int(( int(c_x - w / 2 * 0.8) + int(c_x + w / 2 * 0.8) )/2)
image_path = "img.jpg" c_y2 = int(( int(c_y-h/2*1.4) + int(c_y+h/2*1.1) )/2)
image = cv2.imread(image_path) '''
original_image = image.copy() cv2.circle(frame, center, 10, (255, 255, 255), 5) #center
cv2.circle(frame, top, 10, (255, 255, 255), 5) # top
cv2.circle(frame, bottom, 10, (255, 255, 255), 5) # bottom
cv2.circle(frame, left, 10, (255, 255, 255), 5) # left
cv2.circle(frame, right, 10, (255, 255, 255), 5) # right
# Convert image to RGB (DeepLabV3 model requires RGB format) cv2.circle(frame, (x, y), 10, (255, 255, 255), 5) # top left
image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) cv2.circle(frame, (x+w, y), 10, (255, 255, 255), 5) # top right
cv2.circle(frame, (x, y+h), 10, (255, 255, 255), 5) # bottom left
cv2.circle(frame, (x+h, y+h), 10, (255, 255, 255), 5) # bottom right
'''
center_ellipse = (c_x2, c_y2)
axes = (int(w/2* 0.9), int(h/2*1.4)) # Major axis, Minor axis
angle = 0 # Rotation angle
color = (0, 255, 0) # Color in BGR format
thickness = 2
# Detect face using Dlib # Draw the ellipse on the image
faces = detector(image_rgb) #cv2.ellipse(frame, center_ellipse, axes, angle, 0, 360, color, thickness)
for face in faces:
(x, y, w, h) = face_utils.rect_to_bb(face)
cv2.rectangle(image, (x, y), (x + w, y + h), (255, 0, 0), 2)
# Perform hair segmentation using DeepLabV3 mask = np.zeros((height, width), dtype=np.uint8)
face_roi = original_image[y:y + h, x:x + w] cv2.ellipse(mask, center_ellipse, axes, angle, 0, 360, 255, -1)
resized_face_roi = cv2.resize(face_roi, (512, 512)) # Resize for DeepLabV3 model input size result = cv2.bitwise_and(frame, frame, mask=mask)
hair_mask = deeplab_model.predict(resized_face_roi)
hair_mask = (hair_mask.squeeze() == 15).astype(np.uint8) * 255 # Hair class in Pascal VOC dataset is 15
# Apply hair mask to the original image cv2.imshow('Head Detection', result)
hair_segmented = cv2.bitwise_and(face_roi, face_roi, mask=hair_mask) if cv2.waitKey(1) & 0xFF == ord('q'):
break
# Replace hair in the original image with the segmented hair cap.release()
original_image[y:y + h, x:x + w] = cv2.bitwise_and(original_image[y:y + h, x:x + w], original_image[y:y + h, x:x + w], mask=cv2.bitwise_not(hair_mask))
original_image[y:y + h, x:x + w] += hair_segmented
# Save the segmented image
cv2.imwrite("segmented_face_and_hair.jpg", original_image)
# Display the segmented image
cv2.imshow("Segmented Face and Hair", original_image)
cv2.waitKey(0)
cv2.destroyAllWindows() cv2.destroyAllWindows()

35
test2.py Normal file
View File

@ -0,0 +1,35 @@
import cv2
import numpy as np

# Load an image.
image = cv2.imread('image.jpg')
if image is None:
    # BUG FIX: cv2.imread returns None (it does not raise) when the file
    # is missing/unreadable; the original then crashed later inside
    # cv2.cvtColor with an opaque error. Fail fast with a clear message.
    raise FileNotFoundError("could not read 'image.jpg'")

# Convert the image to grayscale.
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# Apply thresholding to create a binary image.
_, thresh = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)

# Find contours in the binary image.
contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
if not contours:
    # BUG FIX: the original indexed contours[0] unconditionally and
    # raised IndexError on an image with no contours.
    raise ValueError("no contours found in 'image.jpg'")

# Choose the contour to remove (here: the first contour found).
contour_to_remove = contours[0]

# Create a mask with the same shape as the image.
mask = np.zeros_like(image)

# Fill the contour with a background color (white in this case).
cv2.fillPoly(mask, [contour_to_remove], (255, 255, 255))

# Invert the mask to keep the rest of the image.
mask_inv = cv2.bitwise_not(mask)

# Bitwise AND the original image with the inverted mask.
result = cv2.bitwise_and(image, mask_inv)

# Display the original and processed images.
cv2.imshow('Original Image', image)
cv2.imshow('Result', result)
cv2.waitKey(0)
cv2.destroyAllWindows()