Compare commits
4 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 7b59711b16 | |
| | d9260e8365 | |
| | 33bb713151 | |
| | adbb57a911 | |
@@ -0,0 +1,38 @@
import requests

# Define the API endpoint and parameters
api_url = "https://api.removal.ai/3.0/remove"
api_key = "93D96377-ED5E-7CC1-CD9D-05017285C46A"

# Define the file path to the image
image_path = "photo.png"

headers = {
    "Rm-Token": api_key
}
files = {
    "image_file": open(image_path, "rb")
}
data = {
    "get_file": "1"  # Ensures the processed file is returned
}

try:
    # Make the POST request
    response = requests.post(api_url, headers=headers, files=files, data=data)

    # Save the output file if the request is successful
    if response.status_code == 200:
        with open("transparent_image.png", "wb") as output_file:
            output_file.write(response.content)
        print("Transparent image saved as 'transparent_image.png'")
    else:
        print(f"Error: Received status code {response.status_code}")
        print("Response:", response.text)

except requests.exceptions.RequestException as e:
    print("An error occurred:", e)
finally:
    # Close the file
    files["image_file"].close()
@@ -0,0 +1,102 @@
import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt

imagepath = "ImagePNG/Dorian.png"

# Load Image
im = cv.imread(imagepath)
if im is None:
    print("Error: Image not found at", imagepath)
    exit()

# Get original dimensions
original_height, original_width = im.shape[:2]
max_dim = 800

if max(original_width, original_height) > max_dim:  # Calculate scaling factor
    if original_width > original_height:
        scale = max_dim / original_width
    else:
        scale = max_dim / original_height
else:
    scale = 1  # No resizing needed if already within the limit

new_width = int(original_width * scale)  # Compute new dimensions
new_height = int(original_height * scale)
new_dim = (new_width, new_height)

# Resize image with preserved aspect ratio
Resized_Image = cv.resize(im, new_dim, interpolation=cv.INTER_AREA)

print(f"Resized image dimensions: {new_width}x{new_height}")

# Convert to Grayscale
Gray_Img = cv.cvtColor(Resized_Image, cv.COLOR_BGR2GRAY)

# Load Haar Cascade for Face Detection
face_cascade = cv.CascadeClassifier(cv.data.haarcascades + 'haarcascade_frontalface_default.xml')
faces = face_cascade.detectMultiScale(Gray_Img, scaleFactor=1.4, minNeighbors=5, minSize=(30, 30))

if len(faces) == 0:
    print("No faces detected.")
    exit()

contour_image = np.zeros_like(Resized_Image, dtype=np.uint8)  # Create a blank image for contours

for (x, y, w, h) in faces:
    # Expand the ROI to include more of the head
    expansion_factor = 0.3  # Increase the size by 30%
    new_x = max(0, int(x - expansion_factor * w))  # Ensure ROI doesn't go out of bounds
    new_y = max(0, int(y - expansion_factor * h))
    new_w = min(Gray_Img.shape[1], int(w + 2 * expansion_factor * w))
    new_h = min(Gray_Img.shape[0], int(h + 2 * expansion_factor * h))

    face_roi = Gray_Img[new_y:new_y + new_h, new_x:new_x + new_w]  # Extract ROI
    hist = cv.calcHist([face_roi], [0], None, [256], [0, 256])  # Calculate the histogram of the face ROI

    non_black_pixels = face_roi[face_roi > 0]  # Exclude pure black pixels (intensity = 0)

    # Calculate the median intensity of non-black pixels
    if len(non_black_pixels) > 0:
        median_intensity = np.median(non_black_pixels)
    else:
        print("All pixels are black, skipping...")
        median_intensity = 0  # Fallback if no valid pixels exist

    # Adjust thresholds based on the median intensity
    lower = int(max(0, 0.66 * median_intensity))
    upper = int(min(255, 1.66 * median_intensity))

    # Apply Canny Edge Detection with the updated thresholds
    edges = cv.Canny(face_roi, lower, upper)
    print(median_intensity)

    # Find Contours in the expanded face ROI
    contours, _ = cv.findContours(edges, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)

    # Draw Contours on the original resized image and on the blank contour image
    for cnt in contours:
        # Offset the contour points to match the original image
        cnt[:, 0, 0] += new_x
        cnt[:, 0, 1] += new_y
        cv.drawContours(Resized_Image, [cnt], -1, (0, 255, 0), 1)  # Draw on the resized image
        cv.drawContours(contour_image, [cnt], -1, (0, 255, 0), 1)  # Draw on the blank contour image

# Convert black background to transparent
b, g, r = cv.split(contour_image)  # Split the channels
alpha = np.where((b == 0) & (g == 0) & (r == 0), 0, 255).astype(np.uint8)

# Merge the channels back with alpha
contour_image_with_alpha = cv.merge([b, g, r, alpha])

# Save the image with transparent background
cv.imwrite("contours_only.png", contour_image_with_alpha)

print("Contours-only PNG saved as 'contours_only.png'")

# Display Final Image with Face Contour
cv.imshow("Face Contour", Resized_Image)
cv.waitKey(0)
cv.destroyAllWindows()
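Side note: the 0.66/1.66 multipliers above are a variant of the common "auto-Canny" heuristic, which normally derives both thresholds symmetrically from the median. A minimal sketch of that standard form for comparison (the sigma parameter and helper name are illustrative, not part of this commit):

import numpy as np

def auto_canny_thresholds(gray, sigma=0.33):
    # lower = (1 - sigma) * median, upper = (1 + sigma) * median
    v = np.median(gray)
    lower = int(max(0, (1.0 - sigma) * v))
    upper = int(min(255, (1.0 + sigma) * v))
    return lower, upper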
[31 binary image files added (PNG previews); sizes range from 5.9 KiB to 2.2 MiB]
@@ -0,0 +1 @@
add_filter.py
@@ -0,0 +1,87 @@
import os
import cv2
import mediapipe as mp
import numpy as np

mp_face_detection = mp.solutions.face_detection
mp_face_mesh = mp.solutions.face_mesh
mp_drawing = mp.solutions.drawing_utils

filter_image_path = "ImagePNG/MoustacheMario.png"
filter_image = cv2.imread(filter_image_path, cv2.IMREAD_UNCHANGED)

def add_filter(image, filter_image, landmarks, size_factor=1.4):
    """
    Adds a filter to an image based on facial landmarks.
    Adjusts the filter size using a `size_factor`.
    """
    # Use eyes as reference points
    left_eye = landmarks[33]  # Left eye landmark
    right_eye = landmarks[263]  # Right eye landmark

    # Distance between eyes determines the filter size
    eye_dist = np.linalg.norm(np.array(left_eye) - np.array(right_eye))

    # Calculate filter size using the size factor
    filter_width = int(eye_dist * size_factor)  # Adjust for desired size
    filter_height = int(filter_width * filter_image.shape[0] / filter_image.shape[1])
    resized_filter = cv2.resize(filter_image, (filter_width, filter_height))

    # Determine filter position above the eyes
    center_x = int((left_eye[0] + right_eye[0]) / 2)
    center_y = int((left_eye[1] + right_eye[1]) / 2)
    x = int(center_x - filter_width / 2)
    y = int(center_y - filter_height / 2)

    # Extract the alpha channel for blending
    alpha_channel = resized_filter[:, :, 3] / 255.0  # Normalize alpha to [0, 1]
    filter_rgb = resized_filter[:, :, :3]

    # Overlay the filter onto the image
    for i in range(resized_filter.shape[0]):
        for j in range(resized_filter.shape[1]):
            if 0 <= y + i < image.shape[0] and 0 <= x + j < image.shape[1]:  # Bounds check
                alpha = alpha_channel[i, j]
                if alpha > 0:  # Only apply non-transparent pixels
                    image[y + i, x + j] = (
                        (1 - alpha) * image[y + i, x + j] + alpha * filter_rgb[i, j]
                    )

    return image

input_image_path = "ImagePNG/Dorian.png"
input_image = cv2.imread(input_image_path)

# Convert to RGB for Mediapipe
rgb_image = cv2.cvtColor(input_image, cv2.COLOR_BGR2RGB)

# Initialize FaceMesh
with mp_face_mesh.FaceMesh(min_detection_confidence=0.5, min_tracking_confidence=0.5) as face_mesh:
    # Detect the face and its key points
    results = face_mesh.process(rgb_image)

    if results.multi_face_landmarks:
        for face_landmarks in results.multi_face_landmarks:
            # Convert normalized landmarks to pixel coordinates
            landmarks = [(lm.x * input_image.shape[1], lm.y * input_image.shape[0]) for lm in face_landmarks.landmark]

            # Apply the filter (moustache) to the face
            input_image = add_filter(input_image, filter_image, landmarks)

# Define the folder path
folder_path = "OutputImage"
os.makedirs(folder_path, exist_ok=True)  # Ensure the folder exists

# Extract the filter name from the filter image path
filter_name = os.path.splitext(os.path.basename(filter_image_path))[0]

# Define the full path to save the image with the filter name included
file_path = os.path.join(folder_path, f"{filter_name}_output_image_.jpg")

# Save the image
cv2.imwrite(file_path, input_image)

# Display result
cv2.imshow("Image with filter", input_image)
cv2.waitKey(0)
cv2.destroyAllWindows()
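A note on the overlay above: the per-pixel Python loop is correct but slow for large filters. A minimal vectorized sketch of the same alpha blend (assuming the filter region lies fully inside the image; the helper name is illustrative, not part of the commit):

import numpy as np

def blend_filter(image, resized_filter, x, y):
    # Vectorized alpha blend of a BGRA filter onto image at (x, y)
    fh, fw = resized_filter.shape[:2]
    alpha = (resized_filter[:, :, 3] / 255.0)[..., None]  # shape (fh, fw, 1) for broadcasting
    roi = image[y:y + fh, x:x + fw]
    image[y:y + fh, x:x + fw] = ((1 - alpha) * roi + alpha * resized_filter[:, :, :3]).astype(np.uint8)
    return image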
@@ -0,0 +1,85 @@
import os
import cv2
import mediapipe as mp
import numpy as np

# Mediapipe initialization
mp_face_detection = mp.solutions.face_detection
mp_face_mesh = mp.solutions.face_mesh

# Load filter image (transparent PNG)
filter_image_path = "ImagePNG/MArio.png"
filter_image = cv2.imread(filter_image_path, cv2.IMREAD_UNCHANGED)

def add_filter_hat(image, filter_image, bbox, scale_factor=1.2):
    """
    Add a filter image to a face image at a specified bounding box position,
    scaling it dynamically based on the face size.
    """
    x_min, y_min, box_width, box_height = bbox

    # Scale the filter based on the face width and a scaling factor
    filter_width = int(box_width * scale_factor)
    filter_height = int(filter_width * filter_image.shape[0] / filter_image.shape[1])
    resized_filter = cv2.resize(filter_image, (filter_width, filter_height))

    # Position filter above the head
    x = int(x_min - (filter_width - box_width) / 2)
    y = int(y_min - filter_height * 0.7)  # Slight vertical offset above the face

    # Extract alpha channel (transparency) from the filter
    alpha_channel = resized_filter[:, :, 3] / 255.0  # Normalize to range [0, 1]
    filter_rgb = resized_filter[:, :, :3]

    # Overlay the filter on the image using alpha blending
    for i in range(filter_height):
        for j in range(filter_width):
            if 0 <= y + i < image.shape[0] and 0 <= x + j < image.shape[1]:
                alpha = alpha_channel[i, j]
                if alpha > 0:  # Apply only non-transparent pixels
                    image[y + i, x + j] = (1 - alpha) * image[y + i, x + j] + alpha * filter_rgb[i, j]

    return image

# Load input image
input_image_path = "ImagePNG/Dorian.png"
input_image = cv2.imread(input_image_path)

# Convert to RGB for Mediapipe
rgb_image = cv2.cvtColor(input_image, cv2.COLOR_BGR2RGB)

# Use Mediapipe for face detection
with mp_face_detection.FaceDetection(model_selection=1, min_detection_confidence=0.5) as face_detection:
    results = face_detection.process(rgb_image)

    if results.detections:
        for detection in results.detections:
            bbox = detection.location_data.relative_bounding_box
            h, w, _ = input_image.shape
            # Convert relative bounding box to absolute dimensions
            x_min = int(bbox.xmin * w)
            y_min = int(bbox.ymin * h)
            box_width = int(bbox.width * w)
            box_height = int(bbox.height * h)

            # Adjust the scale factor based on face height:
            # larger faces get proportionally larger hats
            face_height_ratio = box_height / h  # Ratio of face height to image height
            dynamic_scale_factor = 2.75 + face_height_ratio  # Base size + adjustment

            # Add filter to the image with dynamic scaling
            input_image = add_filter_hat(input_image, filter_image, (x_min, y_min, box_width, box_height), scale_factor=dynamic_scale_factor)

# Define output folder and save path
output_folder = "OutputImage"
os.makedirs(output_folder, exist_ok=True)  # Ensure the folder exists
filter_name = os.path.splitext(os.path.basename(filter_image_path))[0]
output_path = os.path.join(output_folder, f"{filter_name}_output_image_dynamic.jpg")

# Save the output image
cv2.imwrite(output_path, input_image)

# Display result
cv2.imshow("Image with Filter", input_image)
cv2.waitKey(0)
cv2.destroyAllWindows()
@@ -0,0 +1,82 @@
import os
import cv2
import mediapipe as mp
import numpy as np

# Mediapipe setup
mp_face_detection = mp.solutions.face_detection
mp_face_mesh = mp.solutions.face_mesh

# Load the moustache filter
filter_image_path = "ImagePNG/MoustacheMario.png"
filter_image = cv2.imread(filter_image_path, cv2.IMREAD_UNCHANGED)

def add_filter_moustache(image, filter_image, nose_tip, scale_factor):
    """
    Add a moustache filter to an image based on the nose tip position.
    """
    nose_x, nose_y = nose_tip

    # Scale the filter image dynamically based on the face width
    filter_width = int(image.shape[1] * scale_factor * 0.1)  # Scale relative to image width
    filter_height = int(filter_width * filter_image.shape[0] / filter_image.shape[1])
    resized_filter = cv2.resize(filter_image, (filter_width, filter_height))

    # Adjust the position to place the moustache below the nose
    x = int(nose_x - filter_width / 2)
    y = int(nose_y + filter_height * 0.2)

    # Extract alpha channel (transparency) from the filter
    alpha_channel = resized_filter[:, :, 3] / 255.0  # Normalize to range [0, 1]
    filter_rgb = resized_filter[:, :, :3]

    # Overlay the filter on the image using alpha blending
    for i in range(filter_height):
        for j in range(filter_width):
            if 0 <= y + i < image.shape[0] and 0 <= x + j < image.shape[1]:
                alpha = alpha_channel[i, j]
                if alpha > 0:  # Apply only non-transparent pixels
                    image[y + i, x + j] = (
                        (1 - alpha) * image[y + i, x + j] + alpha * filter_rgb[i, j]
                    )

    return image

# Load input image
input_image_path = "ImagePNG/Dorian.png"
input_image = cv2.imread(input_image_path)

# Convert to RGB for Mediapipe
rgb_image = cv2.cvtColor(input_image, cv2.COLOR_BGR2RGB)

# Use Mediapipe Face Mesh for robust landmark detection
with mp_face_mesh.FaceMesh(static_image_mode=True, min_detection_confidence=0.5) as face_mesh:
    results = face_mesh.process(rgb_image)

    if results.multi_face_landmarks:
        for face_landmarks in results.multi_face_landmarks:
            # Get the nose tip landmark (index 4 in Mediapipe Face Mesh)
            nose_tip = face_landmarks.landmark[4]
            nose_x = int(nose_tip.x * input_image.shape[1])
            nose_y = int(nose_tip.y * input_image.shape[0])

            # Dynamically calculate scale factor based on face size
            face_width = abs(face_landmarks.landmark[454].x - face_landmarks.landmark[234].x) * input_image.shape[1]
            dynamic_scale_factor = 1.5 + (face_width / input_image.shape[1])  # Base size + adjustment

            # Add filter to the image
            input_image = add_filter_moustache(input_image, filter_image, (nose_x, nose_y), scale_factor=dynamic_scale_factor)

# Define output folder and save path
output_folder = "OutputImage"
os.makedirs(output_folder, exist_ok=True)  # Ensure the folder exists
filter_name = os.path.splitext(os.path.basename(filter_image_path))[0]
output_path = os.path.join(output_folder, f"{filter_name}_output_image_dynamic.jpg")

# Save the output image
cv2.imwrite(output_path, input_image)

# Display result
cv2.imshow("Image with Filter", input_image)
cv2.waitKey(0)
cv2.destroyAllWindows()
@@ -0,0 +1,31 @@
import tkinter as tk
from tkinter import ttk

# Function to toggle the button state
# (renamed so it does not shadow the Button widget below,
# and flipping the BooleanVar so the state actually changes)
def toggle_state():
    toggle_var.set(not toggle_var.get())
    if toggle_var.get():
        toggle_button.config(text="Activated")
    else:
        toggle_button.config(text="Deactivated")

# Main window
root = tk.Tk()
root.title("Toggle Button with Dropdown Menu")

# Dropdown menu for options
options = ["Option 1", "Option 2", "Option 3"]
selected_option = tk.StringVar(root)
selected_option.set(options[0])  # default value

dropdown_menu = ttk.Combobox(root, textvariable=selected_option, values=options)
dropdown_menu.pack(pady=10)

# Variable to store the toggle state
toggle_var = tk.BooleanVar()

# Toggle button
toggle_button = tk.Button(root, text="Deactivated", command=toggle_state)
toggle_button.pack(pady=20)

# Start the GUI
root.mainloop()
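For completeness, the dropdown's current choice can be read back from the bound StringVar at any time, for example inside the toggle handler (a usage sketch, not part of the commit):

choice = selected_option.get()  # e.g. "Option 1"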
@@ -0,0 +1,34 @@
import cv2 as cv
import numpy as np

# Load the Mask R-CNN model
net = cv.dnn.readNetFromTensorflow('frozen_inference_graph.pb', 'mask_rcnn_inception_v2_coco_2018_01_28.pbtxt')

# Load the image
imagepath = "photo.jpg"
image = cv.imread(imagepath)
h, w = image.shape[:2]

# Preprocess the image for Mask R-CNN (this TF model expects no mean subtraction)
blob = cv.dnn.blobFromImage(image, swapRB=True, crop=False)
net.setInput(blob)

# Get the model outputs: detection boxes and instance masks
boxes, masks = net.forward(['detection_out_final', 'detection_masks'])

# Apply the segmentation for the person class
mask_image = image.copy()
for i in range(boxes.shape[2]):
    box = boxes[0, 0, i]  # [batchId, classId, score, left, top, right, bottom]
    class_id = int(box[1])
    confidence = box[2]
    if class_id == 0 and confidence > 0.5:  # Class 0 corresponds to "person" in the COCO labels used with this model
        # Bounding-box coordinates (normalized -> pixels)
        (x, y, x2, y2) = (box[3:7] * np.array([w, h, w, h])).astype("int")
        # Create a (rectangular) mask of the person
        mask = np.zeros((h, w), dtype=np.uint8)
        mask[y:y2, x:x2] = 255  # Define the person's area
        # Apply the mask to the original image
        result = cv.bitwise_and(image, image, mask=mask)
        # Show the image with the person segmented and the background removed
        cv.imshow("Segmented Image", result)
        cv.waitKey(0)
cv.destroyAllWindows()
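The rectangle above only approximates the person's silhouette. Mask R-CNN also returns a per-detection soft mask in the `detection_masks` output, which can be used instead; a sketch following the OpenCV sample conventions (`i`, `class_id`, and the box coordinates as in the loop above):

class_mask = masks[i, class_id]                       # low-resolution soft mask for this detection
class_mask = cv.resize(class_mask, (x2 - x, y2 - y))  # scale it up to the bounding box
mask = np.zeros((h, w), dtype=np.uint8)
mask[y:y2, x:x2] = (class_mask > 0.5).astype(np.uint8) * 255  # binarize the soft mask
result = cv.bitwise_and(image, image, mask=mask)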
@@ -0,0 +1,118 @@
import cv2
import mediapipe as mp
import numpy as np

# Initialize MediaPipe Face Detection and Drawing utilities
mp_face_detection = mp.solutions.face_detection
mp_drawing = mp.solutions.drawing_utils
mp_face_mesh = mp.solutions.face_mesh

# Initialize variables
face_detection = mp_face_detection.FaceDetection(min_detection_confidence=0.2)
face_mesh = mp_face_mesh.FaceMesh(min_detection_confidence=0.2, min_tracking_confidence=0.5)

# Initialize camera
cap = cv2.VideoCapture(0)

# Variables for button states
greyscale = False
sunglasses_on = False
saved_image = None

# Function to overlay sunglasses
def overlay_sunglasses(image, face_landmarks, sunglasses_img):
    if len(face_landmarks) > 0:
        # Coordinates for the eyes based on face mesh landmarks
        left_eye = face_landmarks[33]
        right_eye = face_landmarks[263]

        # Calculate the center between the eyes for positioning sunglasses
        eye_center_x = int((left_eye[0] + right_eye[0]) / 2)
        eye_center_y = int((left_eye[1] + right_eye[1]) / 2)

        # Calculate the scaling factor for sunglasses based on the distance between the eyes
        eye_distance = np.linalg.norm(np.array(left_eye) - np.array(right_eye))
        scale_factor = eye_distance / sunglasses_img.shape[1]

        # Resize sunglasses based on scale factor
        sunglasses_resized = cv2.resize(sunglasses_img, None, fx=scale_factor, fy=scale_factor)

        # Determine the region of interest (ROI) for sunglasses
        start_x = int(eye_center_x - sunglasses_resized.shape[1] / 2)
        start_y = int(eye_center_y - sunglasses_resized.shape[0] / 2)

        # Overlay sunglasses on the face (with a bounds check to stay inside the frame)
        for i in range(sunglasses_resized.shape[0]):
            for j in range(sunglasses_resized.shape[1]):
                if 0 <= start_y + i < image.shape[0] and 0 <= start_x + j < image.shape[1]:
                    if sunglasses_resized[i, j][3] > 0:  # If not transparent
                        image[start_y + i, start_x + j] = sunglasses_resized[i, j][0:3]  # Apply RGB channels

    return image

# Function to apply greyscale filter
def toggle_greyscale(image, greyscale):
    if greyscale:
        return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    else:
        return image

# Load sunglasses image with transparency (PNG)
sunglasses_img = cv2.imread("sunglasses.png", cv2.IMREAD_UNCHANGED)

while cap.isOpened():
    ret, frame = cap.read()

    if not ret:
        break

    # Flip the frame horizontally for a mirror effect
    frame = cv2.flip(frame, 1)

    # Convert to RGB for MediaPipe processing
    rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    results_detection = face_detection.process(rgb_frame)
    results_mesh = face_mesh.process(rgb_frame)

    # Draw face detection bounding boxes
    if results_detection.detections:
        for detection in results_detection.detections:
            mp_drawing.draw_detection(frame, detection)

    # Draw face mesh landmarks
    if results_mesh.multi_face_landmarks:
        for face_landmarks in results_mesh.multi_face_landmarks:
            mp_drawing.draw_landmarks(frame, face_landmarks, mp_face_mesh.FACEMESH_CONTOURS)

    # Apply greyscale filter if enabled
    frame = toggle_greyscale(frame, greyscale)

    # Display the image
    cv2.imshow('Face Capture Controls', frame)

    key = cv2.waitKey(1) & 0xFF

    # Save Image
    if key == ord('s'):  # Press 's' to save image
        saved_image = frame.copy()
        cv2.imwrite("captured_image.png", saved_image)
        print("Image Saved!")

    # Retake Image
    elif key == ord('r'):  # Press 'r' to retake image
        saved_image = None
        print("Image Retaken!")

    # Toggle Greyscale
    elif key == ord('g'):  # Press 'g' to toggle greyscale
        greyscale = not greyscale
        print(f"Greyscale: {'Enabled' if greyscale else 'Disabled'}")

    # Kill Switch
    elif key == ord('q'):  # Press 'q' to quit
        break

# Release camera and close all windows
cap.release()
cv2.destroyAllWindows()
@@ -0,0 +1,19 @@
# Importing Required Modules
from rembg import remove
from PIL import Image

# Store the path of the image in the variable input_path
input_path = 'ImagePNG/map.png'

# Store the path of the output image in the variable output_path
output_path = 'ImagePNG/map1.png'

# Open the image (renamed so it does not shadow the built-in input())
input_image = Image.open(input_path)

# Removing the background from the given image
output_image = remove(input_image)

# Saving the image to the given path
output_image.save(output_path)
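The same background removal is also available from rembg's command-line interface, which can be handy for quick tests (assuming a standard rembg install):

rembg i ImagePNG/map.png ImagePNG/map1.png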
@@ -0,0 +1,238 @@
import cv2
import tkinter as tk
import mediapipe as mp
import numpy as np
import os
import math

# Load images with transparency
mario_hat_image_path = "ImagePNG/MArio.png"
sunglasses_image_path = "ImagePNG/Glasses.png"
moustache_image_path = "ImagePNG/MoustacheMario.png"

# Load images
mario_hat = cv2.imread(mario_hat_image_path, cv2.IMREAD_UNCHANGED)
sunglasses = cv2.imread(sunglasses_image_path, cv2.IMREAD_UNCHANGED)
moustache = cv2.imread(moustache_image_path, cv2.IMREAD_UNCHANGED)

# Check if images were loaded correctly
if mario_hat is None:
    print("Error: Mario hat image not found.")
    exit()
if sunglasses is None:
    print("Error: Sunglasses image not found.")
    exit()
if moustache is None:
    print("Error: Moustache image not found.")
    exit()

# Initialize MediaPipe FaceMesh
mp_face_mesh = mp.solutions.face_mesh
face_mesh = mp_face_mesh.FaceMesh(min_detection_confidence=0.5, min_tracking_confidence=0.5)

# Variables for toggling filters
mario_hat_active = False
sunglasses_active = False
moustache_active = False
show_angles = False

# Open webcam for capturing live feed
cap = cv2.VideoCapture(0)
if not cap.isOpened():
    print("Error: The webcam cannot be opened")
    exit()

# Variable to hold the freeze frame
freeze_frame = None

def calculate_angles(landmarks):
    left_eye = np.array(landmarks[33])
    right_eye = np.array(landmarks[263])
    nose_tip = np.array(landmarks[1])
    chin = np.array(landmarks[152])
    yaw = math.degrees(math.atan2(right_eye[1] - left_eye[1], right_eye[0] - left_eye[0]))
    pitch = math.degrees(math.atan2(chin[1] - nose_tip[1], chin[0] - nose_tip[0]))
    return yaw, pitch

def apply_mario_hat(frame, landmarks):
    global mario_hat
    if mario_hat_active and mario_hat is not None:
        forehead = landmarks[10]
        chin = landmarks[152]
        left_side = landmarks[234]
        right_side = landmarks[454]
        face_width = int(np.linalg.norm(np.array(left_side) - np.array(right_side)))
        hat_width = int(face_width * 4.0)
        hat_height = int(hat_width * mario_hat.shape[0] / mario_hat.shape[1])
        mario_hat_resized = cv2.resize(mario_hat, (hat_width, hat_height))
        x = int(forehead[0] - hat_width / 2)
        y = int(forehead[1] - hat_height * 0.7)
        alpha_channel = mario_hat_resized[:, :, 3] / 255.0
        hat_rgb = mario_hat_resized[:, :, :3]
        for i in range(hat_height):
            for j in range(hat_width):
                if 0 <= y + i < frame.shape[0] and 0 <= x + j < frame.shape[1]:
                    alpha = alpha_channel[i, j]
                    if alpha > 0:
                        for c in range(3):
                            frame[y + i, x + j, c] = (1 - alpha) * frame[y + i, x + j, c] + alpha * hat_rgb[i, j, c]
    return frame

def apply_sunglasses(frame, landmarks):
    global sunglasses
    if sunglasses_active and sunglasses is not None:
        left_eye = landmarks[33]
        right_eye = landmarks[263]
        eye_dist = np.linalg.norm(np.array(left_eye) - np.array(right_eye))
        scaling_factor = 1.75
        sunglasses_width = int(eye_dist * scaling_factor)
        sunglasses_height = int(sunglasses_width * sunglasses.shape[0] / sunglasses.shape[1])
        sunglasses_resized = cv2.resize(sunglasses, (sunglasses_width, sunglasses_height))
        center_x = int((left_eye[0] + right_eye[0]) / 2)
        center_y = int((left_eye[1] + right_eye[1]) / 2)
        x = int(center_x - sunglasses_resized.shape[1] / 2)
        y = int(center_y - sunglasses_resized.shape[0] / 2)
        alpha_channel = sunglasses_resized[:, :, 3] / 255.0
        sunglasses_rgb = sunglasses_resized[:, :, :3]
        for i in range(sunglasses_resized.shape[0]):
            for j in range(sunglasses_resized.shape[1]):
                if alpha_channel[i, j] > 0:
                    for c in range(3):
                        frame[y + i, x + j, c] = (1 - alpha_channel[i, j]) * frame[y + i, x + j, c] + alpha_channel[i, j] * sunglasses_rgb[i, j, c]
    return frame

def apply_moustache(frame, landmarks):
    global moustache
    if moustache_active and moustache is not None:
        nose_base = landmarks[1]
        mouth_left = landmarks[61]
        mouth_right = landmarks[291]
        mouth_width = int(np.linalg.norm(np.array(mouth_left) - np.array(mouth_right)))
        moustache_width = int(mouth_width * 1.5)
        moustache_height = int(moustache_width * moustache.shape[0] / moustache.shape[1])
        moustache_resized = cv2.resize(moustache, (moustache_width, moustache_height))
        x = int(nose_base[0] - moustache_width / 2)
        y = int(nose_base[1])
        alpha_channel = moustache_resized[:, :, 3] / 255.0
        moustache_rgb = moustache_resized[:, :, :3]
        for i in range(moustache_height):
            for j in range(moustache_width):
                if 0 <= y + i < frame.shape[0] and 0 <= x + j < frame.shape[1]:
                    alpha = alpha_channel[i, j]
                    if alpha > 0:
                        for c in range(3):
                            frame[y + i, x + j, c] = (1 - alpha) * frame[y + i, x + j, c] + alpha * moustache_rgb[i, j, c]
    return frame

def update_frame():
    global mario_hat_active, sunglasses_active, show_angles, freeze_frame, moustache_active
    ret, frame = cap.read()
    if ret:
        rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        results = face_mesh.process(rgb_frame)
        if results.multi_face_landmarks:
            for face_landmarks in results.multi_face_landmarks:
                landmarks = [(lm.x * frame.shape[1], lm.y * frame.shape[0]) for lm in face_landmarks.landmark]
                yaw, pitch = calculate_angles(landmarks)
                if mario_hat_active:
                    frame = apply_mario_hat(frame, landmarks)
                if sunglasses_active:
                    frame = apply_sunglasses(frame, landmarks)
                if moustache_active:
                    frame = apply_moustache(frame, landmarks)
                if show_angles:
                    cv2.putText(frame, f"Yaw: {yaw:.2f}", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
                    cv2.putText(frame, f"Pitch: {pitch:.2f}", (10, 70), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
        cv2.imshow("Webcam Feed", frame)
        freeze_frame = frame
    root.after(10, update_frame)

def toggle_mario_hat():
    global mario_hat_active
    mario_hat_active = not mario_hat_active
    status = "activated" if mario_hat_active else "deactivated"
    print(f"Mario hat filter {status}")

def toggle_sunglasses():
    global sunglasses_active
    sunglasses_active = not sunglasses_active
    status = "activated" if sunglasses_active else "deactivated"
    print(f"Sunglasses filter {status}")

def toggle_moustache():
    global moustache_active
    moustache_active = not moustache_active
    status = "activated" if moustache_active else "deactivated"
    print(f"Moustache filter {status}")

def toggle_angles():
    global show_angles
    show_angles = not show_angles
    status = "shown" if show_angles else "hidden"
    print(f"Angles display {status}")

def show_freeze_frame():
    if freeze_frame is not None:
        cv2.imshow("Face Capture", freeze_frame)

def retake_image():
    global freeze_frame
    ret, frame = cap.read()
    if ret:
        rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        results = face_mesh.process(rgb_frame)
        if results.multi_face_landmarks:
            for face_landmarks in results.multi_face_landmarks:
                landmarks = [(lm.x * frame.shape[1], lm.y * frame.shape[0]) for lm in face_landmarks.landmark]
                frame = apply_mario_hat(frame, landmarks)
                frame = apply_sunglasses(frame, landmarks)
                frame = apply_moustache(frame, landmarks)
        freeze_frame = frame.copy()
        show_freeze_frame()

def save_image():
    global freeze_frame
    if freeze_frame is not None:
        save_path = "E:/ECAM/2024-25/IT and Robotics Labs/captured_face.png"
        cv2.imwrite(save_path, freeze_frame)
        print(f"Image saved to {save_path}")

# Tkinter GUI setup
root = tk.Tk()
root.title("Face Capture Controls")
root.geometry("300x400")
root.configure(bg="#ffffff")

# Buttons on the control window with updated font and colors
mario_hat_button = tk.Button(root, text="Toggle Mario Hat Filter", font=("Arial", 12, "bold"), command=toggle_mario_hat, bg="#5A4D41", fg="white", padx=10, pady=5)
mario_hat_button.pack(pady=10)

sunglasses_button = tk.Button(root, text="Toggle Sunglasses Filter", font=("Arial", 12, "bold"), command=toggle_sunglasses, bg="#B8860B", fg="white", padx=10, pady=5)
sunglasses_button.pack(pady=10)

moustache_button = tk.Button(root, text="Toggle Moustache Filter", font=("Arial", 12, "bold"), command=toggle_moustache, bg="#8B8000", fg="white", padx=10, pady=5)
moustache_button.pack(pady=10)

retake_image_button = tk.Button(root, text="Retake Image", font=("Arial", 12, "bold"), command=retake_image, bg="#2E8B57", fg="white", padx=10, pady=5)
retake_image_button.pack(pady=10)

save_image_button = tk.Button(root, text="Save Captured Image", font=("Arial", 12, "bold"), command=save_image, bg="#6A5ACD", fg="white", padx=10, pady=5)
save_image_button.pack(pady=10)

freeze_frame_button = tk.Button(root, text="Show Freeze Frame", font=("Arial", 12, "bold"), command=show_freeze_frame, bg="#D2691E", fg="white", padx=10, pady=5)
freeze_frame_button.pack(pady=10)

# Graceful exit
def on_closing():
    cap.release()
    cv2.destroyAllWindows()
    root.destroy()

root.protocol("WM_DELETE_WINDOW", on_closing)

show_freeze_frame()

# Start Tkinter event loop and OpenCV frame updates
update_frame()
root.mainloop()
[8 binary image files added (PNG previews); sizes range from 2.8 KiB to 430 KiB]
@@ -0,0 +1,267 @@
import cv2
import tkinter as tk
import mediapipe as mp
import numpy as np
import os
import math
from rembg import remove
from PIL import Image
#import dobot

import vector_draw

# Load images with transparency
mario_hat_image_path = "Filters/Mario hat.png"
sunglasses_image_path = "Filters/Glasses.png"
moustache_image_path = "Filters/MoustacheMario.png"

# Load images
mario_hat = cv2.imread(mario_hat_image_path, cv2.IMREAD_UNCHANGED)
sunglasses = cv2.imread(sunglasses_image_path, cv2.IMREAD_UNCHANGED)
moustache = cv2.imread(moustache_image_path, cv2.IMREAD_UNCHANGED)

# Check if images were loaded correctly
if mario_hat is None:
    print("Error: Mario hat image not found.")
    exit()
if sunglasses is None:
    print("Error: Sunglasses image not found.")
    exit()
if moustache is None:
    print("Error: Moustache image not found.")
    exit()

# Initialize MediaPipe FaceMesh
mp_face_mesh = mp.solutions.face_mesh
face_mesh = mp_face_mesh.FaceMesh(min_detection_confidence=0.5, min_tracking_confidence=0.5)

# Variables for toggling filters
mario_hat_active = False
sunglasses_active = False
moustache_active = False
show_angles = False

# Open webcam for capturing live feed
cap = cv2.VideoCapture(0)
if not cap.isOpened():
    print("Error: The webcam cannot be opened")
    exit()

# Variables to hold the contour frame and the edge image
contour_frame = None
resized_edges = None

def calculate_angles(landmarks):
    left_eye = np.array(landmarks[33])
    right_eye = np.array(landmarks[263])
    nose_tip = np.array(landmarks[1])
    chin = np.array(landmarks[152])
    yaw = math.degrees(math.atan2(right_eye[1] - left_eye[1], right_eye[0] - left_eye[0]))
    pitch = math.degrees(math.atan2(chin[1] - nose_tip[1], chin[0] - nose_tip[0]))
    return yaw, pitch

def apply_mario_hat(frame, landmarks):
    global mario_hat
    if mario_hat_active and mario_hat is not None:
        forehead = landmarks[10]
        chin = landmarks[152]
        left_side = landmarks[234]
        right_side = landmarks[454]
        face_width = int(np.linalg.norm(np.array(left_side) - np.array(right_side)))
        hat_width = int(face_width * 4.0)
        hat_height = int(hat_width * mario_hat.shape[0] / mario_hat.shape[1])
        mario_hat_resized = cv2.resize(mario_hat, (hat_width, hat_height))
        x = int(forehead[0] - hat_width / 2)
        y = int(forehead[1] - hat_height * 0.7)
        alpha_channel = mario_hat_resized[:, :, 3] / 255.0
        hat_rgb = mario_hat_resized[:, :, :3]
        for i in range(hat_height):
            for j in range(hat_width):
                if 0 <= y + i < frame.shape[0] and 0 <= x + j < frame.shape[1]:
                    alpha = alpha_channel[i, j]
                    if alpha > 0:
                        for c in range(3):
                            frame[y + i, x + j, c] = (1 - alpha) * frame[y + i, x + j, c] + alpha * hat_rgb[i, j, c]
    return frame

def apply_sunglasses(frame, landmarks):
    global sunglasses
    if sunglasses_active and sunglasses is not None:
        left_eye = landmarks[33]
        right_eye = landmarks[263]
        eye_dist = np.linalg.norm(np.array(left_eye) - np.array(right_eye))
        scaling_factor = 1.75
        sunglasses_width = int(eye_dist * scaling_factor)
        sunglasses_height = int(sunglasses_width * sunglasses.shape[0] / sunglasses.shape[1])
        sunglasses_resized = cv2.resize(sunglasses, (sunglasses_width, sunglasses_height))
        center_x = int((left_eye[0] + right_eye[0]) / 2)
        center_y = int((left_eye[1] + right_eye[1]) / 2)
        x = int(center_x - sunglasses_resized.shape[1] / 2)
        y = int(center_y - sunglasses_resized.shape[0] / 2)
        alpha_channel = sunglasses_resized[:, :, 3] / 255.0
        sunglasses_rgb = sunglasses_resized[:, :, :3]
        for i in range(sunglasses_resized.shape[0]):
            for j in range(sunglasses_resized.shape[1]):
                if alpha_channel[i, j] > 0:
                    for c in range(3):
                        frame[y + i, x + j, c] = (1 - alpha_channel[i, j]) * frame[y + i, x + j, c] + alpha_channel[i, j] * sunglasses_rgb[i, j, c]
    return frame

def apply_moustache(frame, landmarks):
    global moustache
    if moustache_active and moustache is not None:
        nose_base = landmarks[1]
        mouth_left = landmarks[61]
        mouth_right = landmarks[291]
        mouth_width = int(np.linalg.norm(np.array(mouth_left) - np.array(mouth_right)))
        moustache_width = int(mouth_width * 1.5)
        moustache_height = int(moustache_width * moustache.shape[0] / moustache.shape[1])
        moustache_resized = cv2.resize(moustache, (moustache_width, moustache_height))
        x = int(nose_base[0] - moustache_width / 2)
        y = int(nose_base[1])
        alpha_channel = moustache_resized[:, :, 3] / 255.0
        moustache_rgb = moustache_resized[:, :, :3]
        for i in range(moustache_height):
            for j in range(moustache_width):
                if 0 <= y + i < frame.shape[0] and 0 <= x + j < frame.shape[1]:
                    alpha = alpha_channel[i, j]
                    if alpha > 0:
                        for c in range(3):
                            frame[y + i, x + j, c] = (1 - alpha) * frame[y + i, x + j, c] + alpha * moustache_rgb[i, j, c]
    return frame

def update_frame():
    global mario_hat_active, sunglasses_active, show_angles, contour_frame, moustache_active
    ret, frame = cap.read()
    if ret:
        rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        results = face_mesh.process(rgb_frame)
        if results.multi_face_landmarks:
            for face_landmarks in results.multi_face_landmarks:
                landmarks = [(lm.x * frame.shape[1], lm.y * frame.shape[0]) for lm in face_landmarks.landmark]
                yaw, pitch = calculate_angles(landmarks)
                if mario_hat_active:
                    frame = apply_mario_hat(frame, landmarks)
                if sunglasses_active:
                    frame = apply_sunglasses(frame, landmarks)
                if moustache_active:
                    frame = apply_moustache(frame, landmarks)
                if show_angles:
                    cv2.putText(frame, f"Yaw: {yaw:.2f}", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
                    cv2.putText(frame, f"Pitch: {pitch:.2f}", (10, 70), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
        cv2.imshow("Webcam Feed", frame)
        contour_frame = frame
    root.after(100, update_frame)

def toggle_mario_hat():
    global mario_hat_active
    mario_hat_active = not mario_hat_active
    status = "activated" if mario_hat_active else "deactivated"
    print(f"Mario hat filter {status}")

def toggle_sunglasses():
    global sunglasses_active
    sunglasses_active = not sunglasses_active
    status = "activated" if sunglasses_active else "deactivated"
    print(f"Sunglasses filter {status}")

def toggle_moustache():
    global moustache_active
    moustache_active = not moustache_active
    status = "activated" if moustache_active else "deactivated"
    print(f"Moustache filter {status}")

def toggle_angles():
    global show_angles
    show_angles = not show_angles
    status = "shown" if show_angles else "hidden"
    print(f"Angles display {status}")

def show_contour_frame():
    # Guard on resized_edges, since that is what gets displayed
    if resized_edges is not None:
        # Display the result
        cv2.imshow('Edges', resized_edges)

def save_image():
    global contour_frame, resized_edges
    if contour_frame is not None:
        save_path = "Tmp/captured_face.png"
        cv2.imwrite(save_path, contour_frame)
        print(f"Image saved to {save_path}")

        # Store the path of the image in the variable input_path
        input_path = 'Tmp/captured_face.png'

        # Store the path of the output image in the variable output_path
        output_path = 'Tmp/captured_face_nobg.png'

        # Open the image (renamed so it does not shadow the built-in input())
        input_image = Image.open(input_path)

        # Removing the background from the given image
        output_image = remove(input_image)

        # Saving the image to the given path
        output_image.save(output_path)

        image = cv2.imread(output_path, cv2.IMREAD_GRAYSCALE)
        mask = (image > 1) & (image < 254)
        blurred_image = cv2.GaussianBlur(image, (11, 11), 0)
        median_val = np.median(blurred_image[mask])
        lower_threshold = int(max(0, 0.5 * median_val))
        upper_threshold = int(min(255, 1.2 * median_val))
        print(f"Automatic lower threshold: {lower_threshold}")
        print(f"Automatic upper threshold: {upper_threshold}")

        # Apply Canny edge detection using the calculated thresholds
        edges = cv2.Canny(blurred_image, lower_threshold, upper_threshold)

        # Resize the output image to a smaller size (e.g., 50% of the original size)
        output_height, output_width = edges.shape[:2]
        resized_edges = cv2.resize(edges, (output_width // 2, output_height // 2), interpolation=cv2.INTER_AREA)

        # Save the resized result to a file
        cv2.imwrite('Tmp/final_output_image.png', resized_edges)

def start_dobot():
    vector_draw.vector_draw()

# Tkinter GUI setup
root = tk.Tk()
root.title("Control Tab")
root.geometry("300x370")
root.configure(bg="#004346")

# Buttons on the control window with updated font and colors
mario_hat_button = tk.Button(root, text="Add Mario Hat", font=("Arial", 12, "bold"), command=toggle_mario_hat, bg="#4C8577", fg="white", padx=10, pady=5, height=1, width=20)
mario_hat_button.pack(pady=10)

sunglasses_button = tk.Button(root, text="Add Glasses", font=("Arial", 12, "bold"), command=toggle_sunglasses, bg="#4C8577", fg="white", padx=10, pady=5, height=1, width=20)
sunglasses_button.pack(pady=10)

moustache_button = tk.Button(root, text="Add Mario Moustache", font=("Arial", 12, "bold"), command=toggle_moustache, bg="#4C8577", fg="white", padx=10, pady=5, height=1, width=20)
moustache_button.pack(pady=10)

save_image_button = tk.Button(root, text="Save/Retake Image", font=("Arial", 12, "bold"), command=save_image, bg="#49A078", fg="white", padx=10, pady=5, height=1, width=20)
save_image_button.pack(pady=10)

contour_frame_button = tk.Button(root, text="Show Contour Image", font=("Arial", 12, "bold"), command=show_contour_frame, bg="#216869", fg="white", padx=10, pady=5, height=1, width=20)
contour_frame_button.pack(pady=10)

#contour_frame_button = tk.Button(root, text="Start Dobot Drawing", font=("Arial", 12, "bold"), command=start_dobot, bg="#49A078", fg="white", padx=10, pady=5, height=1, width=20)
#contour_frame_button.pack(pady=10)

# Graceful exit
def on_closing():
    cap.release()
    cv2.destroyAllWindows()
    root.destroy()

root.protocol("WM_DELETE_WINDOW", on_closing)

show_contour_frame()

# Start Tkinter event loop and OpenCV frame updates
update_frame()
root.mainloop()
@@ -0,0 +1,80 @@
import cv2
import numpy as np
import dobot
import time

def vector_draw():
    # Drawing parameters
    DRAW_SPEED = 1  # Drawing speed
    DRAW_DEPTH = -30.5  # Pen height while drawing (pen up is DRAW_DEPTH + 15)
    INIT_POSITION = [-100, 150]

    # --------------------------------------------------------------------------
    # IMAGE TREATMENT
    # --------------------------------------------------------------------------
    # Load the image in grayscale
    image = cv2.imread("Tmp/captured_face.png", cv2.IMREAD_GRAYSCALE)

    # Create a mask to exclude background pixels (assuming background is near white or black)
    # For example, exclude pixels that are close to white (255) and black (0)
    mask = (image > 1) & (image < 254)  # Keep only pixels that are not close to white or black

    # Apply Gaussian Blur to reduce noise
    blurred_image = cv2.GaussianBlur(image, (11, 11), 0)

    # Calculate the median of only the foreground pixels
    median_val = np.median(blurred_image[mask])

    # Automatically calculate thresholds based on the median pixel intensity
    lower_threshold = int(max(0, 0.5 * median_val))
    upper_threshold = int(min(255, 1.2 * median_val))
    print(f"Automatic lower threshold: {lower_threshold}")
    print(f"Automatic upper threshold: {upper_threshold}")

    # Apply Canny edge detection using the calculated thresholds
    edges = cv2.Canny(blurred_image, lower_threshold, upper_threshold)

    # Find Contours
    contours, _ = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    # Initialize an array to store all points
    all_points = []

    # Define Dobot workspace dimensions (e.g., in mm)
    robot_workspace = (200, 200 * 2 / 3)  # Replace with your Dobot's range in mm

    # Scale function to map image coordinates to Dobot's workspace
    def scale_coordinates(point, img_dim, robot_dim):
        img_x, img_y = point
        img_width, img_height = img_dim
        robot_x_range, robot_y_range = robot_dim
        # Map x and y with scaling
        robot_x = (img_x / img_width) * robot_x_range
        robot_y = (img_y / img_height) * robot_y_range
        return robot_x, robot_y

    # Collect points for Dobot
    for cnt in contours:
        # Scale and store points
        for point in cnt:
            x, y = point[0]
            x, y = scale_coordinates((x, y), (image.shape[1], image.shape[0]), robot_workspace)
            all_points.append((x, y))
        all_points.append((-1, -1))  # Sentinel marking the end of a contour

    robot_x_old = 0
    robot_y_old = 0
    for i, (robot_x, robot_y) in enumerate(all_points):
        if robot_x == -1 or robot_y == -1:
            # Lift the pen at the end of each contour
            dobot.setCPCmd(1, robot_x_old + INIT_POSITION[0], robot_y_old + INIT_POSITION[1], DRAW_DEPTH + 15, DRAW_SPEED, 1)
        else:
            if robot_x_old == -1 or robot_y_old == -1:
                dobot.setCPCmd(1, robot_x + INIT_POSITION[0], robot_y + INIT_POSITION[1], DRAW_DEPTH + 15, DRAW_SPEED, 1)
            dobot.setCPCmd(1, robot_x + INIT_POSITION[0], robot_y + INIT_POSITION[1], DRAW_DEPTH, DRAW_SPEED, 1)
            time.sleep(0.1)
        robot_x_old = robot_x
        robot_y_old = robot_y

# Only draw when run directly, not when this module is imported by the GUI
if __name__ == "__main__":
    vector_draw()
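As a worked example of scale_coordinates with the (200, 200*2/3) workspace above, the center of an 800x600 image maps proportionally into the robot's range:

# scale_coordinates((400, 300), (800, 600), (200, 200 * 2 / 3))
# -> ((400/800) * 200, (300/600) * 133.33) = (100.0, ~66.67) in mm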
@@ -1,3 +1,8 @@
# GrpC_Identikit

This repository is used in IT & Robotics LAB for the Identikit project, which aims to draw a picture of a face using a robot.

This project has several objectives:

- Produce a drawing of the user's face
- Process the image, with background removal and contouring
- Provide additional features, such as a user interface and filters on the user's face
@ -0,0 +1,38 @@

import requests

# Define the API endpoint and parameters
api_url = "https://api.removal.ai/3.0/remove"
api_key = "93D96377-ED5E-7CC1-CD9D-05017285C46A"

# Define the file path to the image
image_path = "photo.png"

headers = {
    "Rm-Token": api_key
}
files = {
    "image_file": open(image_path, "rb")
}
data = {
    "get_file": "1"  # Ensures the processed file is returned
}

try:
    # Make the POST request
    response = requests.post(api_url, headers=headers, files=files, data=data)

    # Save the output file if the request is successful
    if response.status_code == 200:
        with open("transparent_image.png", "wb") as output_file:
            output_file.write(response.content)
        print("Transparent image saved as 'transparent_image.png'")
    else:
        print(f"Error: Received status code {response.status_code}")
        print("Response:", response.text)

except requests.exceptions.RequestException as e:
    print("An error occurred:", e)
finally:
    # Close the file
    files["image_file"].close()
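A slightly safer variant of the same request, assuming the identical endpoint, the same Rm-Token header, and the api_key defined above, uses a context manager so the file handle is closed even if the request raises, and adds a timeout:

# Sketch of the same call with a context manager and a timeout; the
# endpoint and form fields are copied from the script above, not an
# official code sample.
with open("photo.png", "rb") as image_file:
    response = requests.post(
        "https://api.removal.ai/3.0/remove",
        headers={"Rm-Token": api_key},
        files={"image_file": image_file},
        data={"get_file": "1"},
        timeout=30,  # avoid hanging on a stalled connection
    )
response.raise_for_status()  # turn HTTP errors into exceptions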
@ -0,0 +1,102 @@

import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt

imagepath = "output_image.png"

# Load Image
im = cv.imread(imagepath)
if im is None:
    print("Error: Image not found at", imagepath)
    exit()

# Get original dimensions
original_height, original_width = im.shape[:2]
max_dim = 800

if max(original_width, original_height) > max_dim:  # Calculate scaling factor
    if original_width > original_height:
        scale = max_dim / original_width
    else:
        scale = max_dim / original_height
else:
    scale = 1  # No resizing needed if already within the limit

new_width = int(original_width * scale)  # Compute new dimensions
new_height = int(original_height * scale)
new_dim = (new_width, new_height)

# Resize image with preserved aspect ratio
Resized_Image = cv.resize(im, new_dim, interpolation=cv.INTER_AREA)

print(f"Resized image dimensions: {new_width}x{new_height}")

# Convert to Grayscale
Gray_Img = cv.cvtColor(Resized_Image, cv.COLOR_BGR2GRAY)

# Load Haar Cascade for Face Detection
face_cascade = cv.CascadeClassifier(cv.data.haarcascades + 'haarcascade_frontalface_default.xml')
faces = face_cascade.detectMultiScale(Gray_Img, scaleFactor=1.4, minNeighbors=5, minSize=(30, 30))

if len(faces) == 0:
    print("No faces detected.")
    exit()

contour_image = np.zeros_like(Resized_Image, dtype=np.uint8)  # Create a blank image for contours

for (x, y, w, h) in faces:
    # Expand the ROI to include more of the head
    expansion_factor = 0.3  # Increase the size by 30%
    new_x = max(0, int(x - expansion_factor * w))  # Ensure ROI doesn't go out of bounds
    new_y = max(0, int(y - expansion_factor * h))
    new_w = min(Gray_Img.shape[1], int(w + 2 * expansion_factor * w))
    new_h = min(Gray_Img.shape[0], int(h + 2 * expansion_factor * h))

    face_roi = Gray_Img[new_y:new_y + new_h, new_x:new_x + new_w]  # Extract ROI
    hist = cv.calcHist([face_roi], [0], None, [256], [0, 256])  # Histogram of the face ROI (kept for inspection)

    non_black_pixels = face_roi[face_roi > 0]  # Exclude pure black pixels (intensity = 0)

    # Calculate the median intensity of non-black pixels
    if len(non_black_pixels) > 0:
        median_intensity = np.median(non_black_pixels)
    else:
        print("All pixels are black, skipping...")
        median_intensity = 0  # Fallback if no valid pixels exist

    # Adjust thresholds based on the median intensity
    lower = int(max(0, 0.66 * median_intensity))
    upper = int(min(255, 1.66 * median_intensity))

    # Apply Canny Edge Detection with the updated thresholds
    edges = cv.Canny(face_roi, lower, upper)
    print(f"Median intensity: {median_intensity}")

    # Find Contours in the expanded face ROI
    contours, _ = cv.findContours(edges, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)

    # Draw Contours on the original resized image and on the blank contour image
    for cnt in contours:
        # Offset the contour points to match the original image
        cnt[:, 0, 0] += new_x
        cnt[:, 0, 1] += new_y
        cv.drawContours(Resized_Image, [cnt], -1, (0, 255, 0), 1)  # Draw on the resized image
        cv.drawContours(contour_image, [cnt], -1, (0, 255, 0), 1)  # Draw on the blank contour image

# Convert black background to transparent
b, g, r = cv.split(contour_image)  # Split the channels
alpha = np.where((b == 0) & (g == 0) & (r == 0), 0, 255).astype(np.uint8)

# Merge the channels back with alpha
contour_image_with_alpha = cv.merge([b, g, r, alpha])

# Save the image with transparent background
cv.imwrite("contours_only.png", contour_image_with_alpha)

print("Contours-only PNG saved as 'contours_only.png'")

# Display Final Image with Face Contour
cv.imshow("Face Contour", Resized_Image)
cv.waitKey(0)
cv.destroyAllWindows()
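The 0.66/1.66 factors around the median implement a median-based automatic Canny threshold, close to the common "auto-Canny" heuristic that uses 1 ± sigma with sigma around 0.33. A self-contained sketch of that idea, for comparison with the inline version above:

import numpy as np
import cv2 as cv

def auto_canny(gray, sigma=0.33):
    # Median-based Canny thresholds on the non-black pixels, mirroring
    # what the inline 0.66/1.66 factors compute.
    v = float(np.median(gray[gray > 0])) if np.any(gray > 0) else 0.0
    lower = int(max(0, (1.0 - sigma) * v))
    upper = int(min(255, (1.0 + sigma) * v))
    return cv.Canny(gray, lower, upper)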
Binary images added: 110 KiB, 110 KiB, 106 KiB, 105 KiB
@ -0,0 +1 @@

add_filter.py
@ -0,0 +1,87 @@

import os
import cv2
import mediapipe as mp
import numpy as np

mp_face_detection = mp.solutions.face_detection
mp_face_mesh = mp.solutions.face_mesh
mp_drawing = mp.solutions.drawing_utils

filter_image_path = "ImagePNG/MoustacheMario.png"  # Forward slashes avoid backslash-escape issues
filter_image = cv2.imread(filter_image_path, cv2.IMREAD_UNCHANGED)

def add_filter(image, filter_image, landmarks, size_factor=1.4):
    """
    Adds a filter to an image based on facial landmarks.
    Adjusts the filter size using a `size_factor`.
    """
    # Use eyes as reference points
    left_eye = landmarks[33]  # Left eye landmark
    right_eye = landmarks[263]  # Right eye landmark

    # Distance between eyes determines the filter size
    eye_dist = np.linalg.norm(np.array(left_eye) - np.array(right_eye))

    # Calculate filter size using the size factor
    filter_width = int(eye_dist * size_factor)  # Adjust for desired size
    filter_height = int(filter_width * filter_image.shape[0] / filter_image.shape[1])
    resized_filter = cv2.resize(filter_image, (filter_width, filter_height))

    # Determine filter position above the eyes
    center_x = int((left_eye[0] + right_eye[0]) / 2)
    center_y = int((left_eye[1] + right_eye[1]) / 2)
    x = int(center_x - filter_width / 2)
    y = int(center_y - filter_height / 2)

    # Extract the alpha channel for blending
    alpha_channel = resized_filter[:, :, 3] / 255.0  # Normalize alpha to [0, 1]
    filter_rgb = resized_filter[:, :, :3]

    # Overlay the filter onto the image
    for i in range(resized_filter.shape[0]):
        for j in range(resized_filter.shape[1]):
            if 0 <= y + i < image.shape[0] and 0 <= x + j < image.shape[1]:  # Bounds check
                alpha = alpha_channel[i, j]
                if alpha > 0:  # Only apply non-transparent pixels
                    image[y + i, x + j] = (
                        (1 - alpha) * image[y + i, x + j] + alpha * filter_rgb[i, j]
                    )

    return image

input_image_path = "ImagePNG/Dorian.png"
input_image = cv2.imread(input_image_path)

# RGB for Mediapipe
rgb_image = cv2.cvtColor(input_image, cv2.COLOR_BGR2RGB)

# FaceMesh init
with mp_face_mesh.FaceMesh(min_detection_confidence=0.5, min_tracking_confidence=0.5) as face_mesh:
    # Face detection + key points
    results = face_mesh.process(rgb_image)

    if results.multi_face_landmarks:
        for face_landmarks in results.multi_face_landmarks:
            # Key points in pixel coordinates
            landmarks = [(lm.x * input_image.shape[1], lm.y * input_image.shape[0]) for lm in face_landmarks.landmark]

            # Apply the filter (a Mario moustache here)
            input_image = add_filter(input_image, filter_image, landmarks)

# Define the folder path
folder_path = "OutputImage"

# Extract the filter name from the filter image path
filter_name = os.path.splitext(os.path.basename(filter_image_path))[0]

# Define the full path to save the image with the filter name included
file_path = os.path.join(folder_path, f"{filter_name}_output_image_.jpg")

# Save the image
cv2.imwrite(file_path, input_image)

# Display result
cv2.imshow("Image with filter", input_image)
cv2.waitKey(0)
cv2.destroyAllWindows()
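The per-pixel Python loop in add_filter is correct but slow for large overlays; the same alpha blend can be done in one vectorized NumPy step. A sketch, assuming the same BGRA filter layout and an overlay that fits fully inside the frame (no bounds clipping shown):

import numpy as np

def blend_filter(image, resized_filter, x, y):
    # Vectorized alpha blend; assumes the filter lies entirely inside image.
    h, w = resized_filter.shape[:2]
    alpha = resized_filter[:, :, 3:4] / 255.0        # (h, w, 1), broadcasts over BGR
    roi = image[y:y + h, x:x + w].astype(np.float64)
    blended = (1 - alpha) * roi + alpha * resized_filter[:, :, :3]
    image[y:y + h, x:x + w] = blended.astype(np.uint8)
    return image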
@ -0,0 +1,85 @@

import os
import cv2
import mediapipe as mp
import numpy as np

# Mediapipe initialization
mp_face_detection = mp.solutions.face_detection
mp_face_mesh = mp.solutions.face_mesh

# Load filter image (transparent PNG)
filter_image_path = "ImagePNG/MArio.png"
filter_image = cv2.imread(filter_image_path, cv2.IMREAD_UNCHANGED)

def add_filter_hat(image, filter_image, bbox, scale_factor=1.2):
    """
    Add a filter image to a face image at a specified bounding box position,
    scaling it dynamically based on the face size.
    """
    x_min, y_min, box_width, box_height = bbox

    # Scale the filter based on the face width and a scaling factor
    filter_width = int(box_width * scale_factor)
    filter_height = int(filter_width * filter_image.shape[0] / filter_image.shape[1])
    resized_filter = cv2.resize(filter_image, (filter_width, filter_height))

    # Position filter above the head
    x = int(x_min - (filter_width - box_width) / 2)
    y = int(y_min - filter_height * 0.7)  # Slight vertical offset above the face

    # Extract alpha channel (transparency) from the filter
    alpha_channel = resized_filter[:, :, 3] / 255.0  # Normalize to range [0, 1]
    filter_rgb = resized_filter[:, :, :3]

    # Overlay the filter on the image using alpha blending
    for i in range(filter_height):
        for j in range(filter_width):
            if 0 <= y + i < image.shape[0] and 0 <= x + j < image.shape[1]:
                alpha = alpha_channel[i, j]
                if alpha > 0:  # Apply only non-transparent pixels
                    image[y + i, x + j] = (1 - alpha) * image[y + i, x + j] + alpha * filter_rgb[i, j]

    return image

# Load input image
input_image_path = "ImagePNG/output.png"
input_image = cv2.imread(input_image_path)

# Convert to RGB for Mediapipe
rgb_image = cv2.cvtColor(input_image, cv2.COLOR_BGR2RGB)

# Use Mediapipe for face detection
with mp_face_detection.FaceDetection(model_selection=1, min_detection_confidence=0.5) as face_detection:
    results = face_detection.process(rgb_image)

    if results.detections:
        for detection in results.detections:
            bbox = detection.location_data.relative_bounding_box
            h, w, _ = input_image.shape
            # Convert relative bounding box to absolute dimensions
            x_min = int(bbox.xmin * w)
            y_min = int(bbox.ymin * h)
            box_width = int(bbox.width * w)
            box_height = int(bbox.height * h)

            # Adjust the scale factor based on face height
            # Larger faces get proportionally larger hats
            face_height_ratio = box_height / h  # Ratio of face height to image height
            dynamic_scale_factor = 2.75 + face_height_ratio  # Base size + adjustment

            # Add filter to the image with dynamic scaling
            input_image = add_filter_hat(input_image, filter_image, (x_min, y_min, box_width, box_height), scale_factor=dynamic_scale_factor)

# Define output folder and save path
output_folder = "OutputImage"
os.makedirs(output_folder, exist_ok=True)  # Ensure the folder exists
filter_name = os.path.splitext(os.path.basename(filter_image_path))[0]
output_path = os.path.join(output_folder, f"{filter_name}_output_image_dynamic.jpg")

# Save the output image
cv2.imwrite(output_path, input_image)

# Display result
cv2.imshow("Image with Filter", input_image)
cv2.waitKey(0)
cv2.destroyAllWindows()
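To make the dynamic scaling concrete: for a face filling 30% of the frame height, face_height_ratio = 0.3, so dynamic_scale_factor = 2.75 + 0.3 = 3.05, and the hat is rendered roughly three times wider than the detected face box.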
@ -0,0 +1,87 @@

import cv2
import mediapipe as mp
import numpy as np

mp_face_mesh = mp.solutions.face_mesh

# List of filters (raw strings so Windows backslashes are not treated
# as escape sequences such as \U, which would be a SyntaxError)
filter_images = {
    1: r"C:\Users\doria\Documents\ECAM\Année 4\IT & Robotics Lab\GrpC_Identikit\ImageProcessing\ImagePNG\Chien1.png",  # Replace with your filter image paths
    2: r"C:\Users\doria\Documents\ECAM\Année 4\IT & Robotics Lab\GrpC_Identikit\ImageProcessing\ImagePNG\MoustacheMario.png",
    3: r"C:\Users\doria\Documents\ECAM\Année 4\IT & Robotics Lab\GrpC_Identikit\ImageProcessing\ImagePNG\MArio.png"
}

def add_filter(image, filter_image, landmarks):
    # Use eyes as reference points
    left_eye = landmarks[33]
    right_eye = landmarks[263]

    # Distance between both eyes --> filter size
    eye_dist = np.linalg.norm(np.array(left_eye) - np.array(right_eye))

    # Adjust the factor for the desired filter size
    scaling_factor = 2.75
    filter_width = int(eye_dist * scaling_factor)
    filter_height = int(filter_width * filter_image.shape[0] / filter_image.shape[1])
    resized_filter = cv2.resize(filter_image, (filter_width, filter_height))

    # Filter position on the face
    center_x = int((left_eye[0] + right_eye[0]) / 2)
    center_y = int((left_eye[1] + right_eye[1]) / 2)
    x = int(center_x - filter_width / 2)
    y = int(center_y - filter_height / 2)

    # Extract the alpha channel (transparency) from the filter image
    alpha_channel = resized_filter[:, :, 3] / 255.0  # Normalize alpha to range [0, 1]
    filter_rgb = resized_filter[:, :, :3]  # Extract the RGB channels

    # Overlay the filter onto the image, using the alpha channel as a mask
    for i in range(resized_filter.shape[0]):
        for j in range(resized_filter.shape[1]):
            if not (0 <= y + i < image.shape[0] and 0 <= x + j < image.shape[1]):
                continue  # Skip pixels that fall outside the image
            if alpha_channel[i, j] > 0:  # Check if the pixel is not fully transparent
                # Blend the pixels: (1 - alpha) * original + alpha * filter
                for c in range(3):
                    image[y + i, x + j, c] = (1 - alpha_channel[i, j]) * image[y + i, x + j, c] + alpha_channel[i, j] * filter_rgb[i, j, c]

    return image

def apply_filter_by_choice(choice, input_image_path):
    # Validate the filter choice
    if choice not in filter_images:
        print(f"Filter {choice} does not exist. Please choose a valid filter number.")
        return

    # Load the input image and filter
    input_image = cv2.imread(input_image_path)
    filter_image_path = filter_images[choice]
    filter_image = cv2.imread(filter_image_path, cv2.IMREAD_UNCHANGED)

    # RGB for Mediapipe
    rgb_image = cv2.cvtColor(input_image, cv2.COLOR_BGR2RGB)

    # FaceMesh init
    with mp_face_mesh.FaceMesh(min_detection_confidence=0.5, min_tracking_confidence=0.5) as face_mesh:
        # Face detection + key points
        results = face_mesh.process(rgb_image)

        if results.multi_face_landmarks:
            for face_landmarks in results.multi_face_landmarks:
                # Key points
                landmarks = [(lm.x * input_image.shape[1], lm.y * input_image.shape[0]) for lm in face_landmarks.landmark]

                # Apply the filter
                input_image = add_filter(input_image, filter_image, landmarks)

    # Display result
    cv2.imshow("Image with Filter", input_image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

    # Save the image
    output_path = f"output_image_filter_{choice}.jpg"
    cv2.imwrite(output_path, input_image)
    print(f"Saved filtered image to {output_path}")

# Example usage:
filter_choice = int(input("Enter the filter number (1, 2, or 3): "))
apply_filter_by_choice(filter_choice, "Dorianvide.png")
@ -0,0 +1,34 @@

import cv2 as cv
import numpy as np

# Load the Mask R-CNN model
net = cv.dnn.readNetFromTensorflow('frozen_inference_graph.pb', 'mask_rcnn_inception_v2_coco_2018_01_28.pbtxt')

# Load the image
imagepath = "photo.jpg"
image = cv.imread(imagepath)
h, w = image.shape[:2]

# Preprocess the image for Mask R-CNN
blob = cv.dnn.blobFromImage(image, 1.0, (w, h), (104.0, 177.0, 123.0), swapRB=True, crop=False)
net.setInput(blob)

# Get the model outputs: boxes has shape (1, 1, N, 7), one row of
# [batchId, classId, confidence, left, top, right, bottom] per detection
boxes, masks = net.forward(['detection_out_final', 'detection_masks'])

# Apply the segmentation for the person
result = image.copy()
for i in range(boxes.shape[2]):
    class_id = int(boxes[0, 0, i, 1])
    confidence = float(boxes[0, 0, i, 2])
    if class_id == 0 and confidence > 0.5:  # Class 0 corresponds to "person"
        # Bounding-box coordinates (relative values scaled to pixels)
        box = boxes[0, 0, i, 3:7] * np.array([w, h, w, h])
        (x, y, x2, y2) = box.astype("int")
        # Create a mask of the person (box-shaped; see the note below on
        # using the per-pixel masks instead)
        mask = np.zeros((h, w), dtype=np.uint8)
        mask[y:y2, x:x2] = 255  # Mark the person's area
        # Apply the mask to the original image
        result = cv.bitwise_and(image, image, mask=mask)

# Show the image with the person segmented and the background removed
cv.imshow("Segmented Image", result)
cv.waitKey(0)
cv.destroyAllWindows()
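The rectangular mask above ignores the second network output: Mask R-CNN also returns one 15x15 soft mask per detection in masks (shape (N, classes, 15, 15)). A hedged sketch of how such a mask is usually resized into the box and thresholded, following the layout of the standard OpenCV dnn Mask R-CNN sample:

import cv2 as cv
import numpy as np

def person_mask_from_outputs(masks, i, class_id, box, frame_shape, thr=0.3):
    # Resize the i-th soft mask to its box and paste it into a
    # full-frame binary mask. box = (x, y, x2, y2) in pixels,
    # frame_shape = (h, w).
    x, y, x2, y2 = box
    h, w = frame_shape
    soft = cv.resize(masks[i, class_id], (x2 - x, y2 - y))
    mask = np.zeros((h, w), dtype=np.uint8)
    mask[y:y2, x:x2] = (soft > thr).astype(np.uint8) * 255
    return mask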
@ -0,0 +1,118 @@

import cv2
import mediapipe as mp
import numpy as np

# Initialize MediaPipe Face Detection and Drawing utilities
mp_face_detection = mp.solutions.face_detection
mp_drawing = mp.solutions.drawing_utils
mp_face_mesh = mp.solutions.face_mesh

# Initialize variables
face_detection = mp_face_detection.FaceDetection(min_detection_confidence=0.2)
face_mesh = mp_face_mesh.FaceMesh(min_detection_confidence=0.2, min_tracking_confidence=0.5)

# Initialize camera
cap = cv2.VideoCapture(0)

# Variables for button states
greyscale = False
sunglasses_on = False  # Not yet wired to a key (see the note after the script)
saved_image = None

# Function to overlay sunglasses
def overlay_sunglasses(image, face_landmarks, sunglasses_img):
    if len(face_landmarks) > 0:
        # Coordinates for the eyes based on face mesh landmarks
        left_eye = face_landmarks[33]
        right_eye = face_landmarks[263]

        # Calculate the center between the eyes for positioning sunglasses
        eye_center_x = int((left_eye[0] + right_eye[0]) / 2)
        eye_center_y = int((left_eye[1] + right_eye[1]) / 2)

        # Calculate the scaling factor for sunglasses based on the distance between the eyes
        eye_distance = np.linalg.norm(np.array(left_eye) - np.array(right_eye))
        scale_factor = eye_distance / sunglasses_img.shape[1]

        # Resize sunglasses based on scale factor
        sunglasses_resized = cv2.resize(sunglasses_img, None, fx=scale_factor, fy=scale_factor)

        # Determine the region of interest (ROI) for sunglasses
        start_x = int(eye_center_x - sunglasses_resized.shape[1] / 2)
        start_y = int(eye_center_y - sunglasses_resized.shape[0] / 2)

        # Overlay sunglasses on the face
        for i in range(sunglasses_resized.shape[0]):
            for j in range(sunglasses_resized.shape[1]):
                if not (0 <= start_y + i < image.shape[0] and 0 <= start_x + j < image.shape[1]):
                    continue  # Skip pixels outside the frame
                if sunglasses_resized[i, j][3] > 0:  # If not transparent
                    image[start_y + i, start_x + j] = sunglasses_resized[i, j][0:3]  # Apply RGB channels

    return image

# Function to apply greyscale filter
def toggle_greyscale(image, greyscale):
    if greyscale:
        return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    else:
        return image

# Load sunglasses image with transparency (PNG)
sunglasses_img = cv2.imread("sunglasses.png", cv2.IMREAD_UNCHANGED)

while cap.isOpened():
    ret, frame = cap.read()

    if not ret:
        break

    # Flip the frame horizontally for a mirror effect
    frame = cv2.flip(frame, 1)

    # Convert to RGB for MediaPipe processing
    rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    results_detection = face_detection.process(rgb_frame)
    results_mesh = face_mesh.process(rgb_frame)

    # Draw face detection bounding boxes
    if results_detection.detections:
        for detection in results_detection.detections:
            mp_drawing.draw_detection(frame, detection)

    # Draw face mesh landmarks
    if results_mesh.multi_face_landmarks:
        for face_landmarks in results_mesh.multi_face_landmarks:
            mp_drawing.draw_landmarks(frame, face_landmarks, mp_face_mesh.FACEMESH_CONTOURS)

    # Apply greyscale filter if enabled
    frame = toggle_greyscale(frame, greyscale)

    # Display the image
    cv2.imshow('Face Capture Controls', frame)

    key = cv2.waitKey(1) & 0xFF

    # Save Image
    if key == ord('s'):  # Press 's' to save image
        saved_image = frame.copy()
        cv2.imwrite("captured_image.png", saved_image)
        print("Image Saved!")

    # Retake Image
    elif key == ord('r'):  # Press 'r' to retake image
        saved_image = None
        print("Image Retaken!")

    # Toggle Greyscale
    elif key == ord('g'):  # Press 'g' to toggle greyscale
        greyscale = not greyscale
        print(f"Greyscale: {'Enabled' if greyscale else 'Disabled'}")

    # Kill Switch
    elif key == ord('q'):  # Press 'q' to quit
        break

# Release camera and close all windows
cap.release()
cv2.destroyAllWindows()
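overlay_sunglasses and sunglasses_on are defined but never used in the main loop. One way they could be wired up, sketched under the assumption that 'f' is a free key and reusing the loop variables from the script above (a fragment, not standalone code):

# Inside the main loop, after results_mesh is computed:
if key == ord('f'):  # hypothetical toggle key
    sunglasses_on = not sunglasses_on

if sunglasses_on and results_mesh.multi_face_landmarks:
    # Convert normalized landmarks to pixel coordinates
    lm = results_mesh.multi_face_landmarks[0].landmark
    pts = [(p.x * frame.shape[1], p.y * frame.shape[0]) for p in lm]
    frame = overlay_sunglasses(frame, pts, sunglasses_img)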
@ -0,0 +1,19 @@

# Importing Required Modules
from rembg import remove
from PIL import Image

# Store the path of the input image in the variable input_path
input_path = 'ImageJPG/Felipe.png'

# Store the path of the output image in the variable output_path
output_path = 'ImagePNG/Felipe.png'

# Processing the image (renamed from `input` to avoid shadowing the built-in)
input_image = Image.open(input_path)

# Removing the background from the given image
output_image = remove(input_image)

# Saving the image to the given path
output_image.save(output_path)
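rembg's remove also accepts raw bytes and then returns PNG bytes, which skips the PIL round-trip when the file is only read and written back. A minimal sketch using the same paths:

from rembg import remove

with open('ImageJPG/Felipe.png', 'rb') as src:
    result = remove(src.read())   # bytes in, PNG bytes out
with open('ImagePNG/Felipe.png', 'wb') as dst:
    dst.write(result)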
@ -1,65 +0,0 @@

import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt

imagepath = "E:\\ECAM\\2022-23\\Pathway Discovery Workshops\\images-20230626\\ecam.png"

# Load Image
im = cv.imread(imagepath)
if im is None:
    print("Error: Image not found at", imagepath)
    exit()

# Get Dimensions
dimensions = im.shape
height, width, channels = dimensions
print('Image Dimension :', dimensions)
print('Image Height :', height)
print('Image Width :', width)
print('Number of Channels :', channels)

# Resize Image
rwidth, rheight = 700, 700
rdim = (rwidth, rheight)
Resized_Image = cv.resize(im, rdim, interpolation=cv.INTER_AREA)
cv.imshow("Resized Image", Resized_Image)
cv.waitKey(0)
cv.destroyAllWindows()

# Convert to Grayscale
Gray_Img = cv.cvtColor(Resized_Image, cv.COLOR_BGR2GRAY)
cv.imshow("Grayscale Image", Gray_Img)
cv.waitKey(0)
cv.destroyAllWindows()

# Threshold Image
ret, Thresh_Img = cv.threshold(Gray_Img, 100, 255, 0)
cv.imshow("Threshold Image", Thresh_Img)
cv.waitKey(0)
cv.destroyAllWindows()

# Plot Histogram
hist = cv.calcHist([Gray_Img], [0], None, [256], [0, 256])
plt.figure()
plt.title("Grayscale Histogram")
plt.xlabel("Bins")
plt.ylabel("# of Pixels")
plt.plot(hist)
plt.xlim([0, 256])
plt.show()

# Find and Draw Contours on Resized Image
contours, hierarchy = cv.findContours(Thresh_Img, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
contoured_image = Resized_Image.copy()
cv.drawContours(contoured_image, contours, -1, (0, 255, 0), 3)
cv.imshow("Contours on Resized Image", contoured_image)
cv.waitKey(0)
cv.destroyAllWindows()

# Draw Contours on Threshold Image
contoured_thresh = cv.cvtColor(Thresh_Img, cv.COLOR_GRAY2BGR)
cv.drawContours(contoured_thresh, contours, -1, (0, 255, 0), 3)
cv.imshow("Contours on Threshold Image", contoured_thresh)
cv.waitKey(0)
cv.destroyAllWindows()