# Face-capture demo: OpenCV webcam feed with MediaPipe face detection / face
# mesh, keyboard controls (save / retake / greyscale / quit), and a
# sunglasses-overlay helper.
import cv2
import mediapipe as mp
import numpy as np

# MediaPipe solution handles: bounding-box detection, landmark mesh, drawing helpers.
mp_face_detection = mp.solutions.face_detection
mp_drawing = mp.solutions.drawing_utils
mp_face_mesh = mp.solutions.face_mesh

# Model instances — a low detection threshold so faces are picked up easily.
face_detection = mp_face_detection.FaceDetection(min_detection_confidence=0.2)
face_mesh = mp_face_mesh.FaceMesh(min_detection_confidence=0.2, min_tracking_confidence=0.5)

# Default webcam.
cap = cv2.VideoCapture(0)

# UI state toggled by key presses in the main loop.
greyscale = False
sunglasses_on = False  # NOTE(review): never toggled or read anywhere below — sunglasses feature looks unfinished
saved_image = None
|
# Function to overlay sunglasses
def overlay_sunglasses(image, face_landmarks, sunglasses_img):
    """Paste a sunglasses overlay over the eyes and return the frame.

    Parameters
    ----------
    image : np.ndarray
        BGR frame to draw on (modified in place and also returned).
    face_landmarks : indexable
        Pixel-coordinate landmarks; indices 33 and 263 must yield the
        (x, y) of the left and right eye corners.
        NOTE(review): raw MediaPipe landmarks are normalized to [0, 1] —
        callers must convert to pixel coordinates first; confirm at call sites.
    sunglasses_img : np.ndarray or None
        4-channel (BGR + alpha) overlay, e.g. a PNG loaded with
        cv2.IMREAD_UNCHANGED. A None value (failed imread) is ignored.
    """
    # Nothing to do without landmarks or a loaded overlay image.
    # (Fix: the original dereferenced sunglasses_img unconditionally.)
    if len(face_landmarks) == 0 or sunglasses_img is None:
        return image

    # Eye-corner landmarks (MediaPipe face-mesh indices 33 / 263).
    left_eye = face_landmarks[33]
    right_eye = face_landmarks[263]

    # Midpoint between the eyes anchors the overlay.
    eye_center_x = int((left_eye[0] + right_eye[0]) / 2)
    eye_center_y = int((left_eye[1] + right_eye[1]) / 2)

    # Scale the overlay so its width matches the inter-eye distance.
    eye_distance = np.linalg.norm(np.array(left_eye) - np.array(right_eye))
    scale_factor = eye_distance / sunglasses_img.shape[1]
    if scale_factor <= 0:
        # Fix: coincident eye landmarks gave fx=0, which crashes cv2.resize.
        return image
    sunglasses_resized = cv2.resize(sunglasses_img, None, fx=scale_factor, fy=scale_factor)

    h, w = sunglasses_resized.shape[:2]
    # Top-left corner of the overlay, centered on the eye midpoint.
    start_x = int(eye_center_x - w / 2)
    start_y = int(eye_center_y - h / 2)

    # Clip the overlay to the frame. Fix: the original indexed the image
    # without bounds checks, so a negative offset wrapped around (Python
    # negative indexing) and an off-frame coordinate raised IndexError.
    img_h, img_w = image.shape[:2]
    x0, y0 = max(start_x, 0), max(start_y, 0)
    x1, y1 = min(start_x + w, img_w), min(start_y + h, img_h)
    if x0 >= x1 or y0 >= y1:
        return image  # overlay falls entirely outside the frame

    roi = sunglasses_resized[y0 - start_y:y1 - start_y, x0 - start_x:x1 - start_x]
    # Vectorized alpha mask (same >0 hard threshold, no blending) replaces
    # the original per-pixel Python double loop.
    opaque = roi[:, :, 3] > 0
    image[y0:y1, x0:x1][opaque] = roi[:, :, :3][opaque]

    return image
|
|
|
|
# Function to apply greyscale filter
def toggle_greyscale(image, greyscale):
    """Return a single-channel grey version of *image* when *greyscale*
    is truthy; otherwise hand the frame back untouched."""
    if not greyscale:
        return image
    return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
|
|
|
|
# Load sunglasses image with transparency (PNG).
# IMREAD_UNCHANGED keeps the alpha channel that overlay_sunglasses reads.
sunglasses_img = cv2.imread("sunglasses.png", cv2.IMREAD_UNCHANGED)
if sunglasses_img is None:
    # Fix: imread fails silently (returns None) for a missing/unreadable
    # file; surface the problem instead of crashing later on first use.
    print("Warning: sunglasses.png could not be loaded; sunglasses overlay disabled.")
|
# Capture loop: read a frame, annotate it, show it, and dispatch key presses.
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break

    # Mirror the image so it behaves like looking into a mirror.
    frame = cv2.flip(frame, 1)

    # MediaPipe expects RGB input; OpenCV captures BGR.
    rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    results_detection = face_detection.process(rgb_frame)
    results_mesh = face_mesh.process(rgb_frame)

    # Annotate the frame: bounding boxes first, then mesh contours.
    for detection in (results_detection.detections or []):
        mp_drawing.draw_detection(frame, detection)
    for face_landmarks in (results_mesh.multi_face_landmarks or []):
        mp_drawing.draw_landmarks(frame, face_landmarks, mp_face_mesh.FACEMESH_CONTOURS)

    # Greyscale is applied last so the annotations are converted too.
    frame = toggle_greyscale(frame, greyscale)

    cv2.imshow('Face Capture Controls', frame)

    key = cv2.waitKey(1) & 0xFF
    if key == ord('s'):            # save the current annotated frame
        saved_image = frame.copy()
        cv2.imwrite("captured_image.png", saved_image)
        print("Image Saved!")
    elif key == ord('r'):          # discard the last saved frame
        saved_image = None
        print("Image Retaken!")
    elif key == ord('g'):          # flip the greyscale filter on/off
        greyscale = not greyscale
        print(f"Greyscale: {'Enabled' if greyscale else 'Disabled'}")
    elif key == ord('q'):          # kill switch
        break

# Release camera and close all windows
cap.release()
cv2.destroyAllWindows()
|
|
|