# GrpC_Identikit/ImageProcessing/Cascade.py
# Detects faces in an image with a Haar cascade, extracts edge contours from
# each (expanded) face region, and exports the contours as a transparent PNG.
import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
imagepath = "ImagePNG/Dorian.png"

# --- Load image -------------------------------------------------------------
# cv.imread signals failure by returning None rather than raising.
im = cv.imread(imagepath)
if im is None:
    print("Error: Image not found at", imagepath)
    raise SystemExit(1)  # exit code 1 on error (exit() is absent under -S)

# --- Resize so the longest side is at most max_dim, keeping aspect ratio ----
original_height, original_width = im.shape[:2]
max_dim = 800
if max(original_width, original_height) > max_dim:
    # One scale factor derived from the longer side shrinks both dimensions
    # proportionally.
    scale = max_dim / max(original_width, original_height)
else:
    scale = 1  # No resizing needed if already within the limit
new_width = int(original_width * scale)
new_height = int(original_height * scale)
new_dim = (new_width, new_height)
# INTER_AREA is the recommended interpolation for shrinking.
Resized_Image = cv.resize(im, new_dim, interpolation=cv.INTER_AREA)
print(f"Resized image dimensions: {new_width}x{new_height}")

# --- Face detection on the grayscale image ----------------------------------
Gray_Img = cv.cvtColor(Resized_Image, cv.COLOR_BGR2GRAY)
face_cascade = cv.CascadeClassifier(cv.data.haarcascades + 'haarcascade_frontalface_default.xml')
# NOTE(review): scaleFactor=1.4 is coarse; the usual range is 1.1-1.3 and
# detects more faces at the cost of speed -- confirm this was intentional.
faces = face_cascade.detectMultiScale(Gray_Img, scaleFactor=1.4, minNeighbors=5, minSize=(30, 30))
if len(faces) == 0:
    print("No faces detected.")
    raise SystemExit(1)

# Blank canvas (same shape as the resized image) that receives only contours.
contour_image = np.zeros_like(Resized_Image, dtype=np.uint8)

for (x, y, w, h) in faces:
    # Expand the detected box by 30% on every side to include more of the head.
    expansion_factor = 0.3
    new_x = max(0, int(x - expansion_factor * w))
    new_y = max(0, int(y - expansion_factor * h))
    # Clamp width/height by the distance from (new_x, new_y) to the image
    # border -- not by the full image size, which could let
    # new_x + new_w exceed the image width (original bug).
    new_w = min(Gray_Img.shape[1] - new_x, int(w + 2 * expansion_factor * w))
    new_h = min(Gray_Img.shape[0] - new_y, int(h + 2 * expansion_factor * h))
    face_roi = Gray_Img[new_y:new_y + new_h, new_x:new_x + new_w]

    # Auto-tune Canny thresholds from the median intensity of non-black
    # pixels (pure black is excluded so letterboxing/padding does not skew
    # the statistic).
    non_black_pixels = face_roi[face_roi > 0]
    if len(non_black_pixels) > 0:
        median_intensity = np.median(non_black_pixels)
    else:
        print("All pixels are black, skipping...")
        median_intensity = 0  # Fallback if no valid pixels exist
    lower = int(max(0, 0.66 * median_intensity))
    upper = int(min(255, 1.66 * median_intensity))
    edges = cv.Canny(face_roi, lower, upper)
    print(median_intensity)

    # Find contours in the expanded face ROI.
    contours, _ = cv.findContours(edges, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        # Shift contour points from ROI coordinates back to full-image
        # coordinates before drawing.
        cnt[:, 0, 0] += new_x
        cnt[:, 0, 1] += new_y
        cv.drawContours(Resized_Image, [cnt], -1, (0, 255, 0), 1)  # overlay on photo
        cv.drawContours(contour_image, [cnt], -1, (0, 255, 0), 1)  # contours-only canvas

# --- Export contours with a transparent background --------------------------
b, g, r = cv.split(contour_image)
# Fully transparent where the canvas is still pure black, opaque elsewhere.
alpha = np.where((b == 0) & (g == 0) & (r == 0), 0, 255).astype(np.uint8)
contour_image_with_alpha = cv.merge([b, g, r, alpha])
cv.imwrite("contours_only.png", contour_image_with_alpha)
print("Contours-only PNG saved as 'contours_only.png'")

# --- Display the annotated photo until a key is pressed ----------------------
cv.imshow("Face Contour", Resized_Image)
cv.waitKey(0)
cv.destroyAllWindows()