diff --git a/ImageProcessing/Cascade.py b/ImageProcessing/Cascade.py index 57c48bf..65374bf 100644 --- a/ImageProcessing/Cascade.py +++ b/ImageProcessing/Cascade.py @@ -2,7 +2,7 @@ import numpy as np import cv2 as cv from matplotlib import pyplot as plt -imagepath = "output_image.png" +imagepath = "ImagePNG/Dorian.png" # Load Image im = cv.imread(imagepath) diff --git a/ImageProcessing/ImagePNG/output.png b/ImageProcessing/ImagePNG/outputAPI.png similarity index 100% rename from ImageProcessing/ImagePNG/output.png rename to ImageProcessing/ImagePNG/outputAPI.png diff --git a/ImageProcessing/ImagePNG/photo1.png b/ImageProcessing/ImagePNG/photo1.png new file mode 100644 index 0000000..dc79fd3 Binary files /dev/null and b/ImageProcessing/ImagePNG/photo1.png differ diff --git a/ImageProcessing/OutputImage/Glasses_output_image.jpg b/ImageProcessing/OutputImage/Glasses_output_image.jpg new file mode 100644 index 0000000..31ce908 Binary files /dev/null and b/ImageProcessing/OutputImage/Glasses_output_image.jpg differ diff --git a/ImageProcessing/OutputImage/Glasses_output_image_.jpg b/ImageProcessing/OutputImage/Glasses_output_image_.jpg new file mode 100644 index 0000000..310c80e Binary files /dev/null and b/ImageProcessing/OutputImage/Glasses_output_image_.jpg differ diff --git a/ImageProcessing/OutputImage/Glasses_output_image_dynamic.jpg b/ImageProcessing/OutputImage/Glasses_output_image_dynamic.jpg new file mode 100644 index 0000000..43ba2b3 Binary files /dev/null and b/ImageProcessing/OutputImage/Glasses_output_image_dynamic.jpg differ diff --git a/ImageProcessing/OutputImage/MArio_output_image.jpg b/ImageProcessing/OutputImage/MArio_output_image.jpg deleted file mode 100644 index d684e4f..0000000 Binary files a/ImageProcessing/OutputImage/MArio_output_image.jpg and /dev/null differ diff --git a/ImageProcessing/OutputImage/MArio_output_image_.jpg b/ImageProcessing/OutputImage/MArio_output_image_.jpg deleted file mode 100644 index 8be5490..0000000 Binary files a/ImageProcessing/OutputImage/MArio_output_image_.jpg and /dev/null differ diff --git a/ImageProcessing/OutputImage/MArio_output_image_adjustable_height.jpg b/ImageProcessing/OutputImage/MArio_output_image_adjustable_height.jpg deleted file mode 100644 index 0ae79e3..0000000 Binary files a/ImageProcessing/OutputImage/MArio_output_image_adjustable_height.jpg and /dev/null differ diff --git a/ImageProcessing/OutputImage/MArio_output_image_dynamic.jpg b/ImageProcessing/OutputImage/MArio_output_image_dynamic.jpg index c38b963..eae5519 100644 Binary files a/ImageProcessing/OutputImage/MArio_output_image_dynamic.jpg and b/ImageProcessing/OutputImage/MArio_output_image_dynamic.jpg differ diff --git a/ImageProcessing/OutputImage/MoustacheMario_output_image.jpg b/ImageProcessing/OutputImage/MoustacheMario_output_image.jpg new file mode 100644 index 0000000..d87b635 Binary files /dev/null and b/ImageProcessing/OutputImage/MoustacheMario_output_image.jpg differ diff --git a/ImageProcessing/OutputImage/MoustacheMario_output_image_.jpg b/ImageProcessing/OutputImage/MoustacheMario_output_image_.jpg new file mode 100644 index 0000000..5aaba56 Binary files /dev/null and b/ImageProcessing/OutputImage/MoustacheMario_output_image_.jpg differ diff --git a/ImageProcessing/OutputImage/MoustacheMario_output_image_dynamic.jpg b/ImageProcessing/OutputImage/MoustacheMario_output_image_dynamic.jpg new file mode 100644 index 0000000..6d0548b Binary files /dev/null and b/ImageProcessing/OutputImage/MoustacheMario_output_image_dynamic.jpg differ diff --git 
a/ImageProcessing/OutputImage/chat_output_image.jpg b/ImageProcessing/OutputImage/chat_output_image.jpg new file mode 100644 index 0000000..6d34691 Binary files /dev/null and b/ImageProcessing/OutputImage/chat_output_image.jpg differ diff --git a/ImageProcessing/OutputImage/chat_output_image_.jpg b/ImageProcessing/OutputImage/chat_output_image_.jpg new file mode 100644 index 0000000..0c2ffe9 Binary files /dev/null and b/ImageProcessing/OutputImage/chat_output_image_.jpg differ diff --git a/ImageProcessing/OutputImage/chien1_output_image.jpg b/ImageProcessing/OutputImage/chien1_output_image.jpg new file mode 100644 index 0000000..a998574 Binary files /dev/null and b/ImageProcessing/OutputImage/chien1_output_image.jpg differ diff --git a/ImageProcessing/add_filter.py b/ImageProcessing/add_filter.py index 2b178ee..031d177 100644 --- a/ImageProcessing/add_filter.py +++ b/ImageProcessing/add_filter.py @@ -7,43 +7,49 @@ mp_face_detection = mp.solutions.face_detection mp_face_mesh = mp.solutions.face_mesh mp_drawing = mp.solutions.drawing_utils -filter_image_path = "ImagePNG\MArio.png" +filter_image_path = "ImagePNG\MoustacheMario.png" filter_image = cv2.imread(filter_image_path, cv2.IMREAD_UNCHANGED) -def add_filter(image, filter_image, landmarks): - # Use of eyes as reference points - left_eye = landmarks[33] - right_eye = landmarks[263] +def add_filter(image, filter_image, landmarks, size_factor=1.4): + """ + Adds a filter to an image based on facial landmarks. + Adjusts the filter size using a `size_factor`. + """ + # Use eyes as reference points + left_eye = landmarks[33] # Left eye landmark + right_eye = landmarks[263] # Right eye landmark - # Distance between both eyes --> filter size + # Distance between eyes determines the filter size eye_dist = np.linalg.norm(np.array(left_eye) - np.array(right_eye)) - # Filter size - filter_width = int(eye_dist * 2) # Adjust the factor for desired size + # Calculate filter size using the size factor + filter_width = int(eye_dist * size_factor) # Adjust for desired size filter_height = int(filter_width * filter_image.shape[0] / filter_image.shape[1]) resized_filter = cv2.resize(filter_image, (filter_width, filter_height)) - # Filter position on the face + # Determine filter position above the eyes center_x = int((left_eye[0] + right_eye[0]) / 2) center_y = int((left_eye[1] + right_eye[1]) / 2) x = int(center_x - filter_width / 2) y = int(center_y - filter_height / 2) - # Extract the alpha channel (transparency) from the filter image - alpha_channel = resized_filter[:, :, 3] / 255.0 # Normalize alpha to range [0, 1] - filter_rgb = resized_filter[:, :, :3] # Extract the RGB channels + # Extract the alpha channel for blending + alpha_channel = resized_filter[:, :, 3] / 255.0 # Normalize alpha to [0, 1] + filter_rgb = resized_filter[:, :, :3] - # Overlay the filter onto the image, using the alpha channel as a mask + # Overlay the filter onto the image for i in range(resized_filter.shape[0]): for j in range(resized_filter.shape[1]): - if alpha_channel[i, j] > 0: # Check if the pixel is not fully transparent - # Blend the pixels: (1 - alpha) * original + alpha * filter - for c in range(3): - image[y + i, x + j, c] = (1 - alpha_channel[i, j]) * image[y + i, x + j, c] + alpha_channel[i, j] * filter_rgb[i, j, c] + if 0 <= y + i < image.shape[0] and 0 <= x + j < image.shape[1]: # Bounds check + alpha = alpha_channel[i, j] + if alpha > 0: # Only apply non-transparent pixels + image[y + i, x + j] = ( + (1 - alpha) * image[y + i, x + j] + alpha * filter_rgb[i, j] + 
) return image -input_image_path = "ImagePNG\Felipe.jpg" +input_image_path = "ImagePNG\Dorian.png" input_image = cv2.imread(input_image_path) # RGB for Mediapipe diff --git a/ImageProcessing/add_filter_hat.py b/ImageProcessing/add_filter_hat.py index 7d2cb49..ce09687 100644 --- a/ImageProcessing/add_filter_hat.py +++ b/ImageProcessing/add_filter_hat.py @@ -11,7 +11,7 @@ mp_face_mesh = mp.solutions.face_mesh filter_image_path = "ImagePNG/MArio.png" filter_image = cv2.imread(filter_image_path, cv2.IMREAD_UNCHANGED) -def add_filter(image, filter_image, bbox, scale_factor=1.2): +def add_filter_hat(image, filter_image, bbox, scale_factor=1.2): """ Add a filter image to a face image at a specified bounding box position, scaling it dynamically based on the face size. @@ -42,7 +42,7 @@ def add_filter(image, filter_image, bbox, scale_factor=1.2): return image # Load input image -input_image_path = "ImagePNG/output.png" +input_image_path = "ImagePNG/Dorian.png" input_image = cv2.imread(input_image_path) # Convert to RGB for Mediapipe @@ -68,7 +68,7 @@ with mp_face_detection.FaceDetection(model_selection=1, min_detection_confidence dynamic_scale_factor = 2.75 + face_height_ratio # Base size + adjustment # Add filter to the image with dynamic scaling - input_image = add_filter(input_image, filter_image, (x_min, y_min, box_width, box_height), scale_factor=dynamic_scale_factor) + input_image = add_filter_hat(input_image, filter_image, (x_min, y_min, box_width, box_height), scale_factor=dynamic_scale_factor) # Define output folder and save path output_folder = "OutputImage" diff --git a/ImageProcessing/add_filter_moustache.py b/ImageProcessing/add_filter_moustache.py new file mode 100644 index 0000000..0c1f82a --- /dev/null +++ b/ImageProcessing/add_filter_moustache.py @@ -0,0 +1,82 @@ +import os +import cv2 +import mediapipe as mp +import numpy as np + +# Mediapipe setup +mp_face_detection = mp.solutions.face_detection +mp_face_mesh = mp.solutions.face_mesh + +# Load the moustache filter +filter_image_path = "ImagePNG/MoustacheMario.png" +filter_image = cv2.imread(filter_image_path, cv2.IMREAD_UNCHANGED) + +def add_filter_moustache(image, filter_image, nose_tip, scale_factor): + """ + Add a moustache filter to an image based on the nose tip position. 
+ """ + nose_x, nose_y = nose_tip + + # Scale the filter image dynamically based on the face width + filter_width = int(image.shape[1] * scale_factor * 0.1) # Scale relative to image width + filter_height = int(filter_width * filter_image.shape[0] / filter_image.shape[1]) + resized_filter = cv2.resize(filter_image, (filter_width, filter_height)) + + # Adjust the position to place the moustache below the nose + x = int(nose_x - filter_width / 2) + y = int(nose_y + filter_height * 0.2) + + # Extract alpha channel (transparency) from the filter + alpha_channel = resized_filter[:, :, 3] / 255.0 # Normalize to range [0, 1] + filter_rgb = resized_filter[:, :, :3] + + # Overlay the filter on the image using alpha blending + for i in range(filter_height): + for j in range(filter_width): + if 0 <= y + i < image.shape[0] and 0 <= x + j < image.shape[1]: + alpha = alpha_channel[i, j] + if alpha > 0: # Apply only non-transparent pixels + image[y + i, x + j] = ( + (1 - alpha) * image[y + i, x + j] + alpha * filter_rgb[i, j] + ) + + return image + +# Load input image +input_image_path = "ImagePNG/Dorian.png" +input_image = cv2.imread(input_image_path) + +# Convert to RGB for Mediapipe +rgb_image = cv2.cvtColor(input_image, cv2.COLOR_BGR2RGB) + +# Use Mediapipe Face Mesh for robust landmark detection +with mp_face_mesh.FaceMesh(static_image_mode=True, min_detection_confidence=0.5) as face_mesh: + results = face_mesh.process(rgb_image) + + if results.multi_face_landmarks: + for face_landmarks in results.multi_face_landmarks: + # Get the nose tip landmark (index 4 in Mediapipe Face Mesh) + nose_tip = face_landmarks.landmark[4] + nose_x = int(nose_tip.x * input_image.shape[1]) + nose_y = int(nose_tip.y * input_image.shape[0]) + + # Dynamically calculate scale factor based on face size + face_width = abs(face_landmarks.landmark[454].x - face_landmarks.landmark[234].x) * input_image.shape[1] + dynamic_scale_factor = 1.5 + (face_width / input_image.shape[1]) # Base size + adjustment + + # Add filter to the image + input_image = add_filter_moustache(input_image, filter_image, (nose_x, nose_y), scale_factor=dynamic_scale_factor) + +# Define output folder and save path +output_folder = "OutputImage" +os.makedirs(output_folder, exist_ok=True) # Ensure the folder exists +filter_name = os.path.splitext(os.path.basename(filter_image_path))[0] +output_path = os.path.join(output_folder, f"{filter_name}_output_image_dynamic.jpg") + +# Save the output image +cv2.imwrite(output_path, input_image) + +# Display result +cv2.imshow("Image with Filter", input_image) +cv2.waitKey(0) +cv2.destroyAllWindows() \ No newline at end of file diff --git a/ImageProcessing/add_filter_multiple.py b/ImageProcessing/add_filter_multiple.py deleted file mode 100644 index 00ecdc5..0000000 --- a/ImageProcessing/add_filter_multiple.py +++ /dev/null @@ -1,87 +0,0 @@ -import cv2 -import mediapipe as mp -import numpy as np - -mp_face_mesh = mp.solutions.face_mesh - -# List of filters -filter_images = { - 1: "C:\Users\doria\Documents\ECAM\Année 4\IT & Robotics Lab\GrpC_Identikit\ImageProcessing\ImagePNG\Chien1.png", # Replace with your filter image paths - 2: "C:\Users\doria\Documents\ECAM\Année 4\IT & Robotics Lab\GrpC_Identikit\ImageProcessing\ImagePNG\MoustacheMario.png", - 3: "C:\Users\doria\Documents\ECAM\Année 4\IT & Robotics Lab\GrpC_Identikit\ImageProcessing\ImagePNG\MArio.png" -} - -def add_filter(image, filter_image, landmarks): - # Use eyes as reference points - left_eye = landmarks[33] - right_eye = landmarks[263] - - # Distance between 
both eyes --> filter size - eye_dist = np.linalg.norm(np.array(left_eye) - np.array(right_eye)) - - # Adjust the factor for a smaller filter size - scaling_factor = 2.75 - filter_width = int(eye_dist * scaling_factor) - filter_height = int(filter_width * filter_image.shape[0] / filter_image.shape[1]) - resized_filter = cv2.resize(filter_image, (filter_width, filter_height)) - - # Filter position on the face - center_x = int((left_eye[0] + right_eye[0]) / 2) - center_y = int((left_eye[1] + right_eye[1]) / 2) - x = int(center_x - filter_width / 2) - y = int(center_y - filter_height / 2) - - # Extract the alpha channel (transparency) from the filter image - alpha_channel = resized_filter[:, :, 3] / 255.0 # Normalize alpha to range [0, 1] - filter_rgb = resized_filter[:, :, :3] # Extract the RGB channels - - # Overlay the filter onto the image, using the alpha channel as a mask - for i in range(resized_filter.shape[0]): - for j in range(resized_filter.shape[1]): - if alpha_channel[i, j] > 0: # Check if the pixel is not fully transparent - # Blend the pixels: (1 - alpha) * original + alpha * filter - for c in range(3): - image[y + i, x + j, c] = (1 - alpha_channel[i, j]) * image[y + i, x + j, c] + alpha_channel[i, j] * filter_rgb[i, j, c] - - return image - -def apply_filter_by_choice(choice, input_image_path): - # Validate the filter choice - if choice not in filter_images: - print(f"Filter {choice} does not exist. Please choose a valid filter number.") - return - - # Load the input image and filter - input_image = cv2.imread(input_image_path) - filter_image_path = filter_images[choice] - filter_image = cv2.imread(filter_image_path, cv2.IMREAD_UNCHANGED) - - # RGB for Mediapipe - rgb_image = cv2.cvtColor(input_image, cv2.COLOR_BGR2RGB) - - # FaceMesh init - with mp_face_mesh.FaceMesh(min_detection_confidence=0.5, min_tracking_confidence=0.5) as face_mesh: - # Face detection + key points - results = face_mesh.process(rgb_image) - - if results.multi_face_landmarks: - for face_landmarks in results.multi_face_landmarks: - # Key points - landmarks = [(lm.x * input_image.shape[1], lm.y * input_image.shape[0]) for lm in face_landmarks.landmark] - - # Apply the filter - input_image = add_filter(input_image, filter_image, landmarks) - - # Display result - cv2.imshow("Image with Filter", input_image) - cv2.waitKey(0) - cv2.destroyAllWindows() - - # Save the image - output_path = f"output_image_filter_{choice}.jpg" - cv2.imwrite(output_path, input_image) - print(f"Saved filtered image to {output_path}") - -# Example usage: -filter_choice = int(input("Enter the filter number (1, 2, or 3): ")) -apply_filter_by_choice(filter_choice, "Dorianvide.png") diff --git a/ImageProcessing/menu_deroulant.py b/ImageProcessing/menu_deroulant.py new file mode 100644 index 0000000..687d9fe --- /dev/null +++ b/ImageProcessing/menu_deroulant.py @@ -0,0 +1,31 @@ +import tkinter as tk +from tkinter import ttk + +# Function to toggle the button state +def toggle_button(): + if toggle_var.get(): + toggle_button.config(text="Activated") + else: + toggle_button.config(text="Deactivated") + +# Main window +root = tk.Tk() +root.title("Toggle Button with Dropdown Menu") + +# Dropdown menu for options +options = ["Option 1", "Option 2", "Option 3"] +selected_option = tk.StringVar(root) +selected_option.set(options[0]) # default value + +dropdown_menu = ttk.Combobox(root, textvariable=selected_option, values=options) +dropdown_menu.pack(pady=10) + +# Variable to store the toggle state +toggle_var = tk.BooleanVar() + +# Toggle button 
+toggle_button = tk.Checkbutton(root, text="Deactivated", variable=toggle_var, command=toggle_button, indicatoron=False)  # each click flips toggle_var, then the callback updates the label
+toggle_button.pack(pady=20)
+
+# Start the GUI
+root.mainloop()
diff --git a/ImageProcessing/remove_bg.py b/ImageProcessing/remove_bg.py
index f1fdbe9..cf43c81 100644
--- a/ImageProcessing/remove_bg.py
+++ b/ImageProcessing/remove_bg.py
@@ -4,11 +4,11 @@ from rembg import remove
 from PIL import Image
 
 # Store path of the image in the variable input_path
-input_path = 'C:/Users/doria/Documents/ECAM/Année 4/IT & Robotics Lab/GrpC_Identikit/ImageProcessing/ImageJPG/Démon.png'
+input_path = 'ImagePNG/map.png'
 
 # Store path of the output image in the variable output_path
-output_path = 'C:/Users/doria/Documents/ECAM/Année 4/IT & Robotics Lab/GrpC_Identikit/ImageProcessing/Code\Demon.png'
-
+output_path = 'ImagePNG/map1.png'
+
 # Processing the image
 input = Image.open(input_path)
diff --git a/ImageProcessing/sunglasses-hat-moustache-filter.py b/ImageProcessing/sunglasses-hat-moustache-filter.py
new file mode 100644
index 0000000..01f5f22
--- /dev/null
+++ b/ImageProcessing/sunglasses-hat-moustache-filter.py
@@ -0,0 +1,238 @@
+import cv2
+import tkinter as tk
+import mediapipe as mp
+import numpy as np
+import os
+import math
+
+# Load images with transparency
+mario_hat_image_path = "ImagePNG/MArio.png"
+sunglasses_image_path = "ImagePNG/Glasses.png"
+moustache_image_path = "ImagePNG/MoustacheMario.png"
+
+# Load images
+mario_hat = cv2.imread(mario_hat_image_path, cv2.IMREAD_UNCHANGED)
+sunglasses = cv2.imread(sunglasses_image_path, cv2.IMREAD_UNCHANGED)
+moustache = cv2.imread(moustache_image_path, cv2.IMREAD_UNCHANGED)
+
+# Check if images were loaded correctly
+if mario_hat is None:
+    print("Error: Mario hat image not found.")
+    exit()
+if sunglasses is None:
+    print("Error: Sunglasses image not found.")
+    exit()
+if moustache is None:
+    print("Error: Moustache image not found.")
+    exit()
+
+# Initialize MediaPipe FaceMesh
+mp_face_mesh = mp.solutions.face_mesh
+face_mesh = mp_face_mesh.FaceMesh(min_detection_confidence=0.5, min_tracking_confidence=0.5)
+
+# Variables for toggling filters
+mario_hat_active = False
+sunglasses_active = False
+moustache_active = False
+show_angles = False
+
+# Open webcam for capturing live feed
+cap = cv2.VideoCapture(0)
+if not cap.isOpened():
+    print("Error: The webcam cannot be opened")
+    exit()
+
+# Variable to hold the freeze frame
+freeze_frame = None
+
+def calculate_angles(landmarks):
+    left_eye = np.array(landmarks[33])
+    right_eye = np.array(landmarks[263])
+    nose_tip = np.array(landmarks[1])
+    chin = np.array(landmarks[152])
+    yaw = math.degrees(math.atan2(right_eye[1] - left_eye[1], right_eye[0] - left_eye[0]))
+    pitch = math.degrees(math.atan2(chin[1] - nose_tip[1], chin[0] - nose_tip[0]))
+    return yaw, pitch
+
+def apply_mario_hat(frame, landmarks):
+    global mario_hat
+    if mario_hat_active and mario_hat is not None:
+        forehead = landmarks[10]
+        chin = landmarks[152]
+        left_side = landmarks[234]
+        right_side = landmarks[454]
+        face_width = int(np.linalg.norm(np.array(left_side) - np.array(right_side)))
+        hat_width = int(face_width * 4.0)
+        hat_height = int(hat_width * mario_hat.shape[0] / mario_hat.shape[1])
+        mario_hat_resized = cv2.resize(mario_hat, (hat_width, hat_height))
+        x = int(forehead[0] - hat_width / 2)
+        y = int(forehead[1] - hat_height * 0.7)
+        alpha_channel = mario_hat_resized[:, :, 3] / 255.0
+        hat_rgb = mario_hat_resized[:, :, :3]
+        for i in range(hat_height):
+            for j in range(hat_width):
+                if 0 <= y + i < frame.shape[0] and 0 <= x + j < frame.shape[1]:
+                    alpha = alpha_channel[i, j]
+                    if alpha > 0:
+                        for c in range(3):
+                            frame[y + i, x + j, c] = (1 - alpha) * frame[y + i, x + j, c] + alpha * hat_rgb[i, j, c]
+    return frame
+
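# The per-pixel blending loops used throughout this file are easy to follow but
# slow in Python. A vectorized NumPy sketch of the same alpha blend is shown
# below; it is not part of this patch, and overlay_rgba is an illustrative name.
import numpy as np

def overlay_rgba(frame, overlay, x, y):
    """Alpha-blend a BGRA overlay onto a BGR frame at (x, y), clipped to the frame."""
    h, w = overlay.shape[:2]
    # Clip the overlay rectangle to the frame bounds
    x0, y0 = max(x, 0), max(y, 0)
    x1, y1 = min(x + w, frame.shape[1]), min(y + h, frame.shape[0])
    if x0 >= x1 or y0 >= y1:
        return frame  # overlay lies entirely outside the frame
    roi = frame[y0:y1, x0:x1].astype(np.float32)
    patch = overlay[y0 - y:y1 - y, x0 - x:x1 - x]
    alpha = patch[:, :, 3:4].astype(np.float32) / 255.0  # shape (h, w, 1) broadcasts over BGR
    frame[y0:y1, x0:x1] = ((1 - alpha) * roi + alpha * patch[:, :, :3]).astype(np.uint8)
    return frame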
+def apply_sunglasses(frame, landmarks):
+    global sunglasses
+    if sunglasses_active and sunglasses is not None:
+        left_eye = landmarks[33]
+        right_eye = landmarks[263]
+        eye_dist = np.linalg.norm(np.array(left_eye) - np.array(right_eye))
+        scaling_factor = 1.75
+        sunglasses_width = int(eye_dist * scaling_factor)
+        sunglasses_height = int(sunglasses_width * sunglasses.shape[0] / sunglasses.shape[1])
+        sunglasses_resized = cv2.resize(sunglasses, (sunglasses_width, sunglasses_height))
+        center_x = int((left_eye[0] + right_eye[0]) / 2)
+        center_y = int((left_eye[1] + right_eye[1]) / 2)
+        x = int(center_x - sunglasses_resized.shape[1] / 2)
+        y = int(center_y - sunglasses_resized.shape[0] / 2)
+        alpha_channel = sunglasses_resized[:, :, 3] / 255.0
+        sunglasses_rgb = sunglasses_resized[:, :, :3]
+        for i in range(sunglasses_resized.shape[0]):
+            for j in range(sunglasses_resized.shape[1]):
+                if 0 <= y + i < frame.shape[0] and 0 <= x + j < frame.shape[1]:  # clip to the frame, as in the other filters
+                    if alpha_channel[i, j] > 0:
+                        for c in range(3):
+                            frame[y + i, x + j, c] = (1 - alpha_channel[i, j]) * frame[y + i, x + j, c] + alpha_channel[i, j] * sunglasses_rgb[i, j, c]
+    return frame
+
+def apply_moustache(frame, landmarks):
+    global moustache
+    if moustache_active and moustache is not None:
+        nose_base = landmarks[1]
+        mouth_left = landmarks[61]
+        mouth_right = landmarks[291]
+        mouth_width = int(np.linalg.norm(np.array(mouth_left) - np.array(mouth_right)))
+        moustache_width = int(mouth_width * 1.5)
+        moustache_height = int(moustache_width * moustache.shape[0] / moustache.shape[1])
+        moustache_resized = cv2.resize(moustache, (moustache_width, moustache_height))
+        x = int(nose_base[0] - moustache_width / 2)
+        y = int(nose_base[1])
+        alpha_channel = moustache_resized[:, :, 3] / 255.0
+        moustache_rgb = moustache_resized[:, :, :3]
+        for i in range(moustache_height):
+            for j in range(moustache_width):
+                if 0 <= y + i < frame.shape[0] and 0 <= x + j < frame.shape[1]:
+                    alpha = alpha_channel[i, j]
+                    if alpha > 0:
+                        for c in range(3):
+                            frame[y + i, x + j, c] = (1 - alpha) * frame[y + i, x + j, c] + alpha * moustache_rgb[i, j, c]
+    return frame
+
+def update_frame():
+    global mario_hat_active, sunglasses_active, show_angles, freeze_frame, moustache_active
+    ret, frame = cap.read()
+    if ret:
+        rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+        results = face_mesh.process(rgb_frame)
+        if results.multi_face_landmarks:
+            for face_landmarks in results.multi_face_landmarks:
+                landmarks = [(lm.x * frame.shape[1], lm.y * frame.shape[0]) for lm in face_landmarks.landmark]
+                yaw, pitch = calculate_angles(landmarks)
+                if mario_hat_active:
+                    frame = apply_mario_hat(frame, landmarks)
+                if sunglasses_active:
+                    frame = apply_sunglasses(frame, landmarks)
+                if moustache_active:
+                    frame = apply_moustache(frame, landmarks)
+                if show_angles:
+                    cv2.putText(frame, f"Yaw: {yaw:.2f}", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
+                    cv2.putText(frame, f"Pitch: {pitch:.2f}", (10, 70), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
+        cv2.imshow("Webcam Feed", frame)
+        freeze_frame = frame
+    root.after(10, update_frame)
+
+def toggle_mario_hat():
+    global mario_hat_active
+    mario_hat_active = not mario_hat_active
+    status = "activated" if mario_hat_active else "deactivated"
+    print(f"Mario hat filter {status}")
+
+def toggle_sunglasses():
+    global sunglasses_active
+    sunglasses_active = not sunglasses_active
+    status = "activated" if sunglasses_active else 
"deactivated" + print(f"Sunglasses filter {status}") + +def toggle_moustache(): + global moustache_active + moustache_active = not moustache_active + status = "activated" if moustache_active else "deactivated" + print(f"Moustache filter {status}") + +def toggle_angles(): + global show_angles + show_angles = not show_angles + status = "shown" if show_angles else "hidden" + print(f"Angles display {status}") + +def show_freeze_frame(): + if freeze_frame is not None: + cv2.imshow("Face Capture", freeze_frame) + +def retake_image(): + global freeze_frame + ret, frame = cap.read() + if ret: + rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) + results = face_mesh.process(rgb_frame) + if results.multi_face_landmarks: + for face_landmarks in results.multi_face_landmarks: + landmarks = [(lm.x * frame.shape[1], lm.y * frame.shape[0]) for lm in face_landmarks.landmark] + frame = apply_mario_hat(frame, landmarks) + frame = apply_sunglasses(frame, landmarks) + frame = apply_moustache(frame, landmarks) + freeze_frame = frame.copy() + show_freeze_frame() + +def save_image(): + global freeze_frame + if freeze_frame is not None: + save_path = "E:/ECAM/2024-25/IT and Robotics Labs/captured_face.png" + cv2.imwrite(save_path, freeze_frame) + print(f"Image saved to {save_path}") + + +# Tkinter GUI setup +root = tk.Tk() +root.title("Face Capture Controls") +root.geometry("300x400") +root.configure(bg="#ffffff") + +# Buttons on the control window with updated font and colors +mario_hat_button = tk.Button(root, text="Toggle Mario Hat Filter", font=("Arial", 12, "bold"), command=toggle_mario_hat, bg="#5A4D41", fg="white", padx=10, pady=5) +mario_hat_button.pack(pady=10) + +sunglasses_button = tk.Button(root, text="Toggle Sunglasses Filter", font=("Arial", 12, "bold"), command=toggle_sunglasses, bg="#B8860B", fg="white", padx=10, pady=5) +sunglasses_button.pack(pady=10) + +moustache_button = tk.Button(root, text="Toggle Moustache Filter", font=("Arial", 12, "bold"), command=toggle_moustache, bg="#8B8000", fg="white", padx=10, pady=5) +moustache_button.pack(pady=10) + +retake_image_button = tk.Button(root, text="Retake Image", font=("Arial", 12, "bold"), command=retake_image, bg="#2E8B57", fg="white", padx=10, pady=5) +retake_image_button.pack(pady=10) + +save_image_button = tk.Button(root, text="Save Captured Image", font=("Arial", 12, "bold"), command=save_image, bg="#6A5ACD", fg="white", padx=10, pady=5) +save_image_button.pack(pady=10) + +freeze_frame_button = tk.Button(root, text="Show Freeze Frame", font=("Arial", 12, "bold"), command=show_freeze_frame, bg="#D2691E", fg="white", padx=10, pady=5) +freeze_frame_button.pack(pady=10) + +# Graceful exit +def on_closing(): + cap.release() + cv2.destroyAllWindows() + root.destroy() + +root.protocol("WM_DELETE_WINDOW", on_closing) + +show_freeze_frame() + +# Start Tkinter event loop and OpenCV frame updates +update_frame() +root.mainloop() diff --git a/Project/Filters/Glasses.png b/Project/Filters/Glasses.png new file mode 100644 index 0000000..9a37a47 Binary files /dev/null and b/Project/Filters/Glasses.png differ diff --git a/Project/Filters/Mario hat.png b/Project/Filters/Mario hat.png new file mode 100644 index 0000000..4f675ae Binary files /dev/null and b/Project/Filters/Mario hat.png differ diff --git a/Project/Filters/MoustacheMario.png b/Project/Filters/MoustacheMario.png new file mode 100644 index 0000000..3eb745d Binary files /dev/null and b/Project/Filters/MoustacheMario.png differ diff --git a/Project/Tmp/Dorian.png b/Project/Tmp/Dorian.png new file mode 
100644
index 0000000..b49c5a7
Binary files /dev/null and b/Project/Tmp/Dorian.png differ
diff --git a/Project/Tmp/Dorian_nobg.png b/Project/Tmp/Dorian_nobg.png
new file mode 100644
index 0000000..8178b54
Binary files /dev/null and b/Project/Tmp/Dorian_nobg.png differ
diff --git a/Project/Tmp/captured_face.png b/Project/Tmp/captured_face.png
new file mode 100644
index 0000000..6c49f7c
Binary files /dev/null and b/Project/Tmp/captured_face.png differ
diff --git a/Project/Tmp/captured_face_nobg.png b/Project/Tmp/captured_face_nobg.png
new file mode 100644
index 0000000..8bf1ef3
Binary files /dev/null and b/Project/Tmp/captured_face_nobg.png differ
diff --git a/Project/Tmp/final_output_image.png b/Project/Tmp/final_output_image.png
new file mode 100644
index 0000000..7997d66
Binary files /dev/null and b/Project/Tmp/final_output_image.png differ
diff --git a/Project/dobot.py b/Project/dobot.py
new file mode 100644
index 0000000..4f9c693
--- /dev/null
+++ b/Project/dobot.py
@@ -0,0 +1,2534 @@
+import serial
+from serial import SerialException
+from serial.serialutil import SerialTimeoutException
+import struct
+
+
+
+# Initialization
+
+# Checksum computation
+
+def checksum(trame):
+    total = 0x00
+    length = int(trame[2])
+
+    for i in range(3, length+3):
+        total = total + trame[i]
+
+    total = (0xFF + 0x01 - total) % 0x100
+    return total
+
+# Detects which COM port the Dobot is connected to
+def findPortCOM():
+    print("Searching for the COM port...")
+    for i in range(3, 21):
+        try:
+            test = serial.Serial()
+            test.baudrate = 115200
+            test.port = 'COM'+str(i)
+            test.timeout = 1
+            test.open()
+            trame = bytearray()  # getDeviceName
+            trame.insert(0, 0xAA)
+            trame.insert(1, 0xAA)
+            trame.insert(2, 0x02)
+            trame.insert(3, 0x00)
+            trame.insert(4, 0x00)
+            trame.insert(5, checksum(trame))
+            test.write(trame)
+            response = test.read(1)
+            test.close()  # release the probe port so it can be reopened below
+            if response:
+                return i
+        except SerialException:
+            pass
+
+# Connect to the Dobot
+
+dobot = serial.Serial()
+dobot.baudrate = 115200
+dobot.port = "COM"+str(findPortCOM())
+dobot.open()
+
+
+def setPortCOM(port):
+    com = 'COM' + str(port)
+    dobot.port = com
+    print("Dobot is now communicating on " + com)
+
+
+def printByte(reponse):
+    print("b'{}'".format(''.join('\\x{:02x}'.format(b) for b in reponse)))
+
+
+def getResponseLength():
+    call = dobot.read(3)
+    return call[2] + 1
+
+
+# List of functions
+
+def getDeviceSN():
+    trame = bytearray()
+
+    trame.insert(0, 0xAA)
+    trame.insert(1, 0xAA)
+    trame.insert(2, 0x02)
+    trame.insert(3, 0x00)
+    trame.insert(4, 0x00)
+    trame.insert(5, checksum(trame))
+
+    dobot.write(trame)
+    longueur = getResponseLength()
+    reponse = dobot.read(longueur)
+
+    serialNumber = reponse[2:longueur-1].decode()
+    printByte(reponse)
+    return serialNumber
+
+
+def setDeviceName(name, isQueued = 0):
+    trame = bytearray()
+
+    trame.insert(0, 0xAA)
+    trame.insert(1, 0xAA)
+    trame.insert(2, 2+len(name))
+    trame.insert(3, 0x01)
+
+    ctrlByte = 1 + 2 * isQueued
+    trame.insert(4, ctrlByte)
+
+    nameByte = str.encode(name)
+
+    for i in range(0, len(name)):
+        trame.insert(5+i, nameByte[i])
+
+    trame.insert(5+len(name), checksum(trame))
+
+    dobot.write(trame)
+    dobot.read(getResponseLength())
+
+
+def getDeviceName():
+    trame = bytearray()
+
+    trame.insert(0, 0xAA)
+    trame.insert(1, 0xAA)
+    trame.insert(2, 0x02)
+    trame.insert(3, 0x01)
+    trame.insert(4, 0x00)
+    trame.insert(5, checksum(trame))
+
+    dobot.write(trame)
+    longueur = getResponseLength()
+    reponse = dobot.read(longueur)
+
+    name = reponse[2:longueur-1].decode()
+    return name
+
+
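# Every request in this file shares the same frame layout: two 0xAA sync bytes,
# a payload length, a command ID, a control byte (bit 0 = write, bit 1 = queued),
# an optional payload, and the checksum computed by checksum() above. A generic
# builder is sketched below; it is not part of this patch, and build_frame is an
# illustrative name.
def build_frame(cmd_id, ctrl, payload=b""):
    trame = bytearray([0xAA, 0xAA, 2 + len(payload), cmd_id, ctrl])
    trame.extend(payload)
    trame.append(checksum(trame))  # checksum() sums the ID, ctrl and payload bytes
    return trame

# For example, the getPose() request below is equivalent to build_frame(0x0A, 0x00).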
+def getDeviceVersion():
+    trame = bytearray()
+
+    trame.insert(0, 0xAA)
+    trame.insert(1, 0xAA)
+    trame.insert(2, 0x02)
+    trame.insert(3, 0x02)
+    trame.insert(4, 0x00)
+    trame.insert(5, checksum(trame))
+
+    dobot.write(trame)
+    longueur = getResponseLength()
+    reponse = dobot.read(longueur)
+
+    printByte(reponse)  # for review / debugging
+
+    v1 = int(reponse[2])
+    v2 = int(reponse[3])
+    v3 = int(reponse[4])
+
+    return [v1, v2, v3]
+
+
+def getDeviceTime():  # returns the Dobot time in milliseconds
+    trame = bytearray()
+
+    trame.insert(0, 0xAA)
+    trame.insert(1, 0xAA)
+    trame.insert(2, 0x02)
+    trame.insert(3, 0x04)
+    trame.insert(4, 0x00)
+    trame.insert(5, checksum(trame))
+
+    dobot.write(trame)
+    reponse = dobot.read(getResponseLength())
+
+    time = reponse[2]+reponse[3]*256+reponse[4]*256*256+reponse[5]*256*256*256
+
+    return time
+
+
+def getDeviceID():
+    trame = bytearray()
+
+    trame.insert(0, 0xAA)
+    trame.insert(1, 0xAA)
+    trame.insert(2, 0x02)
+    trame.insert(3, 0x05)
+    trame.insert(4, 0x00)
+    trame.insert(5, checksum(trame))
+
+    dobot.write(trame)
+    reponse = dobot.read(getResponseLength())
+
+    printByte(reponse)  # for review / debugging
+
+    t1 = struct.unpack("i", reponse[2:6])[0]
+    t2 = struct.unpack("i", reponse[6:10])[0]
+    t3 = struct.unpack("i", reponse[10:14])[0]
+
+    return [t1, t2, t3]
+
+
+def getPose():  # returns a list of the x, y, z positions and the j1, j2, j3, j4 angles (j4 is also called r)
+    trame = bytearray()
+
+    trame.insert(0, 0xAA)
+    trame.insert(1, 0xAA)
+    trame.insert(2, 0x02)
+    trame.insert(3, 0x0A)
+    trame.insert(4, 0x00)
+    trame.insert(5, checksum(trame))
+
+    dobot.write(trame)
+    reponse = dobot.read(getResponseLength())
+
+    x = float(struct.unpack("f", reponse[2:6])[0])
+    y = float(struct.unpack("f", reponse[6:10])[0])
+    z = float(struct.unpack("f", reponse[10:14])[0])
+    r = float(struct.unpack("f", reponse[14:18])[0])
+    j1 = float(struct.unpack("f", reponse[18:22])[0])
+    j2 = float(struct.unpack("f", reponse[22:26])[0])
+    j3 = float(struct.unpack("f", reponse[26:30])[0])
+    j4 = float(struct.unpack("f", reponse[30:34])[0])
+
+    return([x, y, z, r, j1, j2, j3, j4])
+
+
+def resetPose(mode, rearArmAngle = 0, frontArmAngle = 0, isQueued = 0):
+    trame = bytearray()
+
+    trame.insert(0, 0xAA)
+    trame.insert(1, 0xAA)
+    trame.insert(2, 0x0B)
+    trame.insert(3, 0x0B)
+
+    ctrlByte = 1 + 2 * isQueued
+    trame.insert(4, ctrlByte)
+
+    trame.insert(5, mode)
+
+    rAngle = bytearray(struct.pack("f", rearArmAngle))
+
+    trame.insert(6, rAngle[0])
+    trame.insert(7, rAngle[1])
+    trame.insert(8, rAngle[2])
+    trame.insert(9, rAngle[3])
+
+    fAngle = bytearray(struct.pack("f", frontArmAngle))
+
+    trame.insert(10, fAngle[0])
+    trame.insert(11, fAngle[1])
+    trame.insert(12, fAngle[2])
+    trame.insert(13, fAngle[3])
+
+    trame.insert(14, checksum(trame))
+
+    dobot.write(trame)
+    dobot.read(getResponseLength())
+
+
+def getAlarmsState():
+    trame = bytearray()
+
+    trame.insert(0, 0xAA)
+    trame.insert(1, 0xAA)
+    trame.insert(2, 0x02)
+    trame.insert(3, 0x14)
+    trame.insert(4, 0x01)
+    trame.insert(5, checksum(trame))
+
+    dobot.write(trame)
+    dobot.read(getResponseLength())
+
+
+def clearAllAlarmsState():
+    trame = bytearray()
+
+    trame.insert(0, 0xAA)
+    trame.insert(1, 0xAA)
+    trame.insert(2, 0x02)
+    trame.insert(3, 0x15)
+    trame.insert(4, 0x01)
+    trame.insert(5, checksum(trame))
+
+    dobot.write(trame)
+    dobot.read(getResponseLength())
+
+
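# The setters below all serialize each float byte by byte with four insert()
# calls. struct.pack already yields those bytes in order, so a small helper can
# append them in one step. A sketch, not part of this patch; pack_floats is an
# illustrative name, and "<f" matches the native little-endian layout the
# byte-by-byte pattern produces on x86.
def pack_floats(*values):
    payload = bytearray()
    for v in values:
        payload.extend(struct.pack("<f", v))  # 4 bytes per value, little-endian
    return payload

# e.g. setHomeParams below could append its parameters with trame.extend(pack_floats(x, y, z, r)).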
+def setHomeParams(x, y, z, r, isQueued = 0):
+    trame = bytearray()
+
+    trame.insert(0, 0xAA)
+    trame.insert(1, 0xAA)
+    trame.insert(2, 0x12)
+    trame.insert(3, 0x1E)
+
+    ctrlByte = 1 + 2 * isQueued
+    trame.insert(4, ctrlByte)
+
+    xParam = bytearray(struct.pack("f", x))
+
+    trame.insert(5, xParam[0])
+    trame.insert(6, xParam[1])
+    trame.insert(7, xParam[2])
+    trame.insert(8, xParam[3])
+
+    yParam = bytearray(struct.pack("f", y))
+
+    trame.insert(9, yParam[0])
+    trame.insert(10, yParam[1])
+    trame.insert(11, yParam[2])
+    trame.insert(12, yParam[3])
+
+    zParam = bytearray(struct.pack("f", z))
+
+    trame.insert(13, zParam[0])
+    trame.insert(14, zParam[1])
+    trame.insert(15, zParam[2])
+    trame.insert(16, zParam[3])
+
+    rParam = bytearray(struct.pack("f", r))
+
+    trame.insert(17, rParam[0])
+    trame.insert(18, rParam[1])
+    trame.insert(19, rParam[2])
+    trame.insert(20, rParam[3])
+
+    trame.insert(21, checksum(trame))
+
+    dobot.write(trame)
+    dobot.read(getResponseLength())
+
+
+def getHomeParams():
+    trame = bytearray()
+
+    trame.insert(0, 0xAA)
+    trame.insert(1, 0xAA)
+    trame.insert(2, 0x02)
+    trame.insert(3, 0x1E)
+    trame.insert(4, 0x00)
+    trame.insert(5, checksum(trame))
+
+    dobot.write(trame)
+    reponse = dobot.read(getResponseLength())
+
+    x = float(struct.unpack("f", reponse[2:6])[0])
+    y = float(struct.unpack("f", reponse[6:10])[0])
+    z = float(struct.unpack("f", reponse[10:14])[0])
+    r = float(struct.unpack("f", reponse[14:18])[0])
+
+    return [x, y, z, r]
+
+
+def setHomeCmd(isQueued = 0):
+    trame = bytearray()
+
+    trame.insert(0, 0xAA)
+    trame.insert(1, 0xAA)
+    trame.insert(2, 0x03)
+    trame.insert(3, 0x1F)
+
+    ctrlByte = 1 + 2 * isQueued
+    trame.insert(4, ctrlByte)
+
+    trame.insert(5, 0x00)
+    trame.insert(6, checksum(trame))
+
+    dobot.write(trame)
+    dobot.read(getResponseLength())
+
+
+def setAutoLeveling(isAutoLeveling, accuracy, isQueued = 0):
+    trame = bytearray()
+
+    trame.insert(0, 0xAA)
+    trame.insert(1, 0xAA)
+    trame.insert(2, 7)
+    trame.insert(3, 32)
+
+    ctrlByte = 1 + 2 * isQueued
+    trame.insert(4, ctrlByte)
+
+    trame.insert(5, isAutoLeveling)
+
+    accuracyParam = bytearray(struct.pack("f", accuracy))
+
+    trame.insert(6, accuracyParam[0])
+    trame.insert(7, accuracyParam[1])
+    trame.insert(8, accuracyParam[2])
+    trame.insert(9, accuracyParam[3])
+
+    trame.insert(10, checksum(trame))
+
+    dobot.write(trame)
+
+    return dobot.read(getResponseLength())
+
+
+def getAutoLeveling():
+    trame = bytearray()
+
+    trame.insert(0, 0xAA)
+    trame.insert(1, 0xAA)
+    trame.insert(2, 0x02)
+    trame.insert(3, 32)
+    trame.insert(4, 0x00)
+    trame.insert(5, checksum(trame))
+
+    dobot.write(trame)
+    reponse = dobot.read(getResponseLength())
+
+    result = float(struct.unpack("f", reponse[2:6])[0])
+
+    return result
+
+
+def setHHTTrigMode(mode, isQueued = 0):
+    trame = bytearray()
+
+    trame.insert(0, 0xAA)
+    trame.insert(1, 0xAA)
+    trame.insert(2, 0x04)
+    trame.insert(3, 0x28)
+
+    ctrlByte = 1 + 2 * isQueued
+    trame.insert(4, ctrlByte)
+
+    trame.insert(5, mode)
+    trame.insert(6, mode)
+    # mode = 1 --> capture the position continuously
+    # mode = 0 --> capture the position when the button is released
+
+    trame.insert(7, checksum(trame))
+
+    dobot.write(trame)
+    dobot.read(getResponseLength())
+
+
+def getHHTTrigMode():
+    trame = bytearray()
+
+    trame.insert(0, 0xAA)
+    trame.insert(1, 0xAA)
+    trame.insert(2, 0x02)
+    trame.insert(3, 0x28)
+    trame.insert(4, 0x00)
+    trame.insert(5, checksum(trame))
+
+    dobot.write(trame)
+    reponse = dobot.read(getResponseLength())
+
+    mode = int(reponse[2])
+    return mode
+
+
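# Usage sketch for the hand-hold teaching (HHT) trigger, assuming the HHT
# functions in this section and getPose() above. Not part of this patch;
# record_taught_points and the 0.05 s poll interval are illustrative.
import time

def record_taught_points(n_points):
    setHHTTrigMode(0)             # capture a position when the button is released
    setHHTTrigOutputEnabled(1)    # enable the trigger output
    points = []
    while len(points) < n_points:
        if getHHTTrigOutput():    # the button fired since the last poll
            points.append(getPose()[:4])  # keep x, y, z, r
        time.sleep(0.05)
    return points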
+def setHHTTrigOutputEnabled(isOutputEnabled, isQueued = 0):
+    trame = bytearray()
+
+    trame.insert(0, 0xAA)
+    trame.insert(1, 0xAA)
+    trame.insert(2, 0x03)
+    trame.insert(3, 0x29)
+
+    ctrlByte = 1 + 2 * isQueued
+    trame.insert(4, ctrlByte)
+
+    trame.insert(5, isOutputEnabled)
+
+    trame.insert(6, checksum(trame))
+
+    dobot.write(trame)
+    dobot.read(getResponseLength())
+
+
+def getHHTTrigOutputEnabled():
+    trame = bytearray()
+
+    trame.insert(0, 0xAA)
+    trame.insert(1, 0xAA)
+    trame.insert(2, 0x02)
+    trame.insert(3, 0x29)
+    trame.insert(4, 0x00)
+    trame.insert(5, checksum(trame))
+
+    dobot.write(trame)
+    reponse = dobot.read(getResponseLength())
+
+    isOutputEnabled = int(reponse[2])
+    return isOutputEnabled
+
+
+def getHHTTrigOutput():
+    trame = bytearray()
+
+    trame.insert(0, 0xAA)
+    trame.insert(1, 0xAA)
+    trame.insert(2, 0x02)
+    trame.insert(3, 0x2A)
+    trame.insert(4, 0x00)
+    trame.insert(5, checksum(trame))
+
+    dobot.write(trame)
+    reponse = dobot.read(getResponseLength())
+
+    output = int(reponse[2])
+    return output
+
+
+def setEndEffectorParams(xOffset, yOffset, zOffset, isQueued = 0):
+    trame = bytearray()
+
+    trame.insert(0, 0xAA)
+    trame.insert(1, 0xAA)
+    trame.insert(2, 0x0E)
+    trame.insert(3, 0x3C)
+
+    ctrlByte = 1 + 2 * isQueued
+    trame.insert(4, ctrlByte)
+
+    x = bytearray(struct.pack("f", xOffset))
+
+    trame.insert(5, x[0])
+    trame.insert(6, x[1])
+    trame.insert(7, x[2])
+    trame.insert(8, x[3])
+
+    y = bytearray(struct.pack("f", yOffset))
+
+    trame.insert(9, y[0])
+    trame.insert(10, y[1])
+    trame.insert(11, y[2])
+    trame.insert(12, y[3])
+
+    z = bytearray(struct.pack("f", zOffset))
+
+    trame.insert(13, z[0])
+    trame.insert(14, z[1])
+    trame.insert(15, z[2])
+    trame.insert(16, z[3])
+
+    trame.insert(17, checksum(trame))
+
+    dobot.write(trame)
+    dobot.read(getResponseLength())
+
+
+def getEndEffectorParams():
+    trame = bytearray()
+
+    trame.insert(0, 0xAA)
+    trame.insert(1, 0xAA)
+    trame.insert(2, 0x02)
+    trame.insert(3, 0x3C)
+    trame.insert(4, 0x00)
+    trame.insert(5, checksum(trame))
+
+    dobot.write(trame)
+    reponse = dobot.read(getResponseLength())
+
+    xOffset = float(struct.unpack("f", reponse[2:6])[0])
+    yOffset = float(struct.unpack("f", reponse[6:10])[0])
+    zOffset = float(struct.unpack("f", reponse[10:14])[0])
+
+    return [xOffset, yOffset, zOffset]
+
+
+def setEndEffectorSuctionCup(onOff, isQueued = 0):
+    trame = bytearray()
+
+    trame.insert(0, 0xAA)
+    trame.insert(1, 0xAA)
+    trame.insert(2, 0x04)
+    trame.insert(3, 0x3E)
+
+    ctrlByte = 1 + 2 * isQueued
+    trame.insert(4, ctrlByte)
+
+    trame.insert(5, 0x01)
+
+    trame.insert(6, onOff)
+    # onOff = 1 --> SuctionCup On
+    # onOff = 0 --> SuctionCup Off
+
+    trame.insert(7, checksum(trame))
+
+    dobot.write(trame)
+    dobot.read(getResponseLength())
+
+
+def getEndEffectorSuctionCup():
+    trame = bytearray()
+
+    trame.insert(0, 0xAA)
+    trame.insert(1, 0xAA)
+    trame.insert(2, 0x02)
+    trame.insert(3, 0x3E)
+    trame.insert(4, 0x00)
+    trame.insert(5, checksum(trame))
+
+    dobot.write(trame)
+    reponse = dobot.read(getResponseLength())
+
+    isSucked = reponse[3]
+    return isSucked
+
+
+def setEndEffectorGripper(onOff, isGripped, isQueued = 0):
+    trame = bytearray()
+
+    trame.insert(0, 0xAA)
+    trame.insert(1, 0xAA)
+    trame.insert(2, 0x04)
+    trame.insert(3, 63)
+
+    ctrlByte = 1 + 2 * isQueued
+    trame.insert(4, ctrlByte)
+
+    trame.insert(5, onOff)
+    # onOff = 1 --> Gripper On
+    # onOff = 0 --> Gripper Off
+
+    trame.insert(6, isGripped)
+
+    trame.insert(7, checksum(trame))
+
+    dobot.write(trame)
+    dobot.read(getResponseLength())
+
+
+def getEndEffectorGripper():
+    trame = bytearray()
+
+    trame.insert(0, 0xAA)
+    trame.insert(1, 0xAA)
+    trame.insert(2, 0x02)
+    trame.insert(3, 63)
+    trame.insert(4, 0x00)
+    trame.insert(5, checksum(trame))
+
dobot.write(trame) + reponse = dobot.read(getResponseLength()) + + onOff = reponse[2] + isGripped = reponse[3] + + return [onOff, isGripped] + + +def setJOGjointParams(j1Velocity, j2Velocity, j3Velocity, j4Velocity, j1Accel, j2Accel, j3Accel, j4Accel, isQueued = 0): + trame = bytearray() + + trame.insert(0, 0xAA) + trame.insert(1, 0xAA) + trame.insert(2, 0x22) + trame.insert(3, 70) + + ctrlByte = 1 + 2 * isQueued + trame.insert(4, ctrlByte) + + j1VelocityParam = bytearray(struct.pack("f", j1Velocity)) + + trame.insert(5, j1VelocityParam[0]) + trame.insert(6, j1VelocityParam[1]) + trame.insert(7, j1VelocityParam[2]) + trame.insert(8, j1VelocityParam[3]) + + j2VelocityParam = bytearray(struct.pack("f", j2Velocity)) + + trame.insert(9, j2VelocityParam[0]) + trame.insert(10, j2VelocityParam[1]) + trame.insert(11, j2VelocityParam[2]) + trame.insert(12, j2VelocityParam[3]) + + j3VelocityParam = bytearray(struct.pack("f", j3Velocity)) + + trame.insert(13, j3VelocityParam[0]) + trame.insert(14, j3VelocityParam[1]) + trame.insert(15, j3VelocityParam[2]) + trame.insert(16, j3VelocityParam[3]) + + j4VelocityParam = bytearray(struct.pack("f", j4Velocity)) + + trame.insert(17, j4VelocityParam[0]) + trame.insert(18, j4VelocityParam[1]) + trame.insert(19, j4VelocityParam[2]) + trame.insert(20, j4VelocityParam[3]) + + j1AccelParam = bytearray(struct.pack("f", j1Accel)) + + trame.insert(21, j1AccelParam[0]) + trame.insert(22, j1AccelParam[1]) + trame.insert(23, j1AccelParam[2]) + trame.insert(24, j1AccelParam[3]) + + j2AccelParam = bytearray(struct.pack("f", j2Accel)) + + trame.insert(25, j2AccelParam[0]) + trame.insert(26, j2AccelParam[1]) + trame.insert(27, j2AccelParam[2]) + trame.insert(28, j2AccelParam[3]) + + j3AccelParam = bytearray(struct.pack("f", j3Accel)) + + trame.insert(29, j3AccelParam[0]) + trame.insert(30, j3AccelParam[1]) + trame.insert(31, j3AccelParam[2]) + trame.insert(32, j3AccelParam[3]) + + j4AccelParam = bytearray(struct.pack("f", j4Accel)) + + trame.insert(33, j4AccelParam[0]) + trame.insert(34, j4AccelParam[1]) + trame.insert(35, j4AccelParam[2]) + trame.insert(36, j4AccelParam[3]) + + trame.insert(37, checksum(trame)) + + dobot.write(trame) + dobot.read(getResponseLength()) + + +def getJOGjointParams(): + trame = bytearray() + + trame.insert(0, 0xAA) + trame.insert(1, 0xAA) + trame.insert(2, 0x02) + trame.insert(3, 70) + trame.insert(4, 0x00) + trame.insert(5, checksum(trame)) + + dobot.write(trame) + reponse = dobot.read(getResponseLength()) + + j1Velocity = float(struct.unpack("f", reponse[2:6])[0]) + j2Velocity = float(struct.unpack("f", reponse[6:10])[0]) + j3Velocity = float(struct.unpack("f", reponse[10:14])[0]) + j4Velocity = float(struct.unpack("f", reponse[14:18])[0]) + + j1Accel = float(struct.unpack("f", reponse[18:22])[0]) + j2Accel = float(struct.unpack("f", reponse[22:26])[0]) + j3Accel = float(struct.unpack("f", reponse[26:30])[0]) + j4Accel = float(struct.unpack("f", reponse[30:34])[0]) + + return [j1Velocity, j2Velocity, j3Velocity, j4Velocity, j1Accel, j2Accel, j3Accel, j4Accel] + + +def setJOGCoordinateParams(xAxisVelocity, yAxisVelocity, zAxisVelocity, rVelocity, xAxisAccel, yAxisAccel, zAxisAccel, rAccel, isQueued = 0): + trame = bytearray() + + trame.insert(0, 0xAA) + trame.insert(1, 0xAA) + trame.insert(2, 0x22) + trame.insert(3, 71) + + ctrlByte = 1 + 2 * isQueued + trame.insert(4, ctrlByte) + + xAxisVelocityParam = bytearray(struct.pack("f", xAxisVelocity)) + + trame.insert(5, xAxisVelocityParam[0]) + trame.insert(6, xAxisVelocityParam[1]) + 
trame.insert(7, xAxisVelocityParam[2]) + trame.insert(8, xAxisVelocityParam[3]) + + yAxisVelocityParam = bytearray(struct.pack("f", yAxisVelocity)) + + trame.insert(9, yAxisVelocityParam[0]) + trame.insert(10, yAxisVelocityParam[1]) + trame.insert(11, yAxisVelocityParam[2]) + trame.insert(12, yAxisVelocityParam[3]) + + zAxisVelocityParam = bytearray(struct.pack("f", zAxisVelocity)) + + trame.insert(13, zAxisVelocityParam[0]) + trame.insert(14, zAxisVelocityParam[1]) + trame.insert(15, zAxisVelocityParam[2]) + trame.insert(16, zAxisVelocityParam[3]) + + rVelocityParam = bytearray(struct.pack("f", rVelocity)) + + trame.insert(17, rVelocityParam[0]) + trame.insert(18, rVelocityParam[1]) + trame.insert(19, rVelocityParam[2]) + trame.insert(20, rVelocityParam[3]) + + xAxisAccelParam = bytearray(struct.pack("f", xAxisAccel)) + + trame.insert(21, xAxisAccelParam[0]) + trame.insert(22, xAxisAccelParam[1]) + trame.insert(23, xAxisAccelParam[2]) + trame.insert(24, xAxisAccelParam[3]) + + yAxisAccelParam = bytearray(struct.pack("f", yAxisAccel)) + + trame.insert(25, yAxisAccelParam[0]) + trame.insert(26, yAxisAccelParam[1]) + trame.insert(27, yAxisAccelParam[2]) + trame.insert(28, yAxisAccelParam[3]) + + zAxisAccelParam = bytearray(struct.pack("f", zAxisAccel)) + + trame.insert(29, zAxisAccelParam[0]) + trame.insert(30, zAxisAccelParam[1]) + trame.insert(31, zAxisAccelParam[2]) + trame.insert(32, zAxisAccelParam[3]) + + rAccelParam = bytearray(struct.pack("f", rAccel)) + + trame.insert(33, rAccelParam[0]) + trame.insert(34, rAccelParam[1]) + trame.insert(35, rAccelParam[2]) + trame.insert(36, rAccelParam[3]) + + trame.insert(37, checksum(trame)) + + dobot.write(trame) + dobot.read(getResponseLength()) + + +def getJOGCoordinateParams(): + trame = bytearray() + + trame.insert(0, 0xAA) + trame.insert(1, 0xAA) + trame.insert(2, 0x02) + trame.insert(3, 71) + trame.insert(4, 0x00) + trame.insert(5, checksum(trame)) + + dobot.write(trame) + reponse = dobot.read(getResponseLength()) + + xAxisVelocity = float(struct.unpack("f", reponse[2:6])[0]) + yAxisVelocity = float(struct.unpack("f", reponse[6:10])[0]) + zAxisVelocity = float(struct.unpack("f", reponse[10:14])[0]) + rVelocity = float(struct.unpack("f", reponse[14:18])[0]) + xAxisVAccel = float(struct.unpack("f", reponse[18:22])[0]) + yAxisAccel = float(struct.unpack("f", reponse[22:26])[0]) + zAxisAccel = float(struct.unpack("f", reponse[26:30])[0]) + rAccel = float(struct.unpack("f", reponse[30:34])[0]) + + return [xAxisVelocity, yAxisVelocity, zAxisVelocity, rVelocity, xAxisVAccel, yAxisAccel, zAxisAccel, rAccel] + + +def setJOGCommonParams(velocityRatio, accelerationRatio, isQueued = 0): + trame = bytearray() + + trame.insert(0, 0xAA) + trame.insert(1, 0xAA) + trame.insert(2, 0x0A) + trame.insert(3, 72) + + ctrlByte = 1 + 2 * isQueued + trame.insert(4, ctrlByte) + + velocityRatioParam = bytearray(struct.pack("f", velocityRatio)) + + trame.insert(5, velocityRatioParam[0]) + trame.insert(6, velocityRatioParam[1]) + trame.insert(7, velocityRatioParam[2]) + trame.insert(8, velocityRatioParam[3]) + + accelerationRatioParam = bytearray(struct.pack("f", accelerationRatio)) + + trame.insert(9, accelerationRatioParam[0]) + trame.insert(10, accelerationRatioParam[1]) + trame.insert(11, accelerationRatioParam[2]) + trame.insert(12, accelerationRatioParam[3]) + + trame.insert(13, checksum(trame)) + + dobot.write(trame) + dobot.read(getResponseLength()) + + +def getJOGCommonParams(): + trame = bytearray() + + trame.insert(0, 0xAA) + trame.insert(1, 0xAA) + 
trame.insert(2, 0x02) + trame.insert(3, 72) + trame.insert(4, 0x00) + trame.insert(5, checksum(trame)) + + dobot.write(trame) + reponse = dobot.read(getResponseLength()) + + velocityRatio = struct.unpack("f", reponse[2:6])[0] + accelerationRatio = struct.unpack("f", reponse[6:10])[0] + + return [velocityRatio, accelerationRatio] + + +def setJOGCmd(jogMode, jogCommand, isQueued = 0): + trame = bytearray() + + trame.insert(0, 0xAA) + trame.insert(1, 0xAA) + trame.insert(2, 0x04) + trame.insert(3, 73) + + ctrlByte = 1 + 2 * isQueued + trame.insert(4, ctrlByte) + + trame.insert(5, jogMode) + + # Values of jogMode : + + # jogMode = 0 --> cartesian coordinate system + # jogMode = 1 --> joint system + + trame.insert(6, jogCommand) + + trame.insert(7, checksum(trame)) + + dobot.write(trame) + dobot.read(getResponseLength()) + + +def setPTPjointParams(j1Velocity, j2Velocity, j3Velocity, j4Velocity, j1Accel, j2Accel, j3Accel, j4Accel, isQueued = 0): + trame = bytearray() + + trame.insert(0, 0xAA) + trame.insert(1, 0xAA) + trame.insert(2, 0x22) + trame.insert(3, 0x50) + + ctrlByte = 1 + 2 * isQueued + trame.insert(4, ctrlByte) + + j1VelocityParam = bytearray(struct.pack("f", j1Velocity)) + + trame.insert(5, j1VelocityParam[0]) + trame.insert(6, j1VelocityParam[1]) + trame.insert(7, j1VelocityParam[2]) + trame.insert(8, j1VelocityParam[3]) + + j2VelocityParam = bytearray(struct.pack("f", j2Velocity)) + + trame.insert(9, j2VelocityParam[0]) + trame.insert(10, j2VelocityParam[1]) + trame.insert(11, j2VelocityParam[2]) + trame.insert(12, j2VelocityParam[3]) + + j3VelocityParam = bytearray(struct.pack("f", j3Velocity)) + + trame.insert(13, j3VelocityParam[0]) + trame.insert(14, j3VelocityParam[1]) + trame.insert(15, j3VelocityParam[2]) + trame.insert(16, j3VelocityParam[3]) + + j4VelocityParam = bytearray(struct.pack("f", j4Velocity)) + + trame.insert(17, j4VelocityParam[0]) + trame.insert(18, j4VelocityParam[1]) + trame.insert(19, j4VelocityParam[2]) + trame.insert(20, j4VelocityParam[3]) + + j1AccelParam = bytearray(struct.pack("f", j1Accel)) + + trame.insert(21, j1AccelParam[0]) + trame.insert(22, j1AccelParam[1]) + trame.insert(23, j1AccelParam[2]) + trame.insert(24, j1AccelParam[3]) + + j2AccelParam = bytearray(struct.pack("f", j2Accel)) + + trame.insert(25, j2AccelParam[0]) + trame.insert(26, j2AccelParam[1]) + trame.insert(27, j2AccelParam[2]) + trame.insert(28, j2AccelParam[3]) + + j3AccelParam = bytearray(struct.pack("f", j3Accel)) + + trame.insert(29, j3AccelParam[0]) + trame.insert(30, j3AccelParam[1]) + trame.insert(31, j3AccelParam[2]) + trame.insert(32, j3AccelParam[3]) + + j4AccelParam = bytearray(struct.pack("f", j4Accel)) + + trame.insert(33, j4AccelParam[0]) + trame.insert(34, j4AccelParam[1]) + trame.insert(35, j4AccelParam[2]) + trame.insert(36, j4AccelParam[3]) + + trame.insert(37, checksum(trame)) + + dobot.write(trame) + dobot.read(getResponseLength()) + + +def getPTPjointParams(): + trame = bytearray() + + trame.insert(0, 0xAA) + trame.insert(1, 0xAA) + trame.insert(2, 0x02) + trame.insert(3, 0x50) + trame.insert(4, 0x00) + trame.insert(5, checksum(trame)) + + dobot.write(trame) + reponse = dobot.read(getResponseLength()) + + j1Velocity = float(struct.unpack("f", reponse[2:6])[0]) + j2Velocity = float(struct.unpack("f", reponse[6:10])[0]) + j3Velocity = float(struct.unpack("f", reponse[10:14])[0]) + j4Velocity = float(struct.unpack("f", reponse[14:18])[0]) + + j1Accel = float(struct.unpack("f", reponse[18:22])[0]) + j2Accel = float(struct.unpack("f", reponse[22:26])[0]) + j3Accel = 
float(struct.unpack("f", reponse[26:30])[0]) + j4Accel = float(struct.unpack("f", reponse[30:34])[0]) + + return [j1Velocity, j2Velocity, j3Velocity, j4Velocity, j1Accel, j2Accel, j3Accel, j4Accel] + + +def setPTPCoordinateParams(xyzVelocity, rVelocity, xyzAccel, rAccel, isQueued = 0): + trame = bytearray() + + trame.insert(0, 0xAA) + trame.insert(1, 0xAA) + trame.insert(2, 0x12) + trame.insert(3, 0x51) + + ctrlByte = 1 + 2 * isQueued + trame.insert(4, ctrlByte) + + xyzVelocityParam = bytearray(struct.pack("f", xyzVelocity)) + + trame.insert(5, xyzVelocityParam[0]) + trame.insert(6, xyzVelocityParam[1]) + trame.insert(7, xyzVelocityParam[2]) + trame.insert(8, xyzVelocityParam[3]) + + rVelocityParam = bytearray(struct.pack("f", rVelocity)) + + trame.insert(9, rVelocityParam[0]) + trame.insert(10, rVelocityParam[1]) + trame.insert(11, rVelocityParam[2]) + trame.insert(12, rVelocityParam[3]) + + xyzAccelParam = bytearray(struct.pack("f", xyzAccel)) + + trame.insert(13, xyzAccelParam[0]) + trame.insert(14, xyzAccelParam[1]) + trame.insert(15, xyzAccelParam[2]) + trame.insert(16, xyzAccelParam[3]) + + rAccelParam = bytearray(struct.pack("f", rAccel)) + + trame.insert(17, rAccelParam[0]) + trame.insert(18, rAccelParam[1]) + trame.insert(19, rAccelParam[2]) + trame.insert(20, rAccelParam[3]) + + trame.insert(21, checksum(trame)) + + dobot.write(trame) + dobot.read(getResponseLength()) + + +def getPTPCoordinateParams(): + trame = bytearray() + + trame.insert(0, 0xAA) + trame.insert(1, 0xAA) + trame.insert(2, 0x02) + trame.insert(3, 0x51) + trame.insert(4, 0x00) + trame.insert(5, checksum(trame)) + + dobot.write(trame) + reponse = dobot.read(getResponseLength()) + + xyzVelocity = float(struct.unpack("f", reponse[2:6])[0]) + rVelocity = float(struct.unpack("f", reponse[6:10])[0]) + xyzAccel = float(struct.unpack("f", reponse[10:14])[0]) + rAccel = float(struct.unpack("f", reponse[14:18])[0]) + + return([xyzVelocity, rVelocity, xyzAccel, rAccel]) + + +def setPTPJumpParams(jumpHeight, zLimit, isQueued = 0): + trame = bytearray() + + trame.insert(0, 0xAA) + trame.insert(1, 0xAA) + trame.insert(2, 0x0A) + trame.insert(3, 0x52) + + ctrlByte = 1 + 2 * isQueued + trame.insert(4, ctrlByte) + + jumpHeightParam = bytearray(struct.pack("f", jumpHeight)) + + trame.insert(5, jumpHeightParam[0]) + trame.insert(6, jumpHeightParam[1]) + trame.insert(7, jumpHeightParam[2]) + trame.insert(8, jumpHeightParam[3]) + + zLimitParam = bytearray(struct.pack("f", zLimit)) + + trame.insert(9, zLimitParam[0]) + trame.insert(10, zLimitParam[1]) + trame.insert(11, zLimitParam[2]) + trame.insert(12, zLimitParam[3]) + + trame.insert(13, checksum(trame)) + + dobot.write(trame) + dobot.read(getResponseLength()) + + +def getPTPJumpParams(): + trame = bytearray() + + trame.insert(0, 0xAA) + trame.insert(1, 0xAA) + trame.insert(2, 0x02) + trame.insert(3, 0x52) + trame.insert(4, 0x00) + trame.insert(5, checksum(trame)) + + dobot.write(trame) + reponse = dobot.read(getResponseLength()) + + jumpHeight = struct.unpack("f", reponse[2:6])[0] + zLimit = struct.unpack("f", reponse[6:10])[0] + + return [jumpHeight, zLimit] + + +def setPTPCommonParams(velocityRatio, accelerationRatio, isQueued = 0): + trame = bytearray() + + trame.insert(0, 0xAA) + trame.insert(1, 0xAA) + trame.insert(2, 0x0A) + trame.insert(3, 0x53) + + ctrlByte = 1 + 2 * isQueued + trame.insert(4, ctrlByte) + + velocityRatioParam = bytearray(struct.pack("f", velocityRatio)) + + trame.insert(5, velocityRatioParam[0]) + trame.insert(6, velocityRatioParam[1]) + trame.insert(7, 
velocityRatioParam[2]) + trame.insert(8, velocityRatioParam[3]) + + accelerationRatioParam = bytearray(struct.pack("f", accelerationRatio)) + + trame.insert(9, accelerationRatioParam[0]) + trame.insert(10, accelerationRatioParam[1]) + trame.insert(11, accelerationRatioParam[2]) + trame.insert(12, accelerationRatioParam[3]) + + trame.insert(13, checksum(trame)) + + dobot.write(trame) + dobot.read(getResponseLength()) + + +def getPTPCommonParams(): + trame = bytearray() + + trame.insert(0, 0xAA) + trame.insert(1, 0xAA) + trame.insert(2, 0x02) + trame.insert(3, 0x53) + trame.insert(4, 0x00) + trame.insert(5, checksum(trame)) + + dobot.write(trame) + reponse = dobot.read(getResponseLength()) + + velocityRatio = struct.unpack("f", reponse[2:6])[0] + accelerationRatio = struct.unpack("f", reponse[6:10])[0] + + return [velocityRatio, accelerationRatio] + + +def setPTPCmd(ptpMode, x, y, z, r, isQueued = 0): + trame = bytearray() + + trame.insert(0, 0xAA) + trame.insert(1, 0xAA) + trame.insert(2, 0x13) + trame.insert(3, 0x54) + + ctrlByte = 1 + 2 * isQueued + trame.insert(4, ctrlByte) + + trame.insert(5, ptpMode) + + # Values of ptpMode : + + # ptpMode = 0 --> jUMP_XYZ | joint-jump mode in cartesian coordinate system by using absolute position + # ptpMode = 1 --> MOVj_XYZ | joint mode in cartesian coordinate system by using absolute position + # ptpMode = 2 --> MOVL_XYZ | Linear mode in cartesian coordinate system by using absolute position + + # ptpMode = 3 --> jUMP_ANGLE | jump mode in joint coordinate system by using absolute position + # ptpMode = 4 --> MOVj_ANGLE | joint mode in joint coordinate system by using absolute position + # ptpMode = 5 --> MOVL_ANGLE | Linear mode in joint coordinate system by using absolute position + + # ptpMode = 6 --> MOVj_INC | joint mode in joint coordinate system by using relative position + # ptpMode = 7 --> MOVL_INC | Linear mode in cartesian coordinate system by using relative position + # ptpMode = 8 --> MOVj_XYZ_INC | joint mode in cartesian coordinate system by using relative position + + # ptpMode = 9 --> jUMP_MOVL_XYZ | Linear-jump mode in cartesian coordinate system by using absolute position + + xParam = bytearray(struct.pack("f", x)) + + trame.insert(6, xParam[0]) + trame.insert(7, xParam[1]) + trame.insert(8, xParam[2]) + trame.insert(9, xParam[3]) + + yParam = bytearray(struct.pack("f", y)) + + trame.insert(10, yParam[0]) + trame.insert(11, yParam[1]) + trame.insert(12, yParam[2]) + trame.insert(13, yParam[3]) + + zParam = bytearray(struct.pack("f", z)) + + trame.insert(14, zParam[0]) + trame.insert(15, zParam[1]) + trame.insert(16, zParam[2]) + trame.insert(17, zParam[3]) + + rParam = bytearray(struct.pack("f", r)) + + trame.insert(18, rParam[0]) + trame.insert(19, rParam[1]) + trame.insert(20, rParam[2]) + trame.insert(21, rParam[3]) + + trame.insert(22, checksum(trame)) + + dobot.write(trame) + return dobot.read(getResponseLength()) + + +def setPTPJump2Params(startHeight, endHeight, isQueued = 0): + trame = bytearray() + + trame.insert(0, 0xAA) + trame.insert(1, 0xAA) + trame.insert(2, 0x0A) + trame.insert(3, 87) + + ctrlByte = 1 + 2 * isQueued + trame.insert(4, ctrlByte) + + startHeightParam = bytearray(struct.pack("f", startHeight)) + + trame.insert(5, startHeightParam[0]) + trame.insert(6, startHeightParam[1]) + trame.insert(7, startHeightParam[2]) + trame.insert(8, startHeightParam[3]) + + endHeightParam = bytearray(struct.pack("f", endHeight)) + + trame.insert(9, endHeightParam[0]) + trame.insert(10, endHeightParam[1]) + trame.insert(11, 
+ + +def getPTPJump2Params(): + trame = bytearray() + + trame.insert(0, 0xAA) + trame.insert(1, 0xAA) + trame.insert(2, 0x02) + trame.insert(3, 0x57) + trame.insert(4, 0x00) + trame.insert(5, checksum(trame)) + + dobot.write(trame) + reponse = dobot.read(getResponseLength()) + + startHeight = struct.unpack("f", reponse[2:6])[0] + endHeight = struct.unpack("f", reponse[6:10])[0] + zLimit = struct.unpack("f", reponse[10:14])[0] + + return [startHeight, endHeight, zLimit] + + +def setPTPPOCmd(ptpMode, x, y, z, r, ratio, address, level, isQueued = 0): + # Values of ptpMode : + # ptpMode = 0 --> JUMP_XYZ | jump mode in Cartesian coordinate system, using absolute position + # ptpMode = 1 --> MOVJ_XYZ | joint mode in Cartesian coordinate system, using absolute position + # ptpMode = 2 --> MOVL_XYZ | linear mode in Cartesian coordinate system, using absolute position + # ptpMode = 3 --> JUMP_ANGLE | jump mode in joint coordinate system, using absolute position + # ptpMode = 4 --> MOVJ_ANGLE | joint mode in joint coordinate system, using absolute position + # ptpMode = 5 --> MOVL_ANGLE | linear mode in joint coordinate system, using absolute position + # ptpMode = 6 --> MOVJ_INC | joint mode in joint coordinate system, using relative position + # ptpMode = 7 --> MOVL_INC | linear mode in Cartesian coordinate system, using relative position + # ptpMode = 8 --> MOVJ_XYZ_INC | joint mode in Cartesian coordinate system, using relative position + # ptpMode = 9 --> JUMP_MOVL_XYZ | linear-jump mode in Cartesian coordinate system, using absolute position + + trame = bytearray() + + trame.insert(0, 0xAA) + trame.insert(1, 0xAA) + + # Several I/Os can be commanded at once, so the frame length is variable + length = 19 + 4 * len(address) + trame.insert(2, length) + + trame.insert(3, 88) + + ctrlByte = 1 + 2 * isQueued + trame.insert(4, ctrlByte) + + trame.insert(5, ptpMode) + + xParam = bytearray(struct.pack("f", x)) + trame.insert(6, xParam[0]) + trame.insert(7, xParam[1]) + trame.insert(8, xParam[2]) + trame.insert(9, xParam[3]) + + yParam = bytearray(struct.pack("f", y)) + trame.insert(10, yParam[0]) + trame.insert(11, yParam[1]) + trame.insert(12, yParam[2]) + trame.insert(13, yParam[3]) + + zParam = bytearray(struct.pack("f", z)) + trame.insert(14, zParam[0]) + trame.insert(15, zParam[1]) + trame.insert(16, zParam[2]) + trame.insert(17, zParam[3]) + + rParam = bytearray(struct.pack("f", r)) + trame.insert(18, rParam[0]) + trame.insert(19, rParam[1]) + trame.insert(20, rParam[2]) + trame.insert(21, rParam[3]) + + for i in range(len(address)): + trame.insert(22 + 4 * i, ratio[i]) + # the address is, curiously, encoded on 2 bytes + trame.insert(23 + 4 * i, 0) + trame.insert(24 + 4 * i, address[i]) + trame.insert(25 + 4 * i, level[i]) + + # the checksum must follow the variable-length payload, not sit at a fixed index + trame.insert(22 + 4 * len(address), checksum(trame)) + + printByte(trame) # debug trace, to be removed + dobot.write(trame) + + printByte(dobot.read(getResponseLength()))
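Every setter above repeats the same framing: a 0xAA 0xAA header, a length byte covering ID + ctrl + payload, the command ID, a ctrl byte of 1 + 2 * isQueued, the payload, then a one-byte checksum. A hedged refactoring sketch of that pattern (not part of the patch; it reuses the file's existing checksum(), dobot and getResponseLength() helpers and assumes struct is imported as elsewhere):

    def send_frame(cmd_id, payload, isQueued=0):
        # Header, length (ID + ctrl + payload), command ID, ctrl byte
        trame = bytearray([0xAA, 0xAA, 2 + len(payload), cmd_id, 1 + 2 * isQueued])
        trame.extend(payload)
        trame.append(checksum(trame))  # checksum always comes last, whatever the payload length
        dobot.write(trame)
        return dobot.read(getResponseLength())

    # e.g. setPTPJumpParams(jumpHeight, zLimit, isQueued) would reduce to:
    # send_frame(0x52, struct.pack("<ff", jumpHeight, zLimit), isQueued)

Computing the checksum position from the payload length, as here, is exactly what avoids the fixed-index pitfall fixed in setPTPPOCmd above.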
+ + +def setCPParams(acceleration, velocity, accelLimit, accelMode, isQueued = 0): + trame = bytearray() + + trame.insert(0, 0xAA) + trame.insert(1, 0xAA) + trame.insert(2, 15) + trame.insert(3, 90) + + ctrlByte = 1 + 2 * isQueued + trame.insert(4, ctrlByte) + + accelerationParam = bytearray(struct.pack("f", acceleration)) + + trame.insert(5, accelerationParam[0]) + trame.insert(6, accelerationParam[1]) + trame.insert(7, accelerationParam[2]) + trame.insert(8, accelerationParam[3]) + + velocityParam = bytearray(struct.pack("f", velocity)) + + trame.insert(9, velocityParam[0]) + trame.insert(10, velocityParam[1]) + trame.insert(11, velocityParam[2]) + trame.insert(12, velocityParam[3]) + + accelLimitParam = bytearray(struct.pack("f", accelLimit)) + + trame.insert(13, accelLimitParam[0]) + trame.insert(14, accelLimitParam[1]) + trame.insert(15, accelLimitParam[2]) + trame.insert(16, accelLimitParam[3]) + + trame.insert(17, accelMode) + + # Values of accelMode : + + # accelMode = 0 --> motion with an acceleration ramp + # accelMode = 1 --> motion without an acceleration ramp (use a period of at least 1000 and a maximum acceleration of 2000) + + trame.insert(18, checksum(trame)) + + dobot.write(trame) + dobot.read(getResponseLength()) + + +def getCPParams(): + trame = bytearray() + + trame.insert(0, 0xAA) + trame.insert(1, 0xAA) + trame.insert(2, 0x02) + trame.insert(3, 90) + trame.insert(4, 0x00) + trame.insert(5, checksum(trame)) + + dobot.write(trame) + reponse = dobot.read(getResponseLength()) + + acceleration = struct.unpack("f", reponse[2:6])[0] + velocity = struct.unpack("f", reponse[6:10])[0] + accelLimit = struct.unpack("f", reponse[10:14])[0] + accelMode = int(reponse[14]) + + return [acceleration, velocity, accelLimit, accelMode] + + +def setCPCmd(cpMode, x, y, z, velocity, isQueued = 0): + trame = bytearray() + + trame.insert(0, 0xAA) + trame.insert(1, 0xAA) + trame.insert(2, 19) + trame.insert(3, 91) + + ctrlByte = 1 + 2 * isQueued + trame.insert(4, ctrlByte) + + trame.insert(5, cpMode) + + # Values of cpMode : + + # cpMode = 0 --> moving using relative position + # cpMode = 1 --> moving using absolute position + + xParam = bytearray(struct.pack("f", x)) + + trame.insert(6, xParam[0]) + trame.insert(7, xParam[1]) + trame.insert(8, xParam[2]) + trame.insert(9, xParam[3]) + + yParam = bytearray(struct.pack("f", y)) + + trame.insert(10, yParam[0]) + trame.insert(11, yParam[1]) + trame.insert(12, yParam[2]) + trame.insert(13, yParam[3]) + + zParam = bytearray(struct.pack("f", z)) + + trame.insert(14, zParam[0]) + trame.insert(15, zParam[1]) + trame.insert(16, zParam[2]) + trame.insert(17, zParam[3]) + + velocityParam = bytearray(struct.pack("f", velocity)) + + trame.insert(18, velocityParam[0]) + trame.insert(19, velocityParam[1]) + trame.insert(20, velocityParam[2]) + trame.insert(21, velocityParam[3]) + + trame.insert(22, checksum(trame)) + + dobot.write(trame) + dobot.read(getResponseLength())
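setCPCmd is the continuous-path primitive that vector_draw.py, later in this patch, uses for pen strokes. A small illustrative sequence (all values arbitrary, for the sketch only):

    setCPParams(50, 50, 50, 0)        # ramped acceleration profile
    setCPCmd(1, 150, 0, -30, 1, 1)    # absolute move, queued: pen down at z = -30
    setCPCmd(1, 160, 10, -30, 1, 1)   # next stroke point at the same depth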
+ + +def setARCParams(xyzVelocity, rVelocity, xyzAccel, rAccel, isQueued = 0): + trame = bytearray() + + trame.insert(0, 0xAA) + trame.insert(1, 0xAA) + trame.insert(2, 18) + trame.insert(3, 100) + + ctrlByte = 1 + 2 * isQueued + trame.insert(4, ctrlByte) + + xyzVelocityParam = bytearray(struct.pack("f", xyzVelocity)) + + trame.insert(5, xyzVelocityParam[0]) + trame.insert(6, xyzVelocityParam[1]) + trame.insert(7, xyzVelocityParam[2]) + trame.insert(8, xyzVelocityParam[3]) + + rVelocityParam = bytearray(struct.pack("f", rVelocity)) + + trame.insert(9, rVelocityParam[0]) + trame.insert(10, rVelocityParam[1]) + trame.insert(11, rVelocityParam[2]) + trame.insert(12, rVelocityParam[3]) + + xyzAccelParam = bytearray(struct.pack("f", xyzAccel)) + + trame.insert(13, xyzAccelParam[0]) + trame.insert(14, xyzAccelParam[1]) + trame.insert(15, xyzAccelParam[2]) + trame.insert(16, xyzAccelParam[3]) + + rAccelParam = bytearray(struct.pack("f", rAccel)) + + trame.insert(17, rAccelParam[0]) + trame.insert(18, rAccelParam[1]) + trame.insert(19, rAccelParam[2]) + trame.insert(20, rAccelParam[3]) + + trame.insert(21, checksum(trame)) + + dobot.write(trame) + dobot.read(getResponseLength()) + + +def getARCParams(): + trame = bytearray() + + trame.insert(0, 0xAA) + trame.insert(1, 0xAA) + trame.insert(2, 0x02) + trame.insert(3, 100) + trame.insert(4, 0x00) + trame.insert(5, checksum(trame)) + + dobot.write(trame) + reponse = dobot.read(getResponseLength()) + + xyzVelocity = float(struct.unpack("f", reponse[2:6])[0]) + rVelocity = float(struct.unpack("f", reponse[6:10])[0]) + xyzAccel = float(struct.unpack("f", reponse[10:14])[0]) + rAccel = float(struct.unpack("f", reponse[14:18])[0]) + + return([xyzVelocity, rVelocity, xyzAccel, rAccel]) + + +def setARCCmd(xStart, yStart, zStart, rStart, xEnd, yEnd, zEnd, rEnd, isQueued = 0): + trame = bytearray() + + trame.insert(0, 0xAA) + trame.insert(1, 0xAA) + trame.insert(2, 34) + trame.insert(3, 101) + + ctrlByte = 1 + 2 * isQueued + trame.insert(4, ctrlByte) + + xStartParam = bytearray(struct.pack("f", xStart)) + + trame.insert(5, xStartParam[0]) + trame.insert(6, xStartParam[1]) + trame.insert(7, xStartParam[2]) + trame.insert(8, xStartParam[3]) + + yStartParam = bytearray(struct.pack("f", yStart)) + + trame.insert(9, yStartParam[0]) + trame.insert(10, yStartParam[1]) + trame.insert(11, yStartParam[2]) + trame.insert(12, yStartParam[3]) + + zStartParam = bytearray(struct.pack("f", zStart)) + + trame.insert(13, zStartParam[0]) + trame.insert(14, zStartParam[1]) + trame.insert(15, zStartParam[2]) + trame.insert(16, zStartParam[3]) + + rStartParam = bytearray(struct.pack("f", rStart)) + + trame.insert(17, rStartParam[0]) + trame.insert(18, rStartParam[1]) + trame.insert(19, rStartParam[2]) + trame.insert(20, rStartParam[3]) + + xEndParam = bytearray(struct.pack("f", xEnd)) + + trame.insert(21, xEndParam[0]) + trame.insert(22, xEndParam[1]) + trame.insert(23, xEndParam[2]) + trame.insert(24, xEndParam[3]) + + yEndParam = bytearray(struct.pack("f", yEnd)) + + trame.insert(25, yEndParam[0]) + trame.insert(26, yEndParam[1]) + trame.insert(27, yEndParam[2]) + trame.insert(28, yEndParam[3]) + + zEndParam = bytearray(struct.pack("f", zEnd)) + + trame.insert(29, zEndParam[0]) + trame.insert(30, zEndParam[1]) + trame.insert(31, zEndParam[2]) + trame.insert(32, zEndParam[3]) + + rEndParam = bytearray(struct.pack("f", rEnd)) + + trame.insert(33, rEndParam[0]) + trame.insert(34, rEndParam[1]) + trame.insert(35, rEndParam[2]) + trame.insert(36, rEndParam[3]) + + trame.insert(37, checksum(trame)) + + dobot.write(trame) + dobot.read(getResponseLength()) + + +def setWaitCmd(milliseconds, isQueued = 0): + trame = bytearray() + + trame.insert(0, 0xAA) + trame.insert(1, 0xAA) + trame.insert(2, 0x06) + trame.insert(3, 0x6E) + + ctrlByte = 1 + 2 * isQueued + trame.insert(4, ctrlByte) + + millisecondsParam = bytearray(struct.pack("<I", milliseconds)) # unsigned 4-byte little-endian; the size of "l" varies by platform + + trame.insert(5, millisecondsParam[0]) + trame.insert(6, millisecondsParam[1]) + trame.insert(7, millisecondsParam[2]) + trame.insert(8, millisecondsParam[3]) + + trame.insert(9, checksum(trame)) + + dobot.write(trame) + dobot.read(getResponseLength())
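A queued sequence mixing the primitives above, as an illustrative sketch only (all coordinates arbitrary; see the Dobot protocol documentation for the exact roles of the two poses passed to setARCCmd):

    setARCParams(100, 100, 100, 100)
    setARCCmd(200, 0, 0, 0, 200, 50, 0, 0, 1)   # arc defined by the two poses, queued
    setWaitCmd(500, 1)                          # 500 ms pause inserted into the command queue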
+ + +def setTRIGCmd(adress, mode, condition, valeur, isQueued = 0): + trame = bytearray() + + trame.insert(0, 0xAA) + trame.insert(1, 0xAA) + trame.insert(2, 0x07) + trame.insert(3, 0x78) + + ctrlByte = 1 + 2 * isQueued + trame.insert(4, ctrlByte) + + addressParam = bytearray(struct.pack("i", adress)) + # EIO address 1 ~ 20 (see the Dobot Magician Interface Description) + + trame.insert(5, addressParam[0]) + + trame.insert(6, mode) + # Triggering mode : + # mode = 0 --> Level (all-or-nothing digital input) + # mode = 1 --> ADC + + trame.insert(7, condition) + # Triggering condition : + # condition = 0 --> < (less than) + # condition = 1 --> <= (less than or equal) + # condition = 2 --> >= (greater than or equal) + # condition = 3 --> > (greater than) + + valeurParam = bytearray(struct.pack("h", valeur)) + + trame.insert(8, valeurParam[0]) + trame.insert(9, valeurParam[1]) + + trame.insert(10, checksum(trame)) + + dobot.write(trame) + + return dobot.read(getResponseLength()) + + +def setIOMultiplexing(address, function, isQueued = 0): + trame = bytearray() + + trame.insert(0, 0xAA) + trame.insert(1, 0xAA) + trame.insert(2, 0x04) + trame.insert(3, 0x82) + + ctrlByte = 1 + 2 * isQueued + trame.insert(4, ctrlByte) + + trame.insert(5, address) # EIO addressing (value range 1~20) + trame.insert(6, function) + # EIO function : + # IOFunctionDummy; // Invalid = 0 + # IOFunctionDO; // I/O output = 1 + # IOFunctionPWM; // PWM output = 2 + # IOFunctionDI; // I/O input = 3 + # IOFunctionADC; // A/D input = 4 + # IOFunctionDIPU; // Pull-up input = 5 + # IOFunctionDIPD // Pull-down input = 6 + + trame.insert(7, checksum(trame)) + + dobot.write(trame) + dobot.read(getResponseLength()) + + +def getIOMultiplexing(address): + # The documentation is incorrect: an address must be passed as a parameter + trame = bytearray() + + trame.insert(0, 0xAA) + trame.insert(1, 0xAA) + trame.insert(2, 0x04) + trame.insert(3, 0x82) + trame.insert(4, 0x00) + trame.insert(5, address) + trame.insert(6, 0x00) # byte required in place of the pin mode + + trame.insert(7, checksum(trame)) + + dobot.write(trame) + response = dobot.read(getResponseLength()) + + function = int(response[3]) + return function + + +def setIODO(address, level, isQueued = 0): + trame = bytearray() + + trame.insert(0, 0xAA) + trame.insert(1, 0xAA) + trame.insert(2, 0x04) + trame.insert(3, 0x83) + + ctrlByte = 1 + 2 * isQueued + trame.insert(4, ctrlByte) + + trame.insert(5, address) + trame.insert(6, level) # level = 0 or 1 + + trame.insert(7, checksum(trame)) + + dobot.write(trame) + dobot.read(getResponseLength()) + + +def getIODO(address): + trame = bytearray() + + trame.insert(0, 0xAA) + trame.insert(1, 0xAA) + trame.insert(2, 0x04) + trame.insert(3, 0x83) + trame.insert(4, 0x00) + + trame.insert(5, address) + trame.insert(6, 0x00) + + trame.insert(7, checksum(trame)) + + dobot.write(trame) + response = dobot.read(getResponseLength()) + + level = int(response[3]) + return level + + +def setIOPWM(address, frequency, dutyRatio, isQueued = 0): + trame = bytearray() + + trame.insert(0, 0xAA) + trame.insert(1, 0xAA) + trame.insert(2, 0x0B) + trame.insert(3, 0x84) + + ctrlByte = 1 + 2 * isQueued + trame.insert(4, ctrlByte) + + trame.insert(5, address) + + frequencyParam = bytearray(struct.pack("f", frequency)) + # 10 Hz ~ 1 MHz /!\ float + trame.insert(6, frequencyParam[0]) + trame.insert(7, frequencyParam[1]) + trame.insert(8, frequencyParam[2]) + trame.insert(9, frequencyParam[3]) + + # Pin EIO6 is inverted for an unknown reason /!\ + if address == 6: + dutyRatio = 100 - dutyRatio + + dutyRatioParam = bytearray(struct.pack("f", dutyRatio)) + # 0 ~ 100 /!\ float + trame.insert(10, dutyRatioParam[0]) + trame.insert(11, dutyRatioParam[1]) + trame.insert(12, dutyRatioParam[2]) + trame.insert(13, dutyRatioParam[3]) + + trame.insert(14, checksum(trame)) + + dobot.write(trame) + dobot.read(getResponseLength())
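setIOMultiplexing decides what role an EIO pin plays before the level or PWM setters are used. An illustrative pin setup, not part of the patch (pin numbers arbitrary, within the documented 1~20 range):

    setIOMultiplexing(4, 2)      # configure EIO4 as a PWM output
    setIOPWM(4, 1000.0, 25.0)    # 1 kHz at 25 % duty cycle (both floats, per the comments above)
    setIOMultiplexing(7, 1)      # configure EIO7 as a digital output
    setIODO(7, 1)                # drive it high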
+ + +def getIOPWM(address): + # The documentation is incorrect here as well + trame = bytearray() + + trame.insert(0, 0xAA) + trame.insert(1, 0xAA) + trame.insert(2, 0x0B) + trame.insert(3, 0x84) + trame.insert(4, 0x00) + trame.insert(5, address) + + for i in range(6, 14): + trame.insert(i, 0x00) + + trame.insert(14, checksum(trame)) + + dobot.write(trame) + reponse = dobot.read(getResponseLength()) + + frequency = round(struct.unpack("f", reponse[3:7])[0], 2) + dutyCycle = round(struct.unpack("f", reponse[7:11])[0], 2) + + # Pin EIO6 is inverted for an unknown reason /!\ + if address == 6: + dutyCycle = 100 - dutyCycle + + return [frequency, dutyCycle] + + +def getIODI(address): + # The documentation is incorrect + trame = bytearray() + + trame.insert(0, 0xAA) + trame.insert(1, 0xAA) + trame.insert(2, 0x04) + trame.insert(3, 0x85) + trame.insert(4, 0x00) + trame.insert(5, address) + trame.insert(6, 0x00) + + trame.insert(7, checksum(trame)) + + dobot.write(trame) + response = dobot.read(getResponseLength()) + + level = response[3] + return level + + +def getIOADC(address): + # The documentation is incorrect + trame = bytearray() + + trame.insert(0, 0xAA) + trame.insert(1, 0xAA) + trame.insert(2, 0x05) + trame.insert(3, 0x86) + trame.insert(4, 0x00) + trame.insert(5, address) + trame.insert(6, 0x00) + trame.insert(7, 0x00) + + trame.insert(8, checksum(trame)) + + dobot.write(trame) + response = dobot.read(getResponseLength()) + + valeur = struct.unpack("h", response[3:5])[0] + return valeur + + +def setEMotor(stepper, onOff, speed, isQueued = 0): + trame = bytearray() + + trame.insert(0, 0xAA) + trame.insert(1, 0xAA) + trame.insert(2, 0x08) + trame.insert(3, 0x87) + + ctrlByte = 1 + 2 * isQueued + trame.insert(4, ctrlByte) + + trame.insert(5, stepper) + # Number of the port the motor is connected to : + # stepper = 0 --> stepper 1 + # stepper = 1 --> stepper 2 + + trame.insert(6, onOff) + # onOff = 1 --> Motor On + # onOff = 0 --> Motor Off + + speedParam = bytearray(struct.pack("<i", speed)) # signed 4-byte little-endian; the size of "l" varies by platform + # Speed in pulses/s + # 10 000 pulses/s = 71 mm/s + # Exceeding 20 000 pulses/s is not recommended + + trame.insert(7, speedParam[0]) + trame.insert(8, speedParam[1]) + trame.insert(9, speedParam[2]) + trame.insert(10, speedParam[3]) + + trame.insert(11, checksum(trame)) + + dobot.write(trame) + dobot.read(getResponseLength()) + + +def setColorSensor(onOff, port, version, isQueued = 0): + trame = bytearray() + + trame.insert(0, 0xAA) + trame.insert(1, 0xAA) + trame.insert(2, 0x05) + trame.insert(3, 0x89) + + ctrlByte = 1 + 2 * isQueued + trame.insert(4, ctrlByte) + + trame.insert(5, onOff) + # onOff = 1 --> Sensor On + # onOff = 0 --> Sensor Off + + trame.insert(6, port) + # Number of the port the sensor is connected to : + # port = 0 --> GP1 + # port = 1 --> GP2 + # port = 2 --> GP4 + # port = 3 --> GP5 + + trame.insert(7, version) + # Color Sensor version + # version = 0 --> version V1.0 + # version = 1 --> version V2.0 + + trame.insert(8, checksum(trame)) + + dobot.write(trame) + dobot.read(getResponseLength()) + + +def getColorSensor(): + trame = bytearray() + + trame.insert(0, 0xAA) + trame.insert(1, 0xAA) + trame.insert(2, 0x02) + trame.insert(3, 0x89) + trame.insert(4, 0x00) + + trame.insert(5, checksum(trame)) + + dobot.write(trame) + reponse = dobot.read(getResponseLength()) + + r = reponse[2] + g = reponse[3] + b = reponse[4] + + return [r, g, b]
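The pulses-to-millimetres ratio quoted in setEMotor (10 000 pulses/s ≈ 71 mm/s) gives a convenient conversion for conveyor-style use. A sketch under that assumption (the helper name and wrapper are mine, not part of the patch):

    def set_conveyor_speed_mm_s(mm_per_s):
        # 10 000 pulses/s corresponds to about 71 mm/s of belt travel
        pulses = int(mm_per_s * 10000 / 71)
        pulses = max(-20000, min(20000, pulses))  # stay within the recommended limit
        setEMotor(0, 1, pulses)                   # stepper 1, motor on

    set_conveyor_speed_mm_s(35)  # roughly half the 71 mm/s reference speed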
+ + +def setIRSwitch(onOff, port, version, isQueued = 0): + # The documentation is incorrect + trame = bytearray() + + trame.insert(0, 0xAA) + trame.insert(1, 0xAA) + trame.insert(2, 0x05) + trame.insert(3, 0x8A) + + ctrlByte = 1 + 2 * isQueued + trame.insert(4, ctrlByte) + + trame.insert(5, onOff) + # Off = 0 + # On = 1 + + trame.insert(6, port) + # Number of the port the sensor is connected to : + # port = 0 --> GP1 + # port = 1 --> GP2 + # port = 2 --> GP4 + # port = 3 --> GP5 + + trame.insert(7, version) + # IR Sensor version + # version = 0 --> version V1.0 + # version = 1 --> version V2.0 + + trame.insert(8, checksum(trame)) + + dobot.write(trame) + dobot.read(getResponseLength()) + + +def getIRSwitch(port): + # The documentation is incorrect + trame = bytearray() + + trame.insert(0, 0xAA) + trame.insert(1, 0xAA) + trame.insert(2, 0x04) + trame.insert(3, 0x8A) + trame.insert(4, 0x00) + trame.insert(5, port) + trame.insert(6, 0x00) + + trame.insert(7, checksum(trame)) + + dobot.write(trame) + response = dobot.read(getResponseLength()) + + output = int(response[2]) + return output + + +def setAngleSensorStaticError(rearArmAngleError, frontArmAngleError, isQueued = 0): + trame = bytearray() + + trame.insert(0, 0xAA) + trame.insert(1, 0xAA) + trame.insert(2, 10) + trame.insert(3, 140) + + ctrlByte = 1 + 2 * isQueued + trame.insert(4, ctrlByte) + + rearArmAngleErrorParam = bytearray(struct.pack("f", rearArmAngleError)) + + trame.insert(5, rearArmAngleErrorParam[0]) + trame.insert(6, rearArmAngleErrorParam[1]) + trame.insert(7, rearArmAngleErrorParam[2]) + trame.insert(8, rearArmAngleErrorParam[3]) + + frontArmAngleErrorParam = bytearray(struct.pack("f", frontArmAngleError)) + + trame.insert(9, frontArmAngleErrorParam[0]) + trame.insert(10, frontArmAngleErrorParam[1]) + trame.insert(11, frontArmAngleErrorParam[2]) + trame.insert(12, frontArmAngleErrorParam[3]) + + trame.insert(13, checksum(trame)) + + dobot.write(trame) + dobot.read(getResponseLength()) + + +def getAngleSensorStaticError(): + trame = bytearray() + + trame.insert(0, 0xAA) + trame.insert(1, 0xAA) + trame.insert(2, 0x02) + trame.insert(3, 140) + trame.insert(4, 0x00) + trame.insert(5, checksum(trame)) + + dobot.write(trame) + reponse = dobot.read(getResponseLength()) + + rearArmAngleError = float(struct.unpack("f", reponse[2:6])[0]) + frontArmAngleError = float(struct.unpack("f", reponse[6:10])[0]) + + return [rearArmAngleError, frontArmAngleError] + + +def setWIFIConfigMode(isEnabled, isQueued = 0): + trame = bytearray() + + trame.insert(0, 0xAA) + trame.insert(1, 0xAA) + trame.insert(2, 3) + trame.insert(3, 150) + + ctrlByte = 1 + 2 * isQueued + trame.insert(4, ctrlByte) + + trame.insert(5, isEnabled) + + trame.insert(6, checksum(trame)) + + dobot.write(trame) + dobot.read(getResponseLength()) + + +def getWIFIConfigMode(): + trame = bytearray() + + trame.insert(0, 0xAA) + trame.insert(1, 0xAA) + trame.insert(2, 0x02) + trame.insert(3, 150) + trame.insert(4, 0x00) + trame.insert(5, checksum(trame)) + + dobot.write(trame) + reponse = dobot.read(getResponseLength()) + + isEnabled = int(reponse[2]) + return isEnabled + + +def setWIFISSID(ssid, isQueued = 0): + trame = bytearray() + + trame.insert(0, 0xAA) + trame.insert(1, 0xAA) + trame.insert(2, 2+len(ssid)) + trame.insert(3, 151) + + ctrlByte = 1 + 2 * isQueued + trame.insert(4, ctrlByte) + + ssidByte = str.encode(ssid) + + for i in range(0, len(ssid)): + trame.insert(5+i, ssidByte[i]) + + trame.insert(5+len(ssid), checksum(trame)) + + dobot.write(trame) + dobot.read(getResponseLength()) + + +def getWIFISSID(): + trame = bytearray() + + trame.insert(0, 0xAA) + trame.insert(1, 0xAA) + trame.insert(2, 0x02) + trame.insert(3, 151) + trame.insert(4, 0x00) + trame.insert(5, checksum(trame)) + + dobot.write(trame) + longueur = getResponseLength() + reponse = dobot.read(longueur)
+ + ssid = reponse[2:longueur-1].decode() + return ssid + + +def setWIFIPassword(password, isQueued = 0): + trame = bytearray() + + trame.insert(0, 0xAA) + trame.insert(1, 0xAA) + trame.insert(2, 2+len(password)) + trame.insert(3, 152) + + ctrlByte = 1 + 2 * isQueued + trame.insert(4, ctrlByte) + + passwordByte = str.encode(password) + + for i in range(0, len(password)): + trame.insert(5+i, passwordByte[i]) + + trame.insert(5+len(password), checksum(trame)) + + dobot.write(trame) + dobot.read(getResponseLength()) + + +def getWIFIPassword(): + trame = bytearray() + + trame.insert(0, 0xAA) + trame.insert(1, 0xAA) + trame.insert(2, 0x02) + trame.insert(3, 152) + trame.insert(4, 0x00) + trame.insert(5, checksum(trame)) + + dobot.write(trame) + longueur = getResponseLength() + reponse = dobot.read(longueur) + + password = reponse[2:longueur-1].decode() + return password + + +def setWIFIIPAdress(dhcp, ipAdressByte1, ipAdressByte2, ipAdressByte3, ipAdressByte4, isQueued = 0): + trame = bytearray() + + trame.insert(0, 0xAA) + trame.insert(1, 0xAA) + trame.insert(2, 7) + trame.insert(3, 153) + + ctrlByte = 1 + 2 * isQueued + trame.insert(4, ctrlByte) + + trame.insert(5, dhcp) + trame.insert(6, ipAdressByte1) + trame.insert(7, ipAdressByte2) + trame.insert(8, ipAdressByte3) + trame.insert(9, ipAdressByte4) + + trame.insert(10, checksum(trame)) + + dobot.write(trame) + dobot.read(getResponseLength()) + + +def getWIFIIPAdress(): + trame = bytearray() + + trame.insert(0, 0xAA) + trame.insert(1, 0xAA) + trame.insert(2, 0x02) + trame.insert(3, 153) + trame.insert(4, 0x00) + trame.insert(5, checksum(trame)) + + dobot.write(trame) + reponse = dobot.read(getResponseLength()) + + dhcp = reponse[2] + ipAdressByte1 = reponse[3] + ipAdressByte2 = reponse[4] + ipAdressByte3 = reponse[5] + ipAdressByte4 = reponse[6] + + return [dhcp, ipAdressByte1, ipAdressByte2, ipAdressByte3, ipAdressByte4] + + +def setWIFINetmask(netmaskByte1, netmaskByte2, netmaskByte3, netmaskByte4, isQueued = 0): + trame = bytearray() + + trame.insert(0, 0xAA) + trame.insert(1, 0xAA) + trame.insert(2, 6) + trame.insert(3, 154) + + ctrlByte = 1 + 2 * isQueued + trame.insert(4, ctrlByte) + + trame.insert(5, netmaskByte1) + trame.insert(6, netmaskByte2) + trame.insert(7, netmaskByte3) + trame.insert(8, netmaskByte4) + + trame.insert(9, checksum(trame)) + + dobot.write(trame) + dobot.read(getResponseLength()) + + +def getWIFINetmask(): + trame = bytearray() + + trame.insert(0, 0xAA) + trame.insert(1, 0xAA) + trame.insert(2, 0x02) + trame.insert(3, 154) + trame.insert(4, 0x00) + trame.insert(5, checksum(trame)) + + dobot.write(trame) + reponse = dobot.read(getResponseLength()) + + netmask = [reponse[2], reponse[3], reponse[4], reponse[5]] + return netmask + + +def setWIFIGateway(gatewayByte1, gatewayByte2, gatewayByte3, gatewayByte4, isQueued = 0): + trame = bytearray() + + trame.insert(0, 0xAA) + trame.insert(1, 0xAA) + trame.insert(2, 6) + trame.insert(3, 155) + + ctrlByte = 1 + 2 * isQueued + trame.insert(4, ctrlByte) + + trame.insert(5, gatewayByte1) + trame.insert(6, gatewayByte2) + trame.insert(7, gatewayByte3) + trame.insert(8, gatewayByte4) + + trame.insert(9, checksum(trame)) + + dobot.write(trame) + dobot.read(getResponseLength()) + + +def getWIFIGateway(): + trame = bytearray() + + trame.insert(0, 0xAA) + trame.insert(1, 0xAA) + trame.insert(2, 0x02) + trame.insert(3, 155) + trame.insert(4, 0x00) + trame.insert(5, checksum(trame)) + + dobot.write(trame) + reponse = dobot.read(getResponseLength()) + + gateway = [reponse[2], reponse[3], 
reponse[4], reponse[5]] + return gateway + + +def setWIFIDNS(dnsByte1, dnsByte2, dnsByte3, dnsByte4, isQueued = 0): + trame = bytearray() + + trame.insert(0, 0xAA) + trame.insert(1, 0xAA) + trame.insert(2, 6) + trame.insert(3, 156) + + ctrlByte = 1 + 2 * isQueued + trame.insert(4, ctrlByte) + + trame.insert(5, dnsByte1) + trame.insert(6, dnsByte2) + trame.insert(7, dnsByte3) + trame.insert(8, dnsByte4) + + trame.insert(9, checksum(trame)) + + dobot.write(trame) + dobot.read(getResponseLength()) + + +def getWIFIDNS(): + trame = bytearray() + + trame.insert(0, 0xAA) + trame.insert(1, 0xAA) + trame.insert(2, 0x02) + trame.insert(3, 156) + trame.insert(4, 0x00) + trame.insert(5, checksum(trame)) + + dobot.write(trame) + reponse = dobot.read(getResponseLength()) + + dns = [reponse[2], reponse[3], reponse[4], reponse[5]] + return dns + + +def getWIFIConnectStatus(): + trame = bytearray() + + trame.insert(0, 0xAA) + trame.insert(1, 0xAA) + trame.insert(2, 0x02) + trame.insert(3, 157) + trame.insert(4, 0x00) + trame.insert(5, checksum(trame)) + + dobot.write(trame) + longueur = getResponseLength() + reponse = dobot.read(longueur) + + printByte(reponse) # debug trace + + isEnabled = int(reponse[2]) + return isEnabled + + +def setLostStepParams(losingStep, isQueued = 0): + trame = bytearray() + + trame.insert(0, 0xAA) + trame.insert(1, 0xAA) + trame.insert(2, 0x06) # length must cover the 4-byte float payload (was 0x02) + trame.insert(3, 170) + + ctrlByte = 1 + 2 * isQueued + trame.insert(4, ctrlByte) + + losingStepParam = bytearray(struct.pack("f", losingStep)) + + trame.insert(5, losingStepParam[0]) + trame.insert(6, losingStepParam[1]) + trame.insert(7, losingStepParam[2]) + trame.insert(8, losingStepParam[3]) + trame.insert(9, checksum(trame)) # the checksum goes after the payload (was inserted at index 5, shifting the payload bytes) + + dobot.write(trame) + dobot.read(getResponseLength()) + + +def setLostStepCmd(isQueued = 0): + trame = bytearray() + + trame.insert(0, 0xAA) + trame.insert(1, 0xAA) + trame.insert(2, 0x02) + trame.insert(3, 171) + + ctrlByte = 1 + 2 * isQueued + trame.insert(4, ctrlByte) + + trame.insert(5, checksum(trame)) + + dobot.write(trame) + dobot.read(getResponseLength()) + + +def setQueuedCmdStartExec(): + trame = bytearray() + + trame.insert(0, 0xAA) + trame.insert(1, 0xAA) + trame.insert(2, 0x02) + trame.insert(3, 240) + trame.insert(4, 0x01) + trame.insert(5, checksum(trame)) + + dobot.write(trame) + dobot.read(getResponseLength()) + + +def setQueuedCmdStopExec(): + trame = bytearray() + + trame.insert(0, 0xAA) + trame.insert(1, 0xAA) + trame.insert(2, 0x02) + trame.insert(3, 241) + trame.insert(4, 0x01) + trame.insert(5, checksum(trame)) + + dobot.write(trame) + dobot.read(getResponseLength()) + + +def setQueuedCmdForceStopExec(): + trame = bytearray() + + trame.insert(0, 0xAA) + trame.insert(1, 0xAA) + trame.insert(2, 0x02) + trame.insert(3, 242) + trame.insert(4, 0x01) + trame.insert(5, checksum(trame)) + + dobot.write(trame) + dobot.read(getResponseLength()) + + +def setQueuedCmdStartDownload(totalLoop, linePerLoop): + trame = bytearray() + + trame.insert(0, 0xAA) + trame.insert(1, 0xAA) + trame.insert(2, 0x0A) # length must cover the 8-byte payload (was 0x02) + trame.insert(3, 243) + trame.insert(4, 0x01) + + totalLoopParam = bytearray(struct.pack("<I", totalLoop)) # loop counts are packed as 4-byte unsigned integers, not floats + + trame.insert(5, totalLoopParam[0]) + trame.insert(6, totalLoopParam[1]) + trame.insert(7, totalLoopParam[2]) + trame.insert(8, totalLoopParam[3]) + + linePerLoopParam = bytearray(struct.pack("<I", linePerLoop)) + + trame.insert(9, linePerLoopParam[0]) + trame.insert(10, linePerLoopParam[1]) + trame.insert(11, linePerLoopParam[2]) + trame.insert(12, linePerLoopParam[3]) + + trame.insert(13, checksum(trame)) + + dobot.write(trame) + dobot.read(getResponseLength())
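The queued-command functions give the typical execution pattern for a batch of motions: clear the queue, enqueue commands with isQueued=1, start execution, then poll the current index. A sketch of that flow (values illustrative, not part of the patch):

    setQueuedCmdClear()
    setPTPCmd(2, 200, 0, 50, 0, isQueued=1)   # enqueue a linear move
    setWaitCmd(200, isQueued=1)               # enqueue a 200 ms pause
    setQueuedCmdStartExec()
    index, _ = getQueuedCmdCurrentIndex()     # poll to see how far execution has progressed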
+ + +def setQueuedCmdStopDownload(): + trame = bytearray() + + trame.insert(0, 0xAA) + trame.insert(1, 0xAA) + trame.insert(2, 0x02) + trame.insert(3, 244) + trame.insert(4, 0x01) + trame.insert(5, checksum(trame)) + + dobot.write(trame) + dobot.read(getResponseLength()) + + +def setQueuedCmdClear(): + trame = bytearray() + + trame.insert(0, 0xAA) + trame.insert(1, 0xAA) + trame.insert(2, 0x02) + trame.insert(3, 0xF5) + trame.insert(4, 0x01) + trame.insert(5, checksum(trame)) + + dobot.write(trame) + dobot.read(getResponseLength()) + + +def getQueuedCmdCurrentIndex(): + trame = bytearray() + + trame.insert(0, 0xAA) + trame.insert(1, 0xAA) + trame.insert(2, 0x02) + trame.insert(3, 246) + trame.insert(4, 0x00) + trame.insert(5, checksum(trame)) + + dobot.write(trame) + reponse = dobot.read(getResponseLength()) + + index = int(struct.unpack("i", reponse[2:6])[0]) + increment = float(struct.unpack("i", reponse[6:10])[0]) + + return [index, increment] + + +# closes the serial port +def close(): + dobot.close() diff --git a/Project/main.py b/Project/main.py new file mode 100644 index 0000000..bedeb69 --- /dev/null +++ b/Project/main.py @@ -0,0 +1,267 @@ +import cv2 +import tkinter as tk +import mediapipe as mp +import numpy as np +import os +import math +from rembg import remove +from PIL import Image +#import dobot + +import vector_draw + +# Load images with transparency +mario_hat_image_path = "Filters/Mario hat.png" +sunglasses_image_path = "Filters/Glasses.png" +moustache_image_path = "Filters/MoustacheMario.png" + +# Load images +mario_hat = cv2.imread(mario_hat_image_path, cv2.IMREAD_UNCHANGED) +sunglasses = cv2.imread(sunglasses_image_path, cv2.IMREAD_UNCHANGED) +moustache = cv2.imread(moustache_image_path, cv2.IMREAD_UNCHANGED) + +# Check if images were loaded correctly +if mario_hat is None: + print("Error: Mario hat image not found.") + exit() +if sunglasses is None: + print("Error: Sunglasses image not found.") + exit() +if moustache is None: + print("Error: Moustache image not found.") + exit() + +# Initialize MediaPipe FaceMesh +mp_face_mesh = mp.solutions.face_mesh +face_mesh = mp_face_mesh.FaceMesh(min_detection_confidence=0.5, min_tracking_confidence=0.5) + +# Variables for toggling filters +mario_hat_active = False +sunglasses_active = False +moustache_active = False +show_angles = False + +# Open webcam for capturing live feed +cap = cv2.VideoCapture(0) +if not cap.isOpened(): + print("Error: The webcam cannot be opened") + exit() + +# Variable to hold the contour frame +contour_frame = None +resized_edges = None + +def calculate_angles(landmarks): + # Rough 2-D estimates from pixel coordinates; the eye-line angle measures in-plane tilt + left_eye = np.array(landmarks[33]) + right_eye = np.array(landmarks[263]) + nose_tip = np.array(landmarks[1]) + chin = np.array(landmarks[152]) + yaw = math.degrees(math.atan2(right_eye[1] - left_eye[1], right_eye[0] - left_eye[0])) + pitch = math.degrees(math.atan2(chin[1] - nose_tip[1], chin[0] - nose_tip[0])) + return yaw, pitch
+ +def apply_mario_hat(frame, landmarks): + global mario_hat + if mario_hat_active and mario_hat is not None: + forehead = landmarks[10] + chin = landmarks[152] + left_side = landmarks[234] + right_side = landmarks[454] + face_width = int(np.linalg.norm(np.array(left_side) - np.array(right_side))) + hat_width = int(face_width * 4.0) + hat_height = int(hat_width * mario_hat.shape[0] / mario_hat.shape[1]) + mario_hat_resized = cv2.resize(mario_hat, (hat_width, hat_height)) + x = int(forehead[0] - hat_width / 2) + y = int(forehead[1] - hat_height * 0.7) + alpha_channel = mario_hat_resized[:, :, 3] / 255.0 + hat_rgb = mario_hat_resized[:, :, :3] + for i in range(hat_height): + for j in range(hat_width): + if 0 <= y + i < frame.shape[0] and 0 <= x + j < frame.shape[1]: + alpha = alpha_channel[i, j] + if alpha > 0: + for c in range(3): + frame[y + i, x + j, c] = (1 - alpha) * frame[y + i, x + j, c] + alpha * hat_rgb[i, j, c] + return frame + +def apply_sunglasses(frame, landmarks): + global sunglasses + if sunglasses_active and sunglasses is not None: + left_eye = landmarks[33] + right_eye = landmarks[263] + eye_dist = np.linalg.norm(np.array(left_eye) - np.array(right_eye)) + scaling_factor = 1.75 + sunglasses_width = int(eye_dist * scaling_factor) + sunglasses_height = int(sunglasses_width * sunglasses.shape[0] / sunglasses.shape[1]) + sunglasses_resized = cv2.resize(sunglasses, (sunglasses_width, sunglasses_height)) + center_x = int((left_eye[0] + right_eye[0]) / 2) + center_y = int((left_eye[1] + right_eye[1]) / 2) + x = int(center_x - sunglasses_resized.shape[1] / 2) + y = int(center_y - sunglasses_resized.shape[0] / 2) + alpha_channel = sunglasses_resized[:, :, 3] / 255.0 + sunglasses_rgb = sunglasses_resized[:, :, :3] + for i in range(sunglasses_resized.shape[0]): + for j in range(sunglasses_resized.shape[1]): + if 0 <= y + i < frame.shape[0] and 0 <= x + j < frame.shape[1]: # bounds check, as in the other two filters + alpha = alpha_channel[i, j] + if alpha > 0: + for c in range(3): + frame[y + i, x + j, c] = (1 - alpha) * frame[y + i, x + j, c] + alpha * sunglasses_rgb[i, j, c] + return frame + +def apply_moustache(frame, landmarks): + global moustache + if moustache_active and moustache is not None: + nose_base = landmarks[1] + mouth_left = landmarks[61] + mouth_right = landmarks[291] + mouth_width = int(np.linalg.norm(np.array(mouth_left) - np.array(mouth_right))) + moustache_width = int(mouth_width * 1.5) + moustache_height = int(moustache_width * moustache.shape[0] / moustache.shape[1]) + moustache_resized = cv2.resize(moustache, (moustache_width, moustache_height)) + x = int(nose_base[0] - moustache_width / 2) + y = int(nose_base[1]) + alpha_channel = moustache_resized[:, :, 3] / 255.0 + moustache_rgb = moustache_resized[:, :, :3] + for i in range(moustache_height): + for j in range(moustache_width): + if 0 <= y + i < frame.shape[0] and 0 <= x + j < frame.shape[1]: + alpha = alpha_channel[i, j] + if alpha > 0: + for c in range(3): + frame[y + i, x + j, c] = (1 - alpha) * frame[y + i, x + j, c] + alpha * moustache_rgb[i, j, c] + return frame + +def update_frame(): + global mario_hat_active, sunglasses_active, show_angles, contour_frame, moustache_active + ret, frame = cap.read() + if ret: + rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) + results = face_mesh.process(rgb_frame) + if results.multi_face_landmarks: + for face_landmarks in results.multi_face_landmarks: + landmarks = [(lm.x * frame.shape[1], lm.y * frame.shape[0]) for lm in face_landmarks.landmark] + yaw, pitch = calculate_angles(landmarks) + if mario_hat_active: + frame = apply_mario_hat(frame, landmarks) + if sunglasses_active: + frame = apply_sunglasses(frame, landmarks) + if moustache_active: + frame = apply_moustache(frame, landmarks) + if show_angles: + cv2.putText(frame, f"Yaw: {yaw:.2f}", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2) + cv2.putText(frame, f"Pitch: {pitch:.2f}", (10, 70), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2) + cv2.imshow("Webcam Feed", frame) + contour_frame = frame + root.after(100, update_frame)
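The per-pixel Python loops in the three apply_* functions are simple but slow at webcam resolution. A vectorized NumPy alternative for the same alpha blend, as a sketch (a drop-in helper of my own naming, not part of the patch; it assumes a BGRA overlay as produced by cv2.IMREAD_UNCHANGED):

    def blend_overlay(frame, overlay, x, y):
        h, w = overlay.shape[:2]
        # Clip the overlay region to the frame bounds
        x0, y0 = max(x, 0), max(y, 0)
        x1, y1 = min(x + w, frame.shape[1]), min(y + h, frame.shape[0])
        if x0 >= x1 or y0 >= y1:
            return frame
        crop = overlay[y0 - y:y1 - y, x0 - x:x1 - x]
        alpha = crop[:, :, 3:4] / 255.0   # shape (h, w, 1), broadcasts over the 3 colour channels
        roi = frame[y0:y1, x0:x1]
        frame[y0:y1, x0:x1] = ((1 - alpha) * roi + alpha * crop[:, :, :3]).astype(np.uint8)
        return frame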
+ +def toggle_mario_hat(): + global mario_hat_active + mario_hat_active = not mario_hat_active + status = "activated" if mario_hat_active else "deactivated" + print(f"Mario hat filter {status}") + +def toggle_sunglasses(): + global sunglasses_active + sunglasses_active = not sunglasses_active + status = "activated" if sunglasses_active else "deactivated" + print(f"Sunglasses filter {status}") + +def toggle_moustache(): + global moustache_active + moustache_active = not moustache_active + status = "activated" if moustache_active else "deactivated" + print(f"Moustache filter {status}") + +def toggle_angles(): + global show_angles + show_angles = not show_angles + status = "shown" if show_angles else "hidden" + print(f"Angles display {status}") + +def show_contour_frame(): + if resized_edges is not None: # check the image that is actually displayed (it only exists after save_image has run) + # Display the result + cv2.imshow('Edges', resized_edges) + + +def save_image(): + global contour_frame, resized_edges + if contour_frame is not None: + save_path = "Tmp/captured_face.png" + cv2.imwrite(save_path, contour_frame) + print(f"Image saved to {save_path}") + + # Store path of the image in the variable input_path + input_path = 'Tmp/captured_face.png' + + # Store path of the output image in the variable output_path + output_path = 'Tmp/captured_face_nobg.png' + + # Processing the image (renamed from `input`/`output` to avoid shadowing the built-in input()) + input_image = Image.open(input_path) + + # Removing the background from the given image + output_image = remove(input_image) + + # Saving the image in the given path + output_image.save(output_path) + image = cv2.imread(output_path, cv2.IMREAD_GRAYSCALE) + mask = (image > 1) & (image < 254) + blurred_image = cv2.GaussianBlur(image, (11, 11), 0) + median_val = np.median(blurred_image[mask]) + lower_threshold = int(max(0, 0.5 * median_val)) + upper_threshold = int(min(255, 1.2 * median_val)) + print(f"Automatic lower threshold: {lower_threshold}") + print(f"Automatic upper threshold: {upper_threshold}") + + # Apply Canny edge detection using the calculated thresholds + edges = cv2.Canny(blurred_image, lower_threshold, upper_threshold) + + # Resize the output image to a smaller size (e.g., 50% of the original size) + output_height, output_width = edges.shape[:2] + resized_edges = cv2.resize(edges, (output_width // 2, output_height // 2), interpolation=cv2.INTER_AREA) + + # Save the resized result to a file + cv2.imwrite('Tmp/final_output_image.png', resized_edges) + +def start_dobot(): + vector_draw.vector_draw()
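save_image derives the Canny thresholds from the median intensity of the foreground (non-background) pixels: lower = 0.5 × median and upper = 1.2 × median, clamped to [0, 255]. A quick worked example of that rule (values illustrative):

    median_val = 120
    lower = int(max(0, 0.5 * median_val))    # 60
    upper = int(min(255, 1.2 * median_val))  # 144; a median of 230 would clamp to 255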
+ + +# Tkinter GUI setup +root = tk.Tk() +root.title("Control Tab") +root.geometry("300x370") +root.configure(bg="#004346") + +# Buttons on the control window with updated font and colors +mario_hat_button = tk.Button(root, text="Add Mario Hat", font=("Arial", 12, "bold"), command=toggle_mario_hat, bg="#4C8577", fg="white", padx=10, pady=5, height=1, width=20) +mario_hat_button.pack(pady=10) + +sunglasses_button = tk.Button(root, text="Add Glasses", font=("Arial", 12, "bold"), command=toggle_sunglasses, bg="#4C8577", fg="white", padx=10, pady=5, height=1, width=20) +sunglasses_button.pack(pady=10) + +moustache_button = tk.Button(root, text="Add Mario Moustache", font=("Arial", 12, "bold"), command=toggle_moustache, bg="#4C8577", fg="white", padx=10, pady=5,height=1, width=20) +moustache_button.pack(pady=10) + +save_image_button = tk.Button(root, text="Save/Retake Image", font=("Arial", 12, "bold"), command=save_image, bg="#49A078", fg="white", padx=10, pady=5,height=1, width=20) +save_image_button.pack(pady=10) + +contour_frame_button = tk.Button(root, text="Show Contour Image", font=("Arial", 12, "bold"), command=show_contour_frame, bg="#216869", fg="white", padx=10, pady=5,height=1, width=20) +contour_frame_button.pack(pady=10) + +#contour_frame_button = tk.Button(root, text="Start Dobot Drawing", font=("Arial", 12, "bold"), command=start_dobot, bg="#49A078", fg="white", padx=10, pady=5,height=1, width=20) +#contour_frame_button.pack(pady=10) + +# Graceful exit +def on_closing(): + cap.release() + cv2.destroyAllWindows() + root.destroy() + +root.protocol("WM_DELETE_WINDOW", on_closing) + +show_contour_frame() + +# Start Tkinter event loop and OpenCV frame updates +update_frame() +root.mainloop() \ No newline at end of file diff --git a/Project/vector_draw.py b/Project/vector_draw.py new file mode 100644 index 0000000..ae05824 --- /dev/null +++ b/Project/vector_draw.py @@ -0,0 +1,80 @@ +import cv2 +import numpy as np +import dobot +import time + +def vector_draw(): + # Drawing parameters + DRAW_SPEED = 1 # Drawing speed + DRAW_DEPTH = -30.5 # Z height at which the pen touches the drawing surface + INIT_POSITION = [-100, 150] + + # -------------------------------------------------------------------------- + # IMAGE TREATMENT + # -------------------------------------------------------------------------- + # Load the image in grayscale + image = cv2.imread("Tmp/captured_face.png", cv2.IMREAD_GRAYSCALE) + + # Create a mask to exclude background pixels (assuming background is near white or black) + # For example, exclude pixels that are close to white (255) and black (0) + mask = (image > 1) & (image < 254) # Keep only pixels that are not close to white or black + + # Apply Gaussian Blur to reduce noise + blurred_image = cv2.GaussianBlur(image, (11, 11), 0) + + # Calculate the median of only the foreground pixels + median_val = np.median(blurred_image[mask]) + + # Automatically calculate thresholds based on the median pixel intensity + lower_threshold = int(max(0, 0.5 * median_val)) + upper_threshold = int(min(255, 1.2 * median_val)) + print(f"Automatic lower threshold: {lower_threshold}") + print(f"Automatic upper threshold: {upper_threshold}") + + # Apply Canny edge detection using the calculated thresholds + edges = cv2.Canny(blurred_image, lower_threshold, upper_threshold) + + # Find Contours + contours, _ = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) + + # Initialize an array to store all points + all_points = [] + + # Define Dobot workspace dimensions (e.g., in mm) + robot_workspace = (200, 200*2/3) # Replace with your Dobot's range in mm + + # Scale function to map image coordinates to Dobot's workspace + def scale_coordinates(point, img_dim, robot_dim): + img_x, img_y = point + img_width, img_height = img_dim + robot_x_range, robot_y_range = robot_dim + # Map x and y with scaling + robot_x = (img_x / img_width) * robot_x_range + robot_y = (img_y / img_height) * robot_y_range + return robot_x, robot_y + + # Collect points for Dobot + for cnt in contours: + # Scale and store points + for point in cnt: + x, y = point[0] + x, y = scale_coordinates((x, y), (image.shape[1], image.shape[0]), robot_workspace) + all_points.append((x, y)) + all_points.append((-1,-1)) # sentinel marking the end of a contour (pen lift)
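scale_coordinates maps pixel coordinates linearly into the robot workspace, and the (-1, -1) sentinel appended after each contour tells the drawing loop below to lift the pen between strokes. A worked instance of the mapping (image size and pixel chosen arbitrarily): for a 640x427 image mapped into the (200, 200*2/3) mm workspace, a pixel at (320, 100) lands at

    robot_x = (320 / 640) * 200            # = 100.0 mm
    robot_y = (100 / 427) * (200 * 2 / 3)  # ~ 31.2 mm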
+ + robot_x_old = 0 + robot_y_old = 0 + for i, (robot_x, robot_y) in enumerate(all_points): + + if robot_x == -1 or robot_y == -1: + # Lift the pen at the end of each contour + dobot.setCPCmd(1, robot_x_old + INIT_POSITION[0], robot_y_old + INIT_POSITION[1], DRAW_DEPTH+15, DRAW_SPEED, 1) + else: + if robot_x_old == -1 or robot_y_old == -1: + dobot.setCPCmd(1, robot_x + INIT_POSITION[0], robot_y + INIT_POSITION[1], DRAW_DEPTH+15, DRAW_SPEED, 1) + dobot.setCPCmd(1, robot_x + INIT_POSITION[0], robot_y + INIT_POSITION[1], DRAW_DEPTH, DRAW_SPEED, 1) + time.sleep(0.1) + robot_x_old = robot_x + robot_y_old = robot_y + +if __name__ == "__main__": + # Guarded so that `import vector_draw` in main.py does not start a drawing run on import + vector_draw() \ No newline at end of file diff --git a/Saving/add_filter.py b/Saving/add_filter.py index 11c8ace..031d177 100644 --- a/Saving/add_filter.py +++ b/Saving/add_filter.py @@ -7,39 +7,45 @@ mp_face_detection = mp.solutions.face_detection mp_face_mesh = mp.solutions.face_mesh mp_drawing = mp.solutions.drawing_utils -filter_image_path = "ImagePNG\MArio.png" +filter_image_path = "ImagePNG\MoustacheMario.png" filter_image = cv2.imread(filter_image_path, cv2.IMREAD_UNCHANGED) -def add_filter(image, filter_image, landmarks): - # Use of eyes as reference points - left_eye = landmarks[33] - right_eye = landmarks[263] +def add_filter(image, filter_image, landmarks, size_factor=1.4): + """ + Adds a filter to an image based on facial landmarks. + Adjusts the filter size using a `size_factor`. + """ + # Use eyes as reference points + left_eye = landmarks[33] # Left eye landmark + right_eye = landmarks[263] # Right eye landmark - # Distance between both eyes --> filter size + # Distance between eyes determines the filter size eye_dist = np.linalg.norm(np.array(left_eye) - np.array(right_eye)) - # Filter size - filter_width = int(eye_dist * 2) # Adjust the factor for desired size + # Calculate filter size using the size factor + filter_width = int(eye_dist * size_factor) # Adjust for desired size filter_height = int(filter_width * filter_image.shape[0] / filter_image.shape[1]) resized_filter = cv2.resize(filter_image, (filter_width, filter_height)) - # Filter position on the face + # Determine filter position above the eyes center_x = int((left_eye[0] + right_eye[0]) / 2) center_y = int((left_eye[1] + right_eye[1]) / 2) x = int(center_x - filter_width / 2) y = int(center_y - filter_height / 2) - # Extract the alpha channel (transparency) from the filter image - alpha_channel = resized_filter[:, :, 3] / 255.0 # Normalize alpha to range [0, 1] - filter_rgb = resized_filter[:, :, :3] # Extract the RGB channels + # Extract the alpha channel for blending + alpha_channel = resized_filter[:, :, 3] / 255.0 # Normalize alpha to [0, 1] + filter_rgb = resized_filter[:, :, :3] - # Overlay the filter onto the image, using the alpha channel as a mask + # Overlay the filter onto the image for i in range(resized_filter.shape[0]): for j in range(resized_filter.shape[1]): - if alpha_channel[i, j] > 0: # Check if the pixel is not fully transparent - # Blend the pixels: (1 - alpha) * original + alpha * filter - for c in range(3): - image[y + i, x + j, c] = (1 - alpha_channel[i, j]) * image[y + i, x + j, c] + alpha_channel[i, j] * filter_rgb[i, j, c] + if 0 <= y + i < image.shape[0] and 0 <= x + j < image.shape[1]: # Bounds check + alpha = alpha_channel[i, j] + if alpha > 0: # Only apply non-transparent pixels + image[y + i, x + j] = ( + (1 - alpha) * image[y + i, x + j] + alpha * filter_rgb[i, j] + ) return image diff --git a/Saving/add_filter_hat.py b/Saving/add_filter_hat.py index 7d2cb49..f12b060 100644 --- a/Saving/add_filter_hat.py +++ b/Saving/add_filter_hat.py @@ -11,7 +11,7 @@ mp_face_mesh = mp.solutions.face_mesh filter_image_path = "ImagePNG/MArio.png" filter_image = cv2.imread(filter_image_path, cv2.IMREAD_UNCHANGED) -def add_filter(image, filter_image, bbox, scale_factor=1.2): +def add_filter_hat(image, filter_image, bbox, scale_factor=1.2): """ Add a filter image to a face image at a specified bounding box position, scaling it dynamically based on the face size.
@@ -68,7 +68,7 @@ with mp_face_detection.FaceDetection(model_selection=1, min_detection_confidence dynamic_scale_factor = 2.75 + face_height_ratio # Base size + adjustment # Add filter to the image with dynamic scaling - input_image = add_filter(input_image, filter_image, (x_min, y_min, box_width, box_height), scale_factor=dynamic_scale_factor) + input_image = add_filter_hat(input_image, filter_image, (x_min, y_min, box_width, box_height), scale_factor=dynamic_scale_factor) # Define output folder and save path output_folder = "OutputImage" diff --git a/remove_bg.py b/remove_bg.py new file mode 100644 index 0000000..e69de29