# GrpC_Identikit/vector_draw.py
import matplotlib.pyplot as plt
import dobot
import time
import cv2
import numpy as np
import os
def vector_draw():
    """Trace the edges of the captured face image with the Dobot arm.

    Loads ``Tmp/captured_face_nobg.png`` from the current working
    directory, extracts edge contours (Canny with auto thresholds),
    scales them into the robot's drawing area and streams CP
    (continuous-path) commands to the Dobot.

    Side effects only (robot motion, console prints); returns None.

    Raises:
        FileNotFoundError: if the captured image cannot be read.
    """
    dobot.setQueuedCmdClear()
    dobot.setQueuedCmdStartExec()

    # Drawing parameters
    DRAW_SPEED = 1                # CP command velocity parameter
    DRAW_DEPTH = -38              # Z height at which the pen touches the paper
    PEN_LIFT = 15                 # extra Z offset for travel between contours
    INIT_POSITION = [-100, 150]   # XY origin of the drawing area (mm)

    # ------------------------------------------------------------------
    # IMAGE TREATMENT
    # ------------------------------------------------------------------
    # Build the path portably; the original "\Tmp\..." concatenation only
    # worked on Windows and relied on backslash escapes staying literal.
    output_path = os.path.join(os.getcwd(), "Tmp", "captured_face_nobg.png")
    image = cv2.imread(output_path, cv2.IMREAD_GRAYSCALE)
    if image is None:
        # cv2.imread returns None (no exception) when the file is unreadable.
        raise FileNotFoundError(f"Could not read image: {output_path}")

    # Keep only pixels that are not near-white (background) or near-black,
    # so the threshold statistics come from foreground pixels only.
    mask = (image > 1) & (image < 254)

    # Gaussian blur to suppress noise before edge detection.
    blurred_image = cv2.GaussianBlur(image, (11, 11), 0)

    # Median intensity of the foreground; fall back to the global median
    # when the mask is empty (np.median of an empty array would be NaN).
    if mask.any():
        median_val = np.median(blurred_image[mask])
    else:
        median_val = np.median(blurred_image)

    # Auto thresholds derived from the median (standard "auto-Canny" idea).
    lower_threshold = int(max(0, 0.5 * median_val))
    upper_threshold = int(min(255, 1.2 * median_val))
    print(f"Automatic lower threshold: {lower_threshold}")
    print(f"Automatic upper threshold: {upper_threshold}")

    edges = cv2.Canny(blurred_image, lower_threshold, upper_threshold)
    contours, _ = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    # Dobot drawing area in mm (width, height) — 3:2 aspect ratio.
    robot_workspace = (200, 200 * 2 / 3)

    def scale_coordinates(point, img_dim, robot_dim):
        """Map an image pixel coordinate into the robot workspace (mm)."""
        img_x, img_y = point
        img_width, img_height = img_dim
        robot_x_range, robot_y_range = robot_dim
        robot_x = (img_x / img_width) * robot_x_range
        robot_y = (img_y / img_height) * robot_y_range
        return robot_x, robot_y

    # Flatten all contours into one point list; (-1, -1) is a sentinel
    # marking the end of a contour (pen-lift marker).
    all_points = []
    for cnt in contours:
        for point in cnt:
            x, y = point[0]
            all_points.append(
                scale_coordinates(
                    (x, y), (image.shape[1], image.shape[0]), robot_workspace
                )
            )
        all_points.append((-1, -1))

    robot_x_old = 0
    robot_y_old = 0
    for robot_x, robot_y in all_points:
        if robot_x == -1 or robot_y == -1:
            # End of contour: lift the pen above the last drawn point.
            dobot.setCPCmd(1, robot_x_old + INIT_POSITION[0],
                           robot_y_old + INIT_POSITION[1],
                           DRAW_DEPTH + PEN_LIFT, DRAW_SPEED, 1)
        else:
            if robot_x_old == -1 or robot_y_old == -1:
                # First point after a sentinel: travel with the pen raised
                # before lowering it at the new contour's start.
                dobot.setCPCmd(1, robot_x + INIT_POSITION[0],
                               robot_y + INIT_POSITION[1],
                               DRAW_DEPTH + PEN_LIFT, DRAW_SPEED, 1)
            dobot.setCPCmd(1, robot_x + INIT_POSITION[0],
                           robot_y + INIT_POSITION[1],
                           DRAW_DEPTH, DRAW_SPEED, 1)
            time.sleep(0.15)
        # Must update for sentinels too, otherwise the raised-travel branch
        # above could never trigger.
        robot_x_old = robot_x
        robot_y_old = robot_y