This commit is contained in:
Rémi BUSSIERE 2024-01-10 10:30:59 +01:00
parent c79adab0ec
commit 2f99a32263
14 changed files with 3134 additions and 137 deletions

48
DouglasPeucker.py Normal file
View File

@ -0,0 +1,48 @@
import numpy as np
import matplotlib.pyplot as plt

def douglas_peucker(points, epsilon):
    if len(points) <= 2:
        return points
    # Find the point with the maximum distance from the segment joining the endpoints
    d_max = 0
    index = 0
    end = len(points) - 1
    for i in range(1, end):
        d = perpendicular_distance(points[i], points[0], points[end])
        if d > d_max:
            index = i
            d_max = d
    # If the maximum distance is greater than epsilon, recursively simplify both halves
    if d_max > epsilon:
        left = list(douglas_peucker(points[:index + 1], epsilon))
        right = list(douglas_peucker(points[index:], epsilon))
        # drop the last point of the left half so the split point is not duplicated
        result = left[:-1] + right
    else:
        result = [points[0], points[end]]
    return result

def perpendicular_distance(point, line_start, line_end):
    # Perpendicular distance from a point to the line through line_start and line_end
    return np.abs(np.cross(line_end - line_start, line_start - point)) / np.linalg.norm(line_end - line_start)

# Example usage
# Create a set of points representing a line
original_points = np.array([[0, 0], [1, 1], [2, 3], [3, 1], [4, 0], [5, 1], [6, -1], [7, -3]])
epsilon = 0.1
simplified_points = np.array(douglas_peucker(original_points, epsilon))

# Plot the original and simplified lines
plt.plot(original_points[:, 0], original_points[:, 1], label='Original Line')
plt.plot(simplified_points[:, 0], simplified_points[:, 1], label='Simplified Line', linestyle='dashed')
plt.scatter(simplified_points[:, 0], simplified_points[:, 1], c='red', label='Simplified Points')
plt.legend()
plt.title('Douglas-Peucker Line Simplification')
plt.show()
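Side note: the same simplification can also be done with the rdp package, which this commit already uses in get_egdes.py. A minimal sketch, assuming the rdp package from PyPI is installed:

    from rdp import rdp
    import numpy as np

    original_points = np.array([[0, 0], [1, 1], [2, 3], [3, 1], [4, 0], [5, 1], [6, -1], [7, -3]])
    # rdp takes the polyline and an epsilon tolerance, like douglas_peucker above
    simplified_points = rdp(original_points, epsilon=0.1)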

coordinates_change.py
View File

@ -1,33 +1,49 @@
import numpy as np
import matplotlib.pyplot as plt

def coord(contours, x_offset, y_offset, x_scale, y_scale):
    new_contours = []
    for i in range(len(contours)):
        for j in range(len(contours[i])):
            x = contours[i][j][0][0]*x_scale + x_offset
            y = contours[i][j][0][1]*y_scale + y_offset
            new_contours.append([x, y])
    return new_contours

def new_coord(contours, x_scale, y_scale, angle):
    '''Scale, then rotate (angle in degrees), a list of [x, y, z] points.'''
    # SCALE THE IMAGE HERE
    scale_contours = []
    for i in contours:
        x = i[0] * x_scale
        y = i[1] * y_scale
        z = i[2]
        scale_contours.append([x, y, z])
    # ROTATE THE IMAGE HERE
    angle = angle * np.pi / 180
    rotate_contours = []
    for i in scale_contours:
        x_old = i[0]
        y_old = i[1]
        x = x_old * np.cos(angle) - y_old * np.sin(angle)
        y = x_old * np.sin(angle) + y_old * np.cos(angle)
        z = i[2]
        rotate_contours.append([x, y, z])
    return rotate_contours
def scale_calculator(contours, size):
    x_max = contours[0][0][0][0]
    x_min = contours[0][0][0][0]
    y_max = contours[0][0][0][1]
    y_min = contours[0][0][0][1]
    x_max = contours[0][0]
    x_min = contours[0][0]
    y_max = contours[0][1]
    y_min = contours[0][1]
    for i in range(len(contours)):
        for j in range(len(contours[i])):
            x = contours[i][j][0][0]
            y = contours[i][j][0][1]
            if x_min > x:
                x_min = x
            if x > x_max:
                x_max = x
            if y_min > y:
                y_min = y
            if y > y_max:
                y_max = y
    # print([[x_min, x_max], [y_min, y_max]]) #display max and min in x and y
    for i in contours:
        x = i[0]
        y = i[1]
        if x_min > x:
            x_min = x
        if x > x_max:
            x_max = x
        if y_min > y:
            y_min = y
        if y > y_max:
            y_max = y
    #print([[x_min, x_max], [y_min, y_max]]) #display max and min in x and y
    x_scale = 1.0
    y_scale = 1.0
@ -65,3 +81,70 @@ def get_size(coords):
            y_max = y
    return [[x_min, x_max], [y_min, y_max]] #display max and min in x and y
def symetry(coords, x_max, y_max, symetry):
    new_coords = []
    for i in coords:
        x = i[0]
        y = i[1]
        z = i[2]
        if symetry == 'x':
            x = x_max - x
        if symetry == 'y':
            y = y_max - y
        new_coords.append([x, y, z])
    return new_coords

def set_origin(coords, origin):
    [[x_min, x_max], [y_min, y_max]] = get_size(coords)
    new_coords = []
    for i in coords:
        x = i[0] - x_min + origin[0]
        y = i[1] - y_min + origin[1]
        z = i[2] + origin[2]
        new_coords.append([x, y, z])
    return new_coords

def plot_function(coords):
    x_coords = []
    y_coords = []
    z_coords = []
    for i in coords:
        x_coords.append(i[0])
        y_coords.append(i[1])
        z_coords.append(i[2])
    plt.scatter(y_coords, x_coords, label='Points', color='blue', s=5)
    plt.gca().set_aspect('equal', adjustable='box')
    plt.xlabel('X-axis')
    plt.ylabel('Y-axis')
    plt.title('Scatter Plot of Points')
    plt.legend()
    plt.show()

def draw_function(coords):
    temp_set_x = []
    temp_set_y = []
    final_set_x = []
    final_set_y = []
    for i in coords:
        temp_set_x.append(i[0])
        temp_set_y.append(i[1])
        if i[2] != 0:
            final_set_x.append(temp_set_x)
            final_set_y.append(temp_set_y)
            temp_set_x = []
            temp_set_y = []
    #print(final_set_x)
    #print(final_set_y)
    for j in range(len(final_set_x)):
        plt.plot(final_set_x[j], final_set_y[j])
    plt.gca().set_aspect('equal', adjustable='box')
    plt.xlabel('X-axis')
    plt.ylabel('Y-axis')
    plt.title('Scatter Plot of Points')
    plt.show()
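As a quick sanity check of the scale-then-rotate logic in new_coord, a 90° rotation should move a point on the x axis onto the y axis. A minimal sketch, assuming this file is importable as coordinates_change (the module name used elsewhere in this commit):

    import coordinates_change

    # unit scale, 90° rotation: [1, 0] should map to approximately [0, 1]; z is passed through
    print(coordinates_change.new_coord([[1, 0, 0]], 1.0, 1.0, 90))
    # expected output close to [[0.0, 1.0, 0]]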

55
coords_creation.py Normal file
View File

@ -0,0 +1,55 @@
import cv2
import show_img_conditions
import remove_bg
import get_head
import get_egdes
import coordinates_change
import time
import numpy as np
def coordinates(origin, new_size, angle, sym, live):
    if live == 0:
        print("Webcam take picture")
        time.sleep(2)
        #show_img_conditions.show()
        image_path = "goutput.png"
        image = cv2.imread(image_path)
        #cv2.imshow('test', image)
        background_path = remove_bg.rmbg("didier.jpg")
        print(background_path)
    if live == 1:
        print("Picture from files")
        time.sleep(2)
        background_path = "output.png"
    background = cv2.imread(background_path)
    #cv2.imshow('Image without background', background)
    gray_image = cv2.cvtColor(background, cv2.COLOR_BGR2GRAY)
    contours = get_egdes.edges(gray_image)
    #cv2.imshow("contours", edges)
    #cv2.waitKey(0)
    [x_min, y_min, x_scale, y_scale] = coordinates_change.scale_calculator(contours, new_size)
    new_coords = coordinates_change.new_coord(contours, x_scale, y_scale, angle)
    size = coordinates_change.get_size(new_coords)
    x_max = size[0][1]
    y_max = size[1][1]
    new_coords = coordinates_change.symetry(new_coords, x_max, y_max, sym)
    new_coords = coordinates_change.set_origin(new_coords, origin)
    print("origin : x = ", origin[0], " , y = ", origin[1], " , z = ", origin[2])
    print("size : ", int(size[0][1] - size[0][0]), "x", int(size[1][1] - size[1][0]), "mm")
    print("nb points : ", len(new_coords))
    coordinates_change.plot_function(new_coords)
    #coordinates_change.draw_function(new_coords)
    return new_coords
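For reference, main.py in this commit drives this function roughly as in the sketch below; the values are the ones main.py uses, not defaults enforced by coordinates():

    import coords_creation

    origin = [200, 0, -63]   # origin of the drawing, in robot coordinates
    new_size = [60, 0]       # target size in mm; 0 keeps the picture proportional
    angle = 90               # rotation in degrees
    sym = 'y'                # axis flag passed to coordinates_change.symetry
    live = 1                 # 0 takes a live webcam picture, 1 uses a saved image
    coords = coords_creation.coordinates(origin, new_size, angle, sym, live)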

BIN
didier.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 21 KiB

2539
dobot.py Normal file

File diff suppressed because it is too large Load Diff

75
get_egdes.py Normal file
View File

@ -0,0 +1,75 @@
import cv2
import numpy as np
from rdp import rdp
def edges(gray_image):
    threshold_1 = 0
    threshold_diff = 40
    threshold_2 = threshold_1 + threshold_diff
    edges_image = cv2.Canny(gray_image, threshold_1, threshold_2)
    contours, _ = cv2.findContours(edges_image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    new_contours = []
    # here we create the contours in XYZ
    for i in range(len(contours)):
        if (len(contours[i]) >= 10):
            new_contours.append([contours[i][0][0][0], contours[i][0][0][1], 25]) # create a point at Z = 25 before
            for j in range(len(contours[i])):
                x = contours[i][j][0][0]
                y = contours[i][j][0][1]
                z = 0
                new_contours.append([x, y, z])
            new_contours.append([contours[i][len(contours[i]) - 1][0][0], contours[i][len(contours[i]) - 1][0][1], 25])
    iteration = 0
    while len(new_contours) >= 6000:
        threshold_1 = 40 + iteration*20
        threshold_2 = threshold_1 + threshold_diff
        edges_image = cv2.Canny(gray_image, threshold_1, threshold_2)
        contours, _ = cv2.findContours(edges_image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        new_contours = []
        for i in range(len(contours)):
            if (len(contours[i]) >= 10):
                new_contours.append([contours[i][0][0][0], contours[i][0][0][1], 25]) # create a point at Z = 25 before
                for j in range(len(contours[i])):
                    x = contours[i][j][0][0]
                    y = contours[i][j][0][1]
                    z = 0
                    new_contours.append([x, y, z])
                new_contours.append([contours[i][len(contours[i]) - 1][0][0], contours[i][len(contours[i]) - 1][0][1], 25])
        name = "points : " + str(len(new_contours)) + ", th1 : " + str(threshold_1) + ", th2 : " + str(threshold_2)
        #print(name, iteration)
        iteration += 1
        #cv2.imshow(name, edges_image)
        #cv2.waitKey(0)
    coords = simplified_coords(contours)
    print("nb points before optimisation : ", len(new_contours))
    print("optimisation rate : ", round((len(new_contours) - len(coords))/len(new_contours), 2))
    return coords

def simplified_coords(contours):
    old_points = []
    coords = []
    for i in range(len(contours)):
        for j in range(len(contours[i])):
            x = contours[i][j][0][0]
            y = contours[i][j][0][1]
            if (j+1 == len(contours[i])) or j == 0:
                z = 15
            else:
                z = 0
            old_points.append([x, y, z])
        new_points = rdp(old_points, 1.0)
        #print(new_points)
        #print(len(new_points)/len(old_points))
        for k in new_points:
            coords.append(k)
        old_points = []
    print("nb points after optimisation : ", len(coords))
    return coords
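The Z value here appears to act as a pen flag: points along a contour carry z = 0 (pen down) and the first and last point of each contour carry a positive z so the robot can lift the pen between strokes. A minimal sketch on a synthetic, hypothetical contour shaped like cv2.findContours output (assuming cv2, numpy and rdp are installed):

    import numpy as np
    import get_egdes

    # one fake OpenCV-style contour with shape (N, 1, 2)
    square = np.array([[[0, 0]], [[10, 0]], [[10, 10]], [[0, 10]]])
    coords = get_egdes.simplified_coords([square])
    # first and last points get z = 15 (pen up), intermediate points z = 0 (pen down)
    print(coords)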

BIN
image.png

Binary file not shown.

Before

Width:  |  Height:  |  Size: 283 KiB

After

Width:  |  Height:  |  Size: 311 KiB

63
main.py
View File

@ -1,42 +1,41 @@
import cv2
import coords_creation
import robot_control
import dobot
import serial
import time
import math
import show_img_conditions
import remove_bg
import get_head
import coordinates_change
import numpy as np
#show_img_conditions.show()
y = []
x = []
image_path = get_head.get_image()
image = cv2.imread(image_path)
#cv2.imshow('Image', image)
image_path = 'image.png'
coords = []
background_path = remove_bg.rmbg(image_path)
background = cv2.imread(background_path)
#cv2.imshow('Image without background', background)
origin = [200, 0, -63] # origin of the drawing
new_size = [60, 0] # new size of the picture in mm, write 0 if you want to be proportional
angle = 90 # rotation angle in degrees
sym = 'y' # mirror the y coordinates (symmetry about the x-axis)
live = 1 # 0 is live, 1 is for saved image
gray_image = cv2.cvtColor(background, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray_image, 20, 80)
cv2.imshow("contours", edges)
cv2.waitKey(0)
coords = coords_creation.coordinates(origin, new_size, angle, sym, live)
contours, _ = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# xmin = 128, max = 267, ymin 180 -180
x_offset = 0 # offset in the x_axis
y_offset = 0 # offset in the y_axis
new_size = [150, 0] # new size of the picture in mm, write 0 if you want to be proportional
dobot.setQueuedCmdClear()
dobot.setQueuedCmdStartExec()
dobot.setHomeCmd()
dobot.setCPParams(175, 0, 10, 1, 1)
[x_min, y_min, x_scale, y_scale] = coordinates_change.scale_calculator(contours, new_size)
delay = 0.3
count = 0
for i in coords:
    x = i[0]
    y = i[1]
    z = i[2]
    count += 1
    r = math.atan2(x, y)
    dobot.setCPCmd(1, x, y, z, r, 1)
    time.sleep(delay)
    print(round(count/len(coords)*100, 4), "%")
x_offset = x_offset - x_min
y_offset = y_offset - y_min
new_coords = coordinates_change.coord(contours, x_offset, y_offset, x_scale, y_scale)
size = coordinates_change.get_size(new_coords)
print("x offset : ", int(x_offset+x_min), "mm")
print("y offset : ", int(y_offset+y_min), "mm")
print("size : ", int(size[0][1] - size[0][0]), "x", int(size[1][1] - size[1][0]), "mm")

Binary file not shown.

Before

Width:  |  Height:  |  Size: 198 KiB

After

Width:  |  Height:  |  Size: 213 KiB

BIN
remi.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 55 KiB

182
robot_control.py Normal file
View File

@ -0,0 +1,182 @@
import matplotlib.pyplot as plt
import sys
sys.path.insert(0, './dobot/librairies')
import dobot
import time
import math
import cv2
import numpy as np
from operator import itemgetter
imageName = "test.jpg"
ztrace = -70
zleve = 20
xmin,xmax,ymin,ymax = 200,305,-74,74
xAdder = 200
yAdder = -74
contoursShort = 40
def calibration():
    dobot.setHomeCmd()
    time.sleep(20)

def displacement(x, y, z):
    dobot.setPTPjointParams(200,200,200,200,200,200,200,200,1)
    r = math.atan2(y, x)
    dobot.setPTPCmd(1, x, y, z, r, 1)

def _map(x, in_min, in_max, out_min, out_max):
    return (x - in_min) * (out_max - out_min) / (in_max - in_min) + out_min
# def takePicture(imageName):
#     # Setup camera
#     cap = cv2.VideoCapture(0)
#     # Set a smaller resolution
#     cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
#     cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
#     while True:
#         # Capture frame-by-frame
#         result, frame = cap.read()
#         frame = cv2.flip(frame, 1)
#         cv2.imshow("Webcam", frame)
#         if cv2.waitKey(1) == ord('q'):
#             cv2.imwrite(imageName, frame)
#             break
#     # When everything done, release the capture
#     cap.release()
#     cv2.destroyAllWindows()
# def findROICoords(imageName):
#     image = cv2.imread(imageName)
#     gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
#     face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
#     # Detect faces
#     faces = face_cascade.detectMultiScale(gray, 1.1, 4)
#     # Draw rectangle around the faces
#     scalex = 0.9
#     scaley = 0.8
#     number = 1
#     answer = "ERROR"
#     if len(faces)>0:
#         for (x, y, w, h) in faces:
#             x,y,w,h = int(x*scalex),int(y*scaley),int((x+w)/scalex),int((y+h)/scaley)
#             test = image
#             cv2.rectangle(test, (x, y), (w, h), (0, 255, 0), 2)
#             cv2.imshow("Faces {}".format(number), test)
#             cv2.waitKey(0)
#             cv2.destroyWindow("Faces {}".format(number))
#             number += 1
#             print(x,y,w,h)
#         choice = 0
#         while (choice<=0 or choice>len(faces)):
#             choice = int(input("Which face?"))
#         print(faces)
#         answer = faces[choice-1]
#         x,y,w,h = answer[0], answer[1], answer[2], answer[3]
#         x,y,w,h = int(x*scalex),int(y*scaley),int((x+w)/scalex),int((y+h)/scaley)
#         answer = [x,y,w,h]
#     return(answer)
# def getFinalROIPicture():
#     img = cv2.imread(imageName,0)
#     img = cv2.medianBlur(img,5)
#     ret,th1 = cv2.threshold(img,127,255,cv2.THRESH_BINARY)
#     th2 = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_MEAN_C,\
#         cv2.THRESH_BINARY,11,2)
#     th3 = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
#         cv2.THRESH_BINARY,13,3)
#     titles = ['Original Image', 'Global Thresholding (v = 127)',
#         'Adaptive Mean Thresholding', 'Adaptive Gaussian Thresholding']
#     images = [img, th1, th2, th3]
#     for i in range(4):
#         plt.subplot(2,2,i+1),plt.imshow(images[i],'gray')
#         plt.title(titles[i])
#         plt.xticks([]),plt.yticks([])
#     plt.show()
#     choice = 3
#     while choice not in [1,2,3]:
#         choice = int(input("Which threshold?"))
#     return(images[choice])
# def filteredContours(edged, contoursShort):
#     # Finding Contours
#     contours, hierarchy = cv2.findContours(edged,
#         cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
#     print("Number of Contours found = " + str(len(contours)))
#     # Filter contours too short (points)
#     newcontours = []
#     for i in range(len(contours)):
#         if len(contours[i]) >= contoursShort:
#             newcontours.append(contours[i])
#     tuple(newcontours)
#     print(len(newcontours))
#     cv2.drawContours(image, newcontours, -1, (0, 255, 0), 3)
#     cv2.imshow('NewContours', image)
#     cv2.waitKey(0)
#     cv2.imwrite("Contoured.png", image)
#     cv2.destroyAllWindows()
#     return(newcontours)
# def scaleFinder(image,xmin,xmax,ymin,ymax):
#     rangeImageX=[0,len(image[0])]
#     rangeImageY=[0,len(image)]
#     xScale = rangeImageX[1]/(ymax-ymin)
#     yScale = rangeImageY[1]/(xmax-xmin)
#     if xScale > yScale:
#         scale = xScale
#     else:
#         scale=yScale
#     if scale <1:
#         scale = 1/scale
#     return(scale)
# dobot.setQueuedCmdClear()
# dobot.setQueuedCmdStartExec()
# dobot.setWaitCmd(1000)
# calibration()
# roiAnswer = "Error"
# while isinstance(roiAnswer, str):
#     picture = 1
#     while picture not in [1,2]:
#         picture = int(input("Take a picture?"))
#     if picture == 1:
#         takePicture(imageName)
#     roiAnswer = findROICoords(imageName)
# roi = getFinalROIPicture()
# roi = roi[roiAnswer[1]:roiAnswer[3], roiAnswer[0]:roiAnswer[2]]
# cv2.imwrite("roi.png", roi)
# cv2.imshow('ROI',roi)
# cv2.waitKey(0)
# image = cv2.imread('roi.png')
# scale = scaleFinder(image,xmin,xmax,ymin,ymax)
# gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) #Grayscale
# edged = cv2.Canny(gray, 80, 140) #Canny
#newcontours = filteredContours(edged,contoursShort) #Find and filter contours
#for line in range(len(newcontours)): #Rescale each point
#    for point in range(len(newcontours[line])):
#        for coord in range(len(newcontours[line][point])):
#            newcontours[line][point][coord][0] = newcontours[line][point][coord][0]/scale + xAdder
#            newcontours[line][point][coord][1] = newcontours[line][point][coord][1]/scale + yAdder
'''
leve = 0
for ligne in newcontours:
    for m in range(len(ligne)):
        a = ligne[m]
        ztrace = _map(ligne[m][0][0], 200, 305, -64.5, -66.5)
        if m == 0:
            displacement(ligne[m][0][0],ligne[m][0][1],zleve)
            leve += 1
        displacement(ligne[m][0][0],ligne[m][0][1],ztrace)
        if m == len(ligne)-1:
            displacement(ligne[m][0][0], ligne[m][0][1],zleve)
            time.sleep(2)
    print(round(float(leve)/float(len(newcontours)),3)*100,"%")
'''
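_map above is the usual Arduino-style linear remap. In the disabled drawing loop it converts the pen's X position into a slightly varying Z height, presumably to compensate for a drawing surface that is not perfectly level. A quick check with the same numbers used in that loop:

    # midway between x = 200 and x = 305 lands midway between z = -64.5 and z = -66.5
    print(_map(252.5, 200, 305, -64.5, -66.5))  # -65.5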

106
test.py
View File

@ -1,57 +1,63 @@
import cv2
import numpy as np
import matplotlib.pyplot as plt
import sys
sys.path.insert(0, './dobot/librairies')
import dobot
import time
import math

dobot.setQueuedCmdClear()
dobot.setQueuedCmdStartExec()
cap = cv2.VideoCapture(0)
count = 0
condition = False
while condition == False:
    ret, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (3, 3), 0)
    height, width = blurred.shape
    face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    faces = face_cascade.detectMultiScale(blurred, scaleFactor=1.3, minNeighbors=5, minSize=(30, 30))

    def moyenneMobile(list, length):
        mm = []
        for i in range(0, len(list) - length, 1):
            sum = 0
            for k in range(0, length, 1):
                sum += list[i+k]
            mm.append(sum / length)
        return(mm)

    for (x, y, w, h) in faces:
        c_x = int(x + w/2)
        c_y = int(y + h/2)
        center = (c_x, c_y)
        top = (c_x, int(c_y-h/2*1.4))
        bottom = (c_x, int(c_y+h/2*1.1))
        left = (int(c_x - w / 2 * 0.8), c_y)
        right = (int(c_x + w / 2 * 0.8), c_y)
        y = []
        x = []
        c_x2 = int(( int(c_x - w / 2 * 0.8) + int(c_x + w / 2 * 0.8) )/2)
        c_y2 = int(( int(c_y-h/2*1.4) + int(c_y+h/2*1.1) )/2)
        '''
        cv2.circle(frame, center, 10, (255, 255, 255), 5) # center
        cv2.circle(frame, top, 10, (255, 255, 255), 5) # top
        cv2.circle(frame, bottom, 10, (255, 255, 255), 5) # bottom
        cv2.circle(frame, left, 10, (255, 255, 255), 5) # left
        cv2.circle(frame, right, 10, (255, 255, 255), 5) # right
        dobot.setCPParams(120,100,10,1,1)
        dobot.setHomeCmd()
        time.sleep(20)
        delay = 2
        cv2.circle(frame, (x, y), 10, (255, 255, 255), 5) # top left
        cv2.circle(frame, (x+w, y), 10, (255, 255, 255), 5) # top right
        cv2.circle(frame, (x, y+h), 10, (255, 255, 255), 5) # bottom left
        cv2.circle(frame, (x+h, y+h), 10, (255, 255, 255), 5) # bottom right
        '''
        center_ellipse = (c_x2, c_y2)
        axes = (int(w/2* 0.9), int(h/2*1.4)) # Major axis, Minor axis
        angle = 0 # Rotation angle
        color = (0, 255, 0) # Color in BGR format
        thickness = 2
        # Draw the ellipse on the image
        #cv2.ellipse(frame, center_ellipse, axes, angle, 0, 360, color, thickness)
        dobot.setHomeParams(200, 50, 0, 0, 1)
        time.sleep(delay)
        dobot.setCPCmd(1, 200, 100, 0, 0, 1)
        time.sleep(delay)
        dobot.setCPCmd(1, 100, 100, 0, 0, 1)
        time.sleep(delay)
        dobot.setCPCmd(1, 200, -100, 0, 0, 1)
        time.sleep(delay)
        dobot.setCPCmd(1, 200, 100, 0, 0, 1)
        time.sleep(delay)
        mask = np.zeros((height, width), dtype=np.uint8)
        cv2.ellipse(mask, center_ellipse, axes, angle, 0, 360, 255, -1)
        result = cv2.bitwise_and(frame, frame, mask=mask)
        cv2.imshow('Head Detection', result)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
#dobot.setCPCmd(1, 250, -100, 0, 0, 1)
#dobot.setCPCmd(1, 250, 0, 0, 0, 1)
#dobot.setCPCmd(1, 170, 0, 0, 0, 1)
# Parameters:
#   x - X coordinate for home position.
#   y - Y coordinate for home position.
#   z - Z coordinate for home position.
#   r - Peripheral rotation at home position.
# Function:
#   dobot.setHomeParams(x, y, z, r, isQueued)
'''
delay = 0.03
for i in range(0, 150):
    valeurs = dobot.getPose()
    y.append(-valeurs[1])
    x.append(valeurs[0])
    time.sleep(delay)
# Plot the graph
plt.xlabel("y")
plt.ylabel("x")
plt.plot(y, x, '.r')
#plt.axis([-90, 120, 150, 250])
plt.axis([-300, 300, -300, 300])
plt.show()'''

View File

@ -1,35 +1,23 @@
import cv2
import matplotlib.pyplot as plt
import sys
sys.path.insert(0, './dobot/librairies')
import dobot
import time
import math

# Open a connection to the webcam (default camera index is 0)
cap = cv2.VideoCapture(0)
# xmin = 128, max = 267, ymin 180 -180
dobot.setQueuedCmdClear()
dobot.setQueuedCmdStartExec()
dobot.setHomeCmd()
# Check if the camera is opened successfully
if not cap.isOpened():
    print("Error: Could not open the camera.")
    exit()
# Create a window to display the webcam feed
window_name = 'Webcam Feed'
cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
dobot.setCPParams(175,0,10,1,1)
while True:
    # Read a frame from the webcam
    ret, frame = cap.read()
    for i in range(3):
        dobot.setCPCmd(1, 200, 0, 50, 0, 1)
        dobot.setCPCmd(1, 250, 0, -50, 0, 1)
        dobot.setCPCmd(1, 180, 0, -50, 0, 1)
        print(dobot.getPose())
        time.sleep(2)
    # Check if the frame is read successfully
    if not ret:
        print("Error: Could not read frame.")
        break
    # Display the frame in the window
    cv2.imshow(window_name, frame)
    # Break the loop if 'q' key is pressed
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# Release the camera capture object
cap.release()
# Destroy the OpenCV window
cv2.destroyAllWindows()

22
test_dobot.py Normal file
View File

@ -0,0 +1,22 @@
import robot_control
import dobot
import serial
import time
import math
dobot.setQueuedCmdClear()
dobot.setQueuedCmdStartExec()
dobot.setHomeCmd()
dobot.setCPParams(175, 0, 10, 1, 1)
delay = 1
count = 0
for i in range(4):
    x = 10*i + 100
    y = 10*i + 50
    z = 10*i + 50
    count += 1
    r = math.atan2(x, y)
    dobot.setCPCmd(1, x, y, z, r, 1)
    time.sleep(delay)