Identikit-Robot/robot_control.py

import matplotlib.pyplot as plt
import sys
# Make the bundled Dobot library importable
sys.path.insert(0, './dobot/librairies')
import dobot
import time
import math
import cv2
import numpy as np
from operator import itemgetter
imageName = "test.jpg"
ztrace = -70   # Z height with the pen on the paper (the tracing loop recomputes this per point)
zleve = 20     # Z height with the pen lifted
xmin, xmax, ymin, ymax = 200, 305, -74, 74   # drawing area in robot coordinates (mm)
xAdder = 200   # X offset added to rescaled contour points to shift them into the drawing area
yAdder = -74   # Y offset added to rescaled contour points
contoursShort = 40   # contours with fewer points than this are discarded
def calibration():
    # Home the arm and wait for the homing routine to finish
    dobot.setHomeCmd()
    time.sleep(20)
def displacement(x, y, z):
    # Set joint speed/acceleration parameters, then move to (x, y, z),
    # rotating the end effector towards the target point
    dobot.setPTPjointParams(200, 200, 200, 200, 200, 200, 200, 200, 1)
    r = math.atan2(y, x)
    dobot.setPTPCmd(1, x, y, z, r, 1)
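# Illustrative usage only (not part of the original flow): move the pen,
# lifted, above the centre of the drawing area defined by the constants above.
#   displacement((xmin + xmax) / 2, (ymin + ymax) / 2, zleve)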
def _map(x, in_min, in_max, out_min, out_max):
    # Linearly map x from [in_min, in_max] to [out_min, out_max]
    return (x - in_min) * (out_max - out_min) / (in_max - in_min) + out_min
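# Illustrative usage only: in the (disabled) tracing loop at the bottom of this
# file, _map() interpolates the pen-down height from the robot X position to
# compensate for a tilted drawing surface, e.g.
#   _map(250, 200, 305, -64.5, -66.5)   # -> roughly -65.45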
# def takePicture(imageName):
#     # Setup camera
#     cap = cv2.VideoCapture(0)
#     # Set a smaller resolution
#     cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
#     cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
#     while True:
#         # Capture frame-by-frame
#         result, frame = cap.read()
#         frame = cv2.flip(frame, 1)
#         cv2.imshow("Webcam", frame)
#         if cv2.waitKey(1) == ord('q'):
#             cv2.imwrite(imageName, frame)
#             break
#     # When everything done, release the capture
#     cap.release()
#     cv2.destroyAllWindows()
# def findROICoords(imageName):
#     image = cv2.imread(imageName)
#     gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
#     face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
#     # Detect faces
#     faces = face_cascade.detectMultiScale(gray, 1.1, 4)
#     # Show each detected face with a rectangle, slightly enlarged by scalex/scaley
#     scalex = 0.9
#     scaley = 0.8
#     number = 1
#     answer = "ERROR"
#     if len(faces) > 0:
#         for (x, y, w, h) in faces:
#             # Convert (x, y, w, h) to corner coordinates (x1, y1, x2, y2)
#             x, y, w, h = int(x*scalex), int(y*scaley), int((x+w)/scalex), int((y+h)/scaley)
#             test = image.copy()
#             cv2.rectangle(test, (x, y), (w, h), (0, 255, 0), 2)
#             cv2.imshow("Faces {}".format(number), test)
#             cv2.waitKey(0)
#             cv2.destroyWindow("Faces {}".format(number))
#             number += 1
#             print(x, y, w, h)
#         choice = 0
#         while (choice <= 0 or choice > len(faces)):
#             choice = int(input("Which face? "))
#         print(faces)
#         answer = faces[choice-1]
#         x, y, w, h = answer[0], answer[1], answer[2], answer[3]
#         x, y, w, h = int(x*scalex), int(y*scaley), int((x+w)/scalex), int((y+h)/scaley)
#         answer = [x, y, w, h]
#     return(answer)
# def getFinalROIPicture():
#     img = cv2.imread(imageName, 0)
#     img = cv2.medianBlur(img, 5)
#     ret, th1 = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)
#     th2 = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
#                                 cv2.THRESH_BINARY, 11, 2)
#     th3 = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
#                                 cv2.THRESH_BINARY, 13, 3)
#     titles = ['Original Image', 'Global Thresholding (v = 127)',
#               'Adaptive Mean Thresholding', 'Adaptive Gaussian Thresholding']
#     images = [img, th1, th2, th3]
#     for i in range(4):
#         plt.subplot(2, 2, i+1), plt.imshow(images[i], 'gray')
#         plt.title(titles[i])
#         plt.xticks([]), plt.yticks([])
#     plt.show()
#     choice = 3   # preset so the prompt below is skipped and th3 (adaptive Gaussian) is returned; set to 0 to ask
#     while choice not in [1, 2, 3]:
#         choice = int(input("Which threshold? "))
#     return(images[choice])
# def filteredContours(edged, contoursShort):
#     # Finding Contours
#     contours, hierarchy = cv2.findContours(edged,
#                                            cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
#     print("Number of Contours found = " + str(len(contours)))
#     # Filter out contours that are too short (isolated points)
#     newcontours = []
#     for i in range(len(contours)):
#         if len(contours[i]) >= contoursShort:
#             newcontours.append(contours[i])
#     print(len(newcontours))
#     # Draw the kept contours on the (global) source image for inspection
#     cv2.drawContours(image, newcontours, -1, (0, 255, 0), 3)
#     cv2.imshow('NewContours', image)
#     cv2.waitKey(0)
#     cv2.imwrite("Contoured.png", image)
#     cv2.destroyAllWindows()
#     return(newcontours)
# def scaleFinder(image, xmin, xmax, ymin, ymax):
#     # Pick the larger image-size / drawing-area ratio so the whole picture
#     # fits inside the robot's drawing area once every point is divided by it
#     rangeImageX = [0, len(image[0])]
#     rangeImageY = [0, len(image)]
#     xScale = rangeImageX[1]/(ymax-ymin)
#     yScale = rangeImageY[1]/(xmax-xmin)
#     if xScale > yScale:
#         scale = xScale
#     else:
#         scale = yScale
#     if scale < 1:
#         scale = 1/scale
#     return(scale)
# dobot.setQueuedCmdClear()
# dobot.setQueuedCmdStartExec()
# dobot.setWaitCmd(1000)
# calibration()
# roiAnswer = "Error"
# while isinstance(roiAnswer, str):   # retry until a face is actually found
#     picture = 1   # preset so the prompt below is skipped and a photo is always taken; set to 0 to ask
#     while picture not in [1, 2]:
#         picture = int(input("Take a photo? (1 = yes, 2 = no) "))
#     if picture == 1:
#         takePicture(imageName)
#     roiAnswer = findROICoords(imageName)
# roi = getFinalROIPicture()
# roi = roi[roiAnswer[1]:roiAnswer[3], roiAnswer[0]:roiAnswer[2]]   # crop to the chosen face
# cv2.imwrite("roi.png", roi)
# cv2.imshow('ROI', roi)
# cv2.waitKey(0)
# image = cv2.imread('roi.png')
# scale = scaleFinder(image, xmin, xmax, ymin, ymax)
# gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)  # Grayscale
# edged = cv2.Canny(gray, 80, 140)  # Canny
# newcontours = filteredContours(edged, contoursShort)  # Find and filter contours
# for line in range(len(newcontours)):  # Rescale each point into robot coordinates
#     for point in range(len(newcontours[line])):
#         for coord in range(len(newcontours[line][point])):
#             newcontours[line][point][coord][0] = newcontours[line][point][coord][0]/scale + xAdder
#             newcontours[line][point][coord][1] = newcontours[line][point][coord][1]/scale + yAdder
'''
# Tracing loop (currently disabled): draw every kept contour with the arm,
# lifting the pen between contours and printing the progress.
leve = 0
for ligne in newcontours:
    for m in range(len(ligne)):
        # Interpolate the pen-down height from the X position to compensate
        # for the drawing surface not being perfectly level
        ztrace = _map(ligne[m][0][0], 200, 305, -64.5, -66.5)
        if m == 0:
            # First point of the contour: approach it with the pen lifted
            displacement(ligne[m][0][0], ligne[m][0][1], zleve)
            leve += 1
        displacement(ligne[m][0][0], ligne[m][0][1], ztrace)
        if m == len(ligne)-1:
            # Last point of the contour: lift the pen and report progress
            displacement(ligne[m][0][0], ligne[m][0][1], zleve)
            time.sleep(2)
            print(round(float(leve)/float(len(newcontours)), 3)*100, "%")
'''
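# Illustrative summary (reconstructed from the commented-out sections above,
# left as comments so nothing runs without a webcam and a connected arm):
#   takePicture(imageName)                      # 1. grab a webcam frame
#   x1, y1, x2, y2 = findROICoords(imageName)   # 2. pick one detected face
#   roi = getFinalROIPicture()[y1:y2, x1:x2]    # 3. threshold and crop it
#   edged = cv2.Canny(gray_roi, 80, 140)        # 4. edge detection (gray_roi = grayscale crop)
#   newcontours = filteredContours(edged, contoursShort)
#   # 5. divide every point by scaleFinder(...), add xAdder/yAdder, then trace
#   #    the contours with displacement() as in the quoted block above.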