commit 7a318e668c (parent de8ad0b82c)
Eliott LAURENT, 2023-04-06 12:03:45 +02:00
4 changed files with 236 additions and 8 deletions


@@ -0,0 +1,15 @@
<?xml version="1.0"?>
<opencv_storage>
<blockSize>15</blockSize>
<numDisparities>16</numDisparities>
<preFilterType>1</preFilterType>
<preFilterSize>9</preFilterSize>
<preFilterCap>5</preFilterCap>
<textureThreshold>10</textureThreshold>
<uniquenessRatio>15</uniquenessRatio>
<speckleRange>0</speckleRange>
<speckleWindowSize>6</speckleWindowSize>
<disp12MaxDiff>5</disp12MaxDiff>
<minDisparity>5</minDisparity>
<M>3.9075000000000003e+01</M>
</opencv_storage>
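These are the tuned StereoBM parameters together with the depth coefficient M; judging by the scripts below, this appears to be the depth-estimation parameter file (depth_estmation_params_py.xml) that dispa2depth.py reads back. A minimal sketch of loading it, with the path assumed from the rest of the commit:

import cv2

# Path assumed from the other scripts in this commit ("data/" vs "../data/" varies)
fs = cv2.FileStorage("data/depth_estmation_params_py.xml", cv2.FILE_STORAGE_READ)
numDisparities = int(fs.getNode("numDisparities").real())
blockSize = int(fs.getNode("blockSize").real())
M = fs.getNode("M").real()   # coefficient of the fit depth = M * (1/disparity)
fs.release()
print(numDisparities, blockSize, M)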

dispa2depth.py (new file, 201 lines)

@@ -0,0 +1,201 @@
import numpy as np
import cv2
import matplotlib
import matplotlib.pyplot as plt
# Check for left and right camera IDs
# These values can change depending on the system
CamL_id = 2 # Camera ID for left camera
CamR_id = 0 # Camera ID for right camera
#CamL= cv2.VideoCapture(CamL_id)
#CamR= cv2.VideoCapture(CamR_id)
retL, imgL= cv2.VideoCapture(CamL_id, cv2.CAP_V4L2).read()
retR, imgR= cv2.VideoCapture(CamR_id, cv2.CAP_V4L2).read()
'''
imgR_gray = cv2.cvtColor(imgR,cv2.COLOR_BGR2GRAY)
imgL_gray = cv2.cvtColor(imgL,cv2.COLOR_BGR2GRAY)
'''
# Reading the mapping values for stereo image rectification
cv_file = cv2.FileStorage("data/params_py.xml", cv2.FILE_STORAGE_READ)
Left_Stereo_Map_x = cv_file.getNode("Left_Stereo_Map_x").mat()
print(Left_Stereo_Map_x)
Left_Stereo_Map_y = cv_file.getNode("Left_Stereo_Map_y").mat()
Right_Stereo_Map_x = cv_file.getNode("Right_Stereo_Map_x").mat()
Right_Stereo_Map_y = cv_file.getNode("Right_Stereo_Map_y").mat()
cv_file.release()
# These parameters can vary according to the setup
# Starting with the target object at max_dist, a disparity value is recorded
# every sample_delta cm as the object is moved closer.
max_dist = 230 # max distance to keep the target object (in cm)
min_dist = 50 # Minimum distance the stereo setup can measure (in cm)
sample_delta = 40 # Distance between two sampling points (in cm)
Z = max_dist
Value_pairs = []
disp_map = np.zeros((600,600,3))
# Reading the stored StereoBM parameters
cv_file = cv2.FileStorage("../data/depth_estmation_params_py.xml", cv2.FILE_STORAGE_READ)
numDisparities = int(cv_file.getNode("numDisparities").real())
blockSize = int(cv_file.getNode("blockSize").real())
preFilterType = int(cv_file.getNode("preFilterType").real())
preFilterSize = int(cv_file.getNode("preFilterSize").real())
preFilterCap = int(cv_file.getNode("preFilterCap").real())
textureThreshold = int(cv_file.getNode("textureThreshold").real())
uniquenessRatio = int(cv_file.getNode("uniquenessRatio").real())
speckleRange = int(cv_file.getNode("speckleRange").real())
speckleWindowSize = int(cv_file.getNode("speckleWindowSize").real())
disp12MaxDiff = int(cv_file.getNode("disp12MaxDiff").real())
minDisparity = int(cv_file.getNode("minDisparity").real())
M = cv_file.getNode("M").real()
cv_file.release()
# Mouse callback: every time the target is moved sample_delta cm closer,
# double-click on it in the 'disp' window to record the (depth, disparity) pair.
def mouse_click(event,x,y,flags,param):
global Z
if event == cv2.EVENT_LBUTTONDBLCLK:
if disparity[y,x] > 0:
Value_pairs.append([Z,disparity[y,x]])
print("Distance: %r cm | Disparity: %r"%(Z,disparity[y,x]))
Z-=sample_delta
cv2.namedWindow('disp',cv2.WINDOW_NORMAL)
cv2.resizeWindow('disp',600,600)
cv2.namedWindow('left image',cv2.WINDOW_NORMAL)
cv2.resizeWindow('left image',600,600)
cv2.setMouseCallback('disp',mouse_click)
# Creating an object of StereoBM algorithm
stereo = cv2.StereoBM_create()
while True:
# Capturing and storing left and right camera images
retL, imgL= cv2.VideoCapture(CamL_id, cv2.CAP_V4L2).read()
retR, imgR= cv2.VideoCapture(CamR_id, cv2.CAP_V4L2).read()
'''
retR, imgR= CamR.read()
retL, imgL= CamL.read()
'''
# Proceed only if the frames have been captured
if retL and retR:
imgR_gray = cv2.cvtColor(imgR,cv2.COLOR_BGR2GRAY)
imgL_gray = cv2.cvtColor(imgL,cv2.COLOR_BGR2GRAY)
'''
cv2.imshow('imgL_gray',imgL_gray)
cv2.imshow('imgR_gray',imgR_gray)
cv2.waitKey(33)
'''
# Applying stereo image rectification on the left image
Left_nice= cv2.remap(imgL_gray,
Left_Stereo_Map_x,
Left_Stereo_Map_y,
cv2.INTER_LANCZOS4,
                         borderMode=cv2.BORDER_CONSTANT,
                         borderValue=0)
# Applying stereo image rectification on the right image
Right_nice= cv2.remap(imgR_gray,
Right_Stereo_Map_x,
Right_Stereo_Map_y,
cv2.INTER_LANCZOS4,
                         borderMode=cv2.BORDER_CONSTANT,
                         borderValue=0)
# Setting the updated parameters before computing disparity map
stereo.setNumDisparities(numDisparities)
stereo.setBlockSize(blockSize)
stereo.setPreFilterType(preFilterType)
stereo.setPreFilterSize(preFilterSize)
stereo.setPreFilterCap(preFilterCap)
stereo.setTextureThreshold(textureThreshold)
stereo.setUniquenessRatio(uniquenessRatio)
stereo.setSpeckleRange(speckleRange)
stereo.setSpeckleWindowSize(speckleWindowSize)
stereo.setDisp12MaxDiff(disp12MaxDiff)
stereo.setMinDisparity(minDisparity)
# Calculating disparity using the StereoBM algorithm
disparity = stereo.compute(Left_nice,Right_nice)
        # NOTE: StereoBM.compute returns a 16-bit signed single-channel
        # image (CV_16S) with disparities scaled by 16, so it has to be
        # converted to float32 (CV_32F) and divided by 16.
# Converting to float32
disparity = disparity.astype(np.float32)
# Scaling down the disparity values and normalizing them
disparity = (disparity/16.0 - minDisparity)/numDisparities
# Displaying the disparity map
cv2.imshow("disp",disparity)
cv2.imshow("left image",imgL)
if cv2.waitKey(1) == 27:
break
if Z < min_dist:
break
else:
print("on est dans le else")
'''
CamL= cv2.VideoCapture(CamL_id)
CamR= cv2.VideoCapture(CamR_id)
'''
# Solving for M in the equation:
#     depth = M * (1/disparity)
# For N data points, coeff is an N x 2 matrix whose rows are [1/disparity, 1]
# and z is an N x 1 vector of the measured depths.
value_pairs = np.array(Value_pairs)
z = value_pairs[:,0]
disp = value_pairs[:,1]
disp_inv = 1/disp
# Plotting the relation depth and corresponding disparity
fig, (ax1,ax2) = plt.subplots(1,2,figsize=(12,6))
ax1.plot(disp, z, 'o-')
ax1.set(xlabel='Normalized disparity value', ylabel='Depth from camera (cm)',
title='Relation between depth \n and corresponding disparity')
ax1.grid()
ax2.plot(disp_inv, z, 'o-')
ax2.set(xlabel='Inverse disparity value (1/disp) ', ylabel='Depth from camera (cm)',
title='Relation between depth \n and corresponding inverse disparity')
ax2.grid()
plt.show()
# Solving for M using least-squares fitting with the QR decomposition method
coeff = np.vstack([disp_inv, np.ones(len(disp_inv))]).T
ret, sol = cv2.solve(coeff,z,flags=cv2.DECOMP_QR)
M = sol[0,0]
C = sol[1,0]
print("Value of M = ",M)
# Storing the updated value of M along with the stereo parameters
cv_file = cv2.FileStorage("../data/depth_estmation_params_py.xml", cv2.FILE_STORAGE_WRITE)
cv_file.write("numDisparities",numDisparities)
cv_file.write("blockSize",blockSize)
cv_file.write("preFilterType",preFilterType)
cv_file.write("preFilterSize",preFilterSize)
cv_file.write("preFilterCap",preFilterCap)
cv_file.write("textureThreshold",textureThreshold)
cv_file.write("uniquenessRatio",uniquenessRatio)
cv_file.write("speckleRange",speckleRange)
cv_file.write("speckleWindowSize",speckleWindowSize)
cv_file.write("disp12MaxDiff",disp12MaxDiff)
cv_file.write("minDisparity",minDisparity)
cv_file.write("M",M)
cv_file.release()
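The script above only stores M; below is a minimal sketch (not part of the commit) of how that value could be used afterwards to turn a normalized disparity map, computed as above, into a depth map in centimetres, following the relation depth = M * (1/disparity) used for the fit. The intercept C is ignored here, just as it is when only M is written to the file, and the function name is made up:

import numpy as np

def disparity_to_depth(disparity, M, min_disp=1e-6):
    # depth = M * (1/disparity); pixels with no valid disparity stay at 0
    depth = np.zeros_like(disparity, dtype=np.float32)
    valid = disparity > min_disp
    depth[valid] = M / disparity[valid]
    return depth

# Example: depth_map = disparity_to_depth(disparity, M)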


@@ -23,6 +23,7 @@ print("la c bon")
# Reading the mapping values for stereo image rectification
cv_file = cv2.FileStorage("data/params_py.xml", cv2.FILE_STORAGE_READ)
Left_Stereo_Map_x = cv_file.getNode("Left_Stereo_Map_x").mat()
print(Left_Stereo_Map_x)
Left_Stereo_Map_y = cv_file.getNode("Left_Stereo_Map_y").mat()
Right_Stereo_Map_x = cv_file.getNode("Right_Stereo_Map_x").mat()
Right_Stereo_Map_y = cv_file.getNode("Right_Stereo_Map_y").mat()
@@ -32,15 +33,18 @@ def nothing(x):
pass
cv2.namedWindow('disp',cv2.WINDOW_NORMAL)
cv2.resizeWindow('disp',600,600)
cv2.resizeWindow('disp',1000,800)
cv2.createTrackbar('numDisparities','disp',1,17,nothing)
cv2.createTrackbar('blockSize','disp',5,50,nothing)
cv2.createTrackbar('preFilterType','disp',1,1,nothing)
cv2.createTrackbar('preFilterSize','disp',2,25,nothing)
cv2.createTrackbar('preFilterCap','disp',5,62,nothing)
cv2.createTrackbar('textureThreshold','disp',10,100,nothing)
cv2.createTrackbar('uniquenessRatio','disp',15,100,nothing)
cv2.createTrackbar('speckleRange','disp',0,100,nothing)
cv2.createTrackbar('speckleWindowSize','disp',3,25,nothing)
cv2.createTrackbar('disp12MaxDiff','disp',5,25,nothing)
@@ -58,6 +62,7 @@ while True:
if retL and retR:
imgR_gray = cv2.cvtColor(imgR,cv2.COLOR_BGR2GRAY)
imgL_gray = cv2.cvtColor(imgL,cv2.COLOR_BGR2GRAY)
# Applying stereo image rectification on the left image
Left_nice= cv2.remap(imgL_gray,
@@ -78,11 +83,13 @@ while True:
# Updating the parameters based on the trackbar positions
numDisparities = cv2.getTrackbarPos('numDisparities','disp')*16
blockSize = cv2.getTrackbarPos('blockSize','disp')*2 + 5
preFilterType = cv2.getTrackbarPos('preFilterType','disp')
preFilterSize = cv2.getTrackbarPos('preFilterSize','disp')*2 + 5
preFilterCap = cv2.getTrackbarPos('preFilterCap','disp')
textureThreshold = cv2.getTrackbarPos('textureThreshold','disp')
uniquenessRatio = cv2.getTrackbarPos('uniquenessRatio','disp')
speckleRange = cv2.getTrackbarPos('speckleRange','disp')
speckleWindowSize = cv2.getTrackbarPos('speckleWindowSize','disp')*2
disp12MaxDiff = cv2.getTrackbarPos('disp12MaxDiff','disp')
@@ -91,11 +98,13 @@ while True:
# Setting the updated parameters before computing disparity map
stereo.setNumDisparities(numDisparities)
stereo.setBlockSize(blockSize)
stereo.setPreFilterType(preFilterType)
stereo.setPreFilterSize(preFilterSize)
stereo.setPreFilterCap(preFilterCap)
stereo.setTextureThreshold(textureThreshold)
stereo.setUniquenessRatio(uniquenessRatio)
stereo.setSpeckleRange(speckleRange)
stereo.setSpeckleWindowSize(speckleWindowSize)
stereo.setDisp12MaxDiff(disp12MaxDiff)
@@ -115,7 +124,6 @@ while True:
# Displaying the disparity map
cv2.imshow("disp",disparity)
# Close window using esc key
if cv2.waitKey(1) == 27:
break
@@ -126,18 +134,21 @@ while True:
print("Saving depth estimation paraeters ......")
cv_file = cv2.FileStorage("../data/depth_estmation_params_py.xml", cv2.FILE_STORAGE_WRITE)
cv_file.write("numDisparities",numDisparities)
cv_file = cv2.FileStorage("data/depth_estmation_params_py.xml", cv2.FILE_STORAGE_WRITE)
cv_file.write("blockSize",blockSize)
print ("aprés le write")
cv_file.write("numDisparities",numDisparities)
cv_file.write("preFilterType",preFilterType)
cv_file.write("preFilterSize",preFilterSize)
cv_file.write("preFilterCap",preFilterCap)
cv_file.write("textureThreshold",textureThreshold)
cv_file.write("uniquenessRatio",uniquenessRatio)
cv_file.write("speckleRange",speckleRange)
cv_file.write("speckleWindowSize",speckleWindowSize)
cv_file.write("disp12MaxDiff",disp12MaxDiff)
cv_file.write("minDisparity",minDisparity)
cv_file.write("M",39.075)
cv_file.release()
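The trackbar positions above are not used directly: StereoBM only accepts numDisparities values that are positive multiples of 16, and blockSize / preFilterSize values that are odd and at least 5, which is why the script maps the sliders with *16 and *2 + 5. A small sketch of that mapping on its own (the helper name is made up):

# Hypothetical helper, not part of the commit: map raw trackbar positions
# to values StereoBM will accept.
def trackbar_to_bm_params(num_disp_pos, block_pos, prefilter_pos):
    return {
        "numDisparities": max(num_disp_pos, 1) * 16,  # positive multiple of 16
        "blockSize": block_pos * 2 + 5,               # odd, >= 5
        "preFilterSize": prefilter_pos * 2 + 5,       # odd, >= 5
    }

# e.g. the trackbar defaults (1, 5, 2) give the values stored in the XML above:
# {'numDisparities': 16, 'blockSize': 15, 'preFilterSize': 9}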


@@ -6,15 +6,16 @@ ID2 = 0
#cam1.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG'))
#cam2 = cv2.VideoCapture(ID1, cv2.CAP_V4L2)
#cam2.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG'))
ret2, frame2 = cv2.VideoCapture(ID1, cv2.CAP_V4L2).read()
ret1, frame1 = cv2.VideoCapture(ID2, cv2.CAP_V4L2).read()
while True:
ret2, frame2 = cv2.VideoCapture(ID1, cv2.CAP_V4L2).read()
ret1, frame1 = cv2.VideoCapture(ID2, cv2.CAP_V4L2).read()
'''
print("ret 1 = ", ret1)
print("ret 2 = ", ret2)
'''
cv2.imshow('Camera 1', frame1)
cv2.imshow('Camera 2', frame2)
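Both capture scripts in this commit now reopen the cameras with cv2.VideoCapture(..., cv2.CAP_V4L2) on every read instead of keeping one capture object per camera; the older approach is still visible in the commented-out lines. A sketch of that persistent-capture variant, with the MJPG setting taken from those comments (ID1 = 2 is an assumption, only ID2 = 0 is visible in this diff):

import cv2

ID1, ID2 = 2, 0  # ID1 assumed; ID2 = 0 as in the diff above

cap_1 = cv2.VideoCapture(ID2, cv2.CAP_V4L2)   # feeds the 'Camera 1' window
cap_2 = cv2.VideoCapture(ID1, cv2.CAP_V4L2)   # feeds the 'Camera 2' window
cap_1.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG'))
cap_2.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG'))

while True:
    ret1, frame1 = cap_1.read()
    ret2, frame2 = cap_2.read()
    if not (ret1 and ret2):
        print("frame capture failed")
        break
    cv2.imshow('Camera 1', frame1)
    cv2.imshow('Camera 2', frame2)
    if cv2.waitKey(1) == 27:  # Esc to quit
        break

cap_1.release()
cap_2.release()
cv2.destroyAllWindows()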