Compare commits

1 commit

| Author | SHA1 | Date |
|---|---|---|
|  | f65b4fa45a |  |

@@ -0,0 +1,85 @@
# retval, corners = cv2.findChessboardCorners(image, patternSize, flags)

import cv2
import numpy as np

# Define the size of the chessboard (inner corners per row and column)
chessboard_size = (8, 5)

# Define the object points of the chessboard
object_points = np.zeros((np.prod(chessboard_size), 3), dtype=np.float32)
object_points[:, :2] = np.mgrid[0:chessboard_size[0], 0:chessboard_size[1]].T.reshape(-1, 2)

# Create arrays to store the object points and image points from all the images
object_points_array = []
image_points_array1 = []
image_points_array2 = []

# Load the images for the first camera
images = []
images.append(cv2.imread("/home/ros/Bureau/ca_ur5/1.jpg"))
# Add more images as needed

# Loop through each image and find the chessboard corners
for image in images:
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # Find the chessboard corners
    found, corners = cv2.findChessboardCorners(gray, chessboard_size, None)

    # If the corners are found, add the object points and image points to the arrays
    if found:
        object_points_array.append(object_points)
        image_points_array1.append(corners)

# Calibrate the first camera using the object points and image points
ret, camera_matrix1, distortion_coefficients1, rotation_vectors1, translation_vectors1 = cv2.calibrateCamera(
    object_points_array, image_points_array1, gray.shape[::-1], None, None)

# Print the camera matrix and distortion coefficients
print("Camera matrix:")
print(camera_matrix1)
print("Distortion coefficients:")
print(distortion_coefficients1)

# Load the images for the second camera
images = []
images.append(cv2.imread("/home/ros/Bureau/ca_ur5/2.jpg"))
# Add more images as needed

# Loop through each image and find the chessboard corners
for image in images:
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # Find the chessboard corners
    found, corners = cv2.findChessboardCorners(gray, chessboard_size, None)

    # If the corners are found, add the image points to the array
    # (the object points were already stored once per view above,
    # so they are shared between the two cameras)
    if found:
        image_points_array2.append(corners)

# Calibrate the second camera using the object points and image points
ret, camera_matrix2, distortion_coefficients2, rotation_vectors2, translation_vectors2 = cv2.calibrateCamera(
    object_points_array, image_points_array2, gray.shape[::-1], None, None)

# Print the camera matrix and distortion coefficients
print("Camera matrix:")
print(camera_matrix2)
print("Distortion coefficients:")
print(distortion_coefficients2)

print("Stereo calib")

flags = 0
flags |= cv2.CALIB_FIX_INTRINSIC
# Here we fix the intrinsic camera matrices so that only Rot, Trns, Emat and Fmat are calculated.
# Hence the intrinsic parameters are the same.

criteria_stereo = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

# This step computes the transformation between the two cameras and calculates the Essential and Fundamental matrices
retS, new_mtx1, dist1, new_mtx2, dist2, Rot, Trns, Emat, Fmat = cv2.stereoCalibrate(
    object_points_array, image_points_array1, image_points_array2,
    camera_matrix1, distortion_coefficients1, camera_matrix2, distortion_coefficients2,
    gray.shape[::-1], criteria=criteria_stereo, flags=flags)
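A quick sanity check that could follow the per-camera calibrations above is the mean reprojection error; this is a hedged sketch reusing the variables from the script, not part of the commit:

# Illustrative only: mean reprojection error for the first camera
total_error = 0
for i in range(len(object_points_array)):
    projected, _ = cv2.projectPoints(object_points_array[i],
                                     rotation_vectors1[i], translation_vectors1[i],
                                     camera_matrix1, distortion_coefficients1)
    error = cv2.norm(image_points_array1[i], projected, cv2.NORM_L2) / len(projected)
    total_error += error
print("Mean reprojection error:", total_error / len(object_points_array))

A value well under one pixel usually indicates a usable calibration.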
@@ -0,0 +1,54 @@
# Set the path to the images captured by the left and right cameras
import cv2
import numpy as np
from tqdm import tqdm

# NOTE: these values are used below as filename prefixes (pathL + "img%d.png" % i)
pathL = "/home/ros/Bureau/ca_ur5/1.jpg"
pathR = "/home/ros/Bureau/ca_ur5/2.jpg"

# Termination criteria for refining the detected corners
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

objp = np.zeros((8*5, 3), np.float32)
objp[:, :2] = np.mgrid[0:8, 0:5].T.reshape(-1, 2)

img_ptsL = []
img_ptsR = []
obj_pts = []

for i in tqdm(range(1, 12)):
    imgL = cv2.imread(pathL+"img%d.png"%i)
    imgR = cv2.imread(pathR+"img%d.png"%i)
    imgL_gray = cv2.imread(pathL+"img%d.png"%i, 0)
    imgR_gray = cv2.imread(pathR+"img%d.png"%i, 0)

    outputL = imgL.copy()
    outputR = imgR.copy()

    retR, cornersR = cv2.findChessboardCorners(outputR, (8,5), None)
    retL, cornersL = cv2.findChessboardCorners(outputL, (8,5), None)

    if retR and retL:
        obj_pts.append(objp)
        cv2.cornerSubPix(imgR_gray, cornersR, (11,11), (-1,-1), criteria)
        cv2.cornerSubPix(imgL_gray, cornersL, (11,11), (-1,-1), criteria)
        cv2.drawChessboardCorners(outputR, (8,5), cornersR, retR)
        cv2.drawChessboardCorners(outputL, (8,5), cornersL, retL)
        cv2.imshow('cornersR', outputR)
        cv2.imshow('cornersL', outputL)
        cv2.waitKey(0)

        img_ptsL.append(cornersL)
        img_ptsR.append(cornersR)

# Calibrating left camera
retL, mtxL, distL, rvecsL, tvecsL = cv2.calibrateCamera(obj_pts, img_ptsL, imgL_gray.shape[::-1], None, None)
hL, wL = imgL_gray.shape[:2]
new_mtxL, roiL = cv2.getOptimalNewCameraMatrix(mtxL, distL, (wL,hL), 1, (wL,hL))

# Calibrating right camera
retR, mtxR, distR, rvecsR, tvecsR = cv2.calibrateCamera(obj_pts, img_ptsR, imgR_gray.shape[::-1], None, None)
hR, wR = imgR_gray.shape[:2]
new_mtxR, roiR = cv2.getOptimalNewCameraMatrix(mtxR, distR, (wR,hR), 1, (wR,hR))
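As a side note, the alpha=1 argument to getOptimalNewCameraMatrix keeps all source pixels, and the returned ROI marks the valid region. A hedged sketch of using these results, not part of the commit, reusing imgL, mtxL, distL, new_mtxL and roiL from above:

# Illustrative only: undistort one left frame and crop to the valid ROI
dst = cv2.undistort(imgL, mtxL, distL, None, new_mtxL)
x, y, w, h = roiL
dst = dst[y:y+h, x:x+w]
cv2.imwrite("undistorted_left.png", dst)  # hypothetical output path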
calibrate.py — 20 changes

@@ -13,14 +13,14 @@ print("Extracting image coordinates of respective 3D pattern ....\n")
  criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

- objp = np.zeros((21*15,3), np.float32)
- objp[:,:2] = np.mgrid[0:21,0:15].T.reshape(-1,2)
+ objp = np.zeros((8*5,3), np.float32)
+ objp[:,:2] = np.mgrid[0:8,0:5].T.reshape(-1,2)

  img_ptsL = []
  img_ptsR = []
  obj_pts = []

- for i in tqdm(range(1,6)): #number of images
+ for i in tqdm(range(1,5)):
      imgL = cv2.imread(pathL+"L%d.jpg"%i)
      imgR = cv2.imread(pathR+"R%d.jpg"%i)
      imgL_gray = cv2.imread(pathL+"L%d.jpg"%i,0)
@@ -30,22 +30,16 @@ for i in tqdm(range(1,6)): #number of images

      outputL = imgL.copy()
      outputR = imgR.copy()

-     retR, cornersR = cv2.findChessboardCorners(outputR,(8,5),None)
-     retL, cornersL = cv2.findChessboardCorners(outputL,(8,5),None)
+     retR, cornersR = cv2.findChessboardCorners(outputR,(21,15),None)
+     retL, cornersL = cv2.findChessboardCorners(outputL,(21,15),None)
+     print("ret R is",retR)
+     print("ret L is",retL)

      if retR and retL:
          obj_pts.append(objp)
          cv2.cornerSubPix(imgR_gray,cornersR,(11,11),(-1,-1),criteria)
          cv2.cornerSubPix(imgL_gray,cornersL,(11,11),(-1,-1),criteria)
-         cv2.drawChessboardCorners(outputR,(21,15),cornersR,retR)
-         cv2.drawChessboardCorners(outputL,(21,15),cornersL,retL)
+         cv2.drawChessboardCorners(outputR,(8,5),cornersR,retR)
+         cv2.drawChessboardCorners(outputL,(8,5),cornersL,retL)
          cv2.imshow('cornersR',outputR)
          cv2.imshow('cornersL',outputL)
          cv2.waitKey(0)
Binary images changed:

Before: 91 KiB → After: 65 KiB
Before: 93 KiB → After: 69 KiB
Before: 91 KiB → After: 65 KiB
Before: 93 KiB → After: 63 KiB
Before: 98 KiB (deleted)
Before: 98 KiB → After: 65 KiB
Before: 98 KiB → After: 70 KiB
Before: 96 KiB → After: 65 KiB
Before: 100 KiB → After: 64 KiB
Before: 95 KiB (deleted)
@@ -1,15 +0,0 @@
<?xml version="1.0"?>
<opencv_storage>
<blockSize>15</blockSize>
<numDisparities>16</numDisparities>
<preFilterType>1</preFilterType>
<preFilterSize>9</preFilterSize>
<preFilterCap>5</preFilterCap>
<textureThreshold>10</textureThreshold>
<uniquenessRatio>15</uniquenessRatio>
<speckleRange>0</speckleRange>
<speckleWindowSize>6</speckleWindowSize>
<disp12MaxDiff>5</disp12MaxDiff>
<minDisparity>5</minDisparity>
<M>3.9075000000000003e+01</M>
</opencv_storage>
data/params_py.xml — 406164 changes

dispa2depth.py — 210 changes
@@ -1,210 +0,0 @@
import numpy as np
import cv2
import matplotlib
import matplotlib.pyplot as plt

# Check for left and right camera IDs
# These values can change depending on the system
CamL_id = 2 # Camera ID for left camera
CamR_id = 0 # Camera ID for right camera

#CamL= cv2.VideoCapture(CamL_id)
#CamR= cv2.VideoCapture(CamR_id)
retL, imgL = cv2.VideoCapture(CamL_id, cv2.CAP_V4L2).read()
retR, imgR = cv2.VideoCapture(CamR_id, cv2.CAP_V4L2).read()
'''
imgR_gray = cv2.cvtColor(imgR,cv2.COLOR_BGR2GRAY)
imgL_gray = cv2.cvtColor(imgL,cv2.COLOR_BGR2GRAY)
'''

# Reading the mapping values for stereo image rectification
cv_file = cv2.FileStorage("data/params_py.xml", cv2.FILE_STORAGE_READ)
Left_Stereo_Map_x = cv_file.getNode("Left_Stereo_Map_x").mat()
#print(Left_Stereo_Map_x)
Left_Stereo_Map_y = cv_file.getNode("Left_Stereo_Map_y").mat()
Right_Stereo_Map_x = cv_file.getNode("Right_Stereo_Map_x").mat()
Right_Stereo_Map_y = cv_file.getNode("Right_Stereo_Map_y").mat()
cv_file.release()

# These parameters can vary according to the setup
# Keeping the target object at max_dist we store disparity values
# after every sample_delta distance.
max_dist = 230 # max distance to keep the target object (in cm)
min_dist = 50 # Minimum distance the stereo setup can measure (in cm)
sample_delta = 40 # Distance between two sampling points (in cm)

Z = max_dist
Value_pairs = []

disp_map = np.zeros((600,600,3))

# Reading the stored StereoBM parameters
cv_file = cv2.FileStorage("../data/depth_estmation_params_py.xml", cv2.FILE_STORAGE_READ)
numDisparities = int(cv_file.getNode("numDisparities").real())
blockSize = int(cv_file.getNode("blockSize").real())
preFilterType = int(cv_file.getNode("preFilterType").real())
preFilterSize = int(cv_file.getNode("preFilterSize").real())
preFilterCap = int(cv_file.getNode("preFilterCap").real())
textureThreshold = int(cv_file.getNode("textureThreshold").real())
uniquenessRatio = int(cv_file.getNode("uniquenessRatio").real())
speckleRange = int(cv_file.getNode("speckleRange").real())
speckleWindowSize = int(cv_file.getNode("speckleWindowSize").real())
disp12MaxDiff = int(cv_file.getNode("disp12MaxDiff").real())
minDisparity = int(cv_file.getNode("minDisparity").real())
M = cv_file.getNode("M").real()
cv_file.release()

# Defining callback functions for mouse events
def mouse_click(event,x,y,flags,param):
    global Z
    if event == cv2.EVENT_LBUTTONDBLCLK:
        if disparity[y,x] > 0:
            Value_pairs.append([Z,disparity[y,x]])
            print("Distance: %r cm | Disparity: %r"%(Z,disparity[y,x]))
            Z -= sample_delta

cv2.namedWindow('disp',cv2.WINDOW_NORMAL)
cv2.resizeWindow('disp',600,600)
cv2.namedWindow('left image',cv2.WINDOW_NORMAL)
cv2.resizeWindow('left image',600,600)
cv2.setMouseCallback('disp',mouse_click)

# Creating an object of StereoBM algorithm
stereo = cv2.StereoBM_create()

while True:

    # Capturing and storing left and right camera images
    retL, imgL = cv2.VideoCapture(CamL_id, cv2.CAP_V4L2).read()
    retR, imgR = cv2.VideoCapture(CamR_id, cv2.CAP_V4L2).read()
    '''
    cv2.imshow('imgL',imgL)
    cv2.waitKey(0)

    retR, imgR= CamR.read()
    retL, imgL= CamL.read()
    '''
    # Proceed only if the frames have been captured
    if retL and retR:
        imgR_gray = cv2.cvtColor(imgR,cv2.COLOR_BGR2GRAY)
        imgL_gray = cv2.cvtColor(imgL,cv2.COLOR_BGR2GRAY)

        '''
        cv2.imshow('imgL_gray',imgL_gray)
        cv2.imshow('imgR_gray',imgR_gray)
        cv2.waitKey(0)
        '''

        # Applying stereo image rectification on the left image
        Left_nice = cv2.remap(imgL_gray,
                              Left_Stereo_Map_x,
                              Left_Stereo_Map_y,
                              cv2.INTER_LANCZOS4,
                              cv2.BORDER_CONSTANT,
                              0)
        cv2.imshow('imgL nice',Left_nice)
        cv2.waitKey(0)
        #print(Left_nice)
        # Applying stereo image rectification on the right image
        Right_nice = cv2.remap(imgR_gray,
                               Right_Stereo_Map_x,
                               Right_Stereo_Map_y,
                               cv2.INTER_LANCZOS4,
                               cv2.BORDER_CONSTANT,
                               0)
        cv2.imshow('imgR nice',Right_nice)
        cv2.waitKey(0)

        # Setting the updated parameters before computing disparity map
        stereo.setNumDisparities(numDisparities)
        stereo.setBlockSize(blockSize)
        stereo.setPreFilterType(preFilterType)
        stereo.setPreFilterSize(preFilterSize)
        stereo.setPreFilterCap(preFilterCap)
        stereo.setTextureThreshold(textureThreshold)
        stereo.setUniquenessRatio(uniquenessRatio)
        stereo.setSpeckleRange(speckleRange)
        stereo.setSpeckleWindowSize(speckleWindowSize)
        stereo.setDisp12MaxDiff(disp12MaxDiff)
        stereo.setMinDisparity(minDisparity)

        # Calculating disparity using the StereoBM algorithm
        disparity = stereo.compute(Left_nice,Right_nice)
        # NOTE: compute returns a 16-bit signed single-channel image (CV_16S)
        # containing a disparity map scaled by 16. Hence it is essential to
        # convert it to CV_32F and scale it down 16 times.

        # Converting to float32
        disparity = disparity.astype(np.float32)

        # Scaling down the disparity values and normalizing them
        disparity = (disparity/16.0 - minDisparity)/numDisparities

        # Displaying the disparity map
        cv2.imshow("disp",disparity)
        cv2.imshow("left image",imgL)

        if cv2.waitKey(1) == 27:
            break

        if Z < min_dist:
            break

    else:
        print("we are in the else branch")
        '''
        CamL= cv2.VideoCapture(CamL_id)
        CamR= cv2.VideoCapture(CamR_id)
        '''

# Solving for M in the following equation
# || depth = M * (1/disparity) ||
# For N data points, coeff is an Nx2 matrix with values
# 1/disparity, 1
# and depth is an Nx1 matrix with depth values

value_pairs = np.array(Value_pairs)
z = value_pairs[:,0]
disp = value_pairs[:,1]
disp_inv = 1/disp

# Plotting the relation between depth and corresponding disparity
fig, (ax1,ax2) = plt.subplots(1,2,figsize=(12,6))
ax1.plot(disp, z, 'o-')
ax1.set(xlabel='Normalized disparity value', ylabel='Depth from camera (cm)',
        title='Relation between depth \n and corresponding disparity')
ax1.grid()
ax2.plot(disp_inv, z, 'o-')
ax2.set(xlabel='Inverse disparity value (1/disp) ', ylabel='Depth from camera (cm)',
        title='Relation between depth \n and corresponding inverse disparity')
ax2.grid()
plt.show()

# Solving for M using least-squares fitting with the QR decomposition method
coeff = np.vstack([disp_inv, np.ones(len(disp_inv))]).T
ret, sol = cv2.solve(coeff,z,flags=cv2.DECOMP_QR)
M = sol[0,0]
C = sol[1,0]
print("Value of M = ",M)

# Storing the updated value of M along with the stereo parameters
cv_file = cv2.FileStorage("../data/depth_estmation_params_py.xml", cv2.FILE_STORAGE_WRITE)
cv_file.write("numDisparities",numDisparities)
cv_file.write("blockSize",blockSize)
cv_file.write("preFilterType",preFilterType)
cv_file.write("preFilterSize",preFilterSize)
cv_file.write("preFilterCap",preFilterCap)
cv_file.write("textureThreshold",textureThreshold)
cv_file.write("uniquenessRatio",uniquenessRatio)
cv_file.write("speckleRange",speckleRange)
cv_file.write("speckleWindowSize",speckleWindowSize)
cv_file.write("disp12MaxDiff",disp12MaxDiff)
cv_file.write("minDisparity",minDisparity)
cv_file.write("M",M)
cv_file.release()
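Once M is fitted from depth = M * (1/disparity), recovering depth from a fresh disparity map is the same model applied per pixel. A hedged sketch, not part of the commit, assuming disparity is the normalized map computed in the loop above:

# Illustrative only: depth map from a disparity map using the fitted M
valid = disparity > 0                    # ignore pixels with no valid disparity
depth_map = np.zeros_like(disparity)
depth_map[valid] = M / disparity[valid]  # depth in cm, matching the sampling units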
@@ -4,47 +4,43 @@ import cv2

  # Check for left and right camera IDs
  # These values can change depending on the system
- CamL_id = 0 # Camera ID for left camera
- CamR_id = 2 # Camera ID for right camera
+ CamL_id = 6 # Camera ID for left camera
+ CamR_id = 8 # Camera ID for right camera

  #CamL= cv2.VideoCapture(CamL_id)
  #CamR= cv2.VideoCapture(CamR_id)

  retL, imgL = cv2.VideoCapture(CamL_id, cv2.CAP_V4L2).read()
  retR, imgR = cv2.VideoCapture(CamR_id, cv2.CAP_V4L2).read()
  print("the rets are", retL, retR)
  imgR_gray = cv2.cvtColor(imgR,cv2.COLOR_BGR2GRAY)
  imgL_gray = cv2.cvtColor(imgL,cv2.COLOR_BGR2GRAY)

  print("ok so far")
  CamR = cv2.VideoCapture(CamR_id, cv2.CAP_V4L2)
  CamL = cv2.VideoCapture(CamL_id, cv2.CAP_V4L2)
  '''
  while True:
      ret, frame = CamR.read()
      if ret:
          codec = CamR.get(cv2.CAP_PROP_FOURCC)
          print("codec: ", codec)
          sdfgh
  '''
  # Reading the mapping values for stereo image rectification
  cv_file = cv2.FileStorage("data/params_py.xml", cv2.FILE_STORAGE_READ)

  Left_Stereo_Map_x = cv_file.getNode("Left_Stereo_Map_x").mat()
  #print(Left_Stereo_Map_x)

  Left_Stereo_Map_y = cv_file.getNode("Left_Stereo_Map_y").mat()
  Right_Stereo_Map_x = cv_file.getNode("Right_Stereo_Map_x").mat()
  Right_Stereo_Map_y = cv_file.getNode("Right_Stereo_Map_y").mat()
  cv_file.release()

  def nothing(x):
      pass

  cv2.namedWindow('disp',cv2.WINDOW_NORMAL)
- cv2.resizeWindow('disp',1000,800)
+ cv2.resizeWindow('disp',600,600)

  cv2.createTrackbar('numDisparities','disp',1,17,nothing)
  cv2.createTrackbar('blockSize','disp',5,50,nothing)
  cv2.createTrackbar('preFilterType','disp',1,1,nothing)
  cv2.createTrackbar('preFilterSize','disp',2,25,nothing)
  cv2.createTrackbar('preFilterCap','disp',5,62,nothing)
  cv2.createTrackbar('textureThreshold','disp',10,100,nothing)
  cv2.createTrackbar('uniquenessRatio','disp',15,100,nothing)
  cv2.createTrackbar('speckleRange','disp',0,100,nothing)
  cv2.createTrackbar('speckleWindowSize','disp',3,25,nothing)
  cv2.createTrackbar('disp12MaxDiff','disp',5,25,nothing)
@@ -52,17 +48,18 @@ cv2.createTrackbar('minDisparity','disp',5,25,nothing)

  # Creating an object of StereoBM algorithm
  stereo = cv2.StereoBM_create()

  while True:

      # Capturing and storing left and right camera images
-     retL, imgL = cv2.VideoCapture(CamL_id, cv2.CAP_V4L2).read()
-     retR, imgR = cv2.VideoCapture(CamR_id, cv2.CAP_V4L2).read()
+     retL, imgL = CamL.read()
+     retR, imgR = CamR.read()
+
+     cv2.waitKey(0)
      # Proceed only if the frames have been captured
      if retL and retR:
          imgR_gray = cv2.cvtColor(imgR,cv2.COLOR_BGR2GRAY)
          imgL_gray = cv2.cvtColor(imgL,cv2.COLOR_BGR2GRAY)

          # Applying stereo image rectification on the left image
          Left_nice = cv2.remap(imgL_gray,
@@ -83,13 +80,11 @@ while True:

      # Updating the parameters based on the trackbar positions
      numDisparities = cv2.getTrackbarPos('numDisparities','disp')*16
      blockSize = cv2.getTrackbarPos('blockSize','disp')*2 + 5
      preFilterType = cv2.getTrackbarPos('preFilterType','disp')
      preFilterSize = cv2.getTrackbarPos('preFilterSize','disp')*2 + 5
      preFilterCap = cv2.getTrackbarPos('preFilterCap','disp')
      textureThreshold = cv2.getTrackbarPos('textureThreshold','disp')
      uniquenessRatio = cv2.getTrackbarPos('uniquenessRatio','disp')
      speckleRange = cv2.getTrackbarPos('speckleRange','disp')
      speckleWindowSize = cv2.getTrackbarPos('speckleWindowSize','disp')*2
      disp12MaxDiff = cv2.getTrackbarPos('disp12MaxDiff','disp')
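(For reference: StereoBM requires numDisparities to be a positive multiple of 16 and blockSize to be an odd value in roughly the 5–255 range, which is why the trackbar positions are scaled by 16 and by 2 with an offset of 5 above.)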
@@ -98,13 +93,11 @@ while True:

      # Setting the updated parameters before computing disparity map
      stereo.setNumDisparities(numDisparities)
      stereo.setBlockSize(blockSize)
      stereo.setPreFilterType(preFilterType)
      stereo.setPreFilterSize(preFilterSize)
      stereo.setPreFilterCap(preFilterCap)
      stereo.setTextureThreshold(textureThreshold)
      stereo.setUniquenessRatio(uniquenessRatio)
      stereo.setSpeckleRange(speckleRange)
      stereo.setSpeckleWindowSize(speckleWindowSize)
      stereo.setDisp12MaxDiff(disp12MaxDiff)
@@ -124,6 +117,7 @@ while True:

      # Displaying the disparity map
      cv2.imshow("disp",disparity)

+     # Close window using esc key
      if cv2.waitKey(1) == 27:
          break
@@ -131,24 +125,21 @@ while True:

  else:
      CamL = cv2.VideoCapture(CamL_id)
      CamR = cv2.VideoCapture(CamR_id)
      cv2.waitKey(0)

- print("Saving depth estimation paraeters ......")
+ print("Saving depth estimation parameters ......")

- cv_file = cv2.FileStorage("data/depth_estmation_params_py.xml", cv2.FILE_STORAGE_WRITE)
- cv_file.write("blockSize",blockSize)
- print("after the write")
+ cv_file = cv2.FileStorage("../data/depth_estmation_params_py.xml", cv2.FILE_STORAGE_WRITE)
  cv_file.write("numDisparities",numDisparities)
+ cv_file.write("blockSize",blockSize)
  cv_file.write("preFilterType",preFilterType)
  cv_file.write("preFilterSize",preFilterSize)
  cv_file.write("preFilterCap",preFilterCap)
  cv_file.write("textureThreshold",textureThreshold)
  cv_file.write("uniquenessRatio",uniquenessRatio)
  cv_file.write("speckleRange",speckleRange)
  cv_file.write("speckleWindowSize",speckleWindowSize)
  cv_file.write("disp12MaxDiff",disp12MaxDiff)
  cv_file.write("minDisparity",minDisparity)
  cv_file.write("M",39.075)
  cv_file.release()
@@ -0,0 +1,12 @@
import cv2
import numpy as np
flags = 0
flags |= cv2.CALIB_FIX_INTRINSIC
# Here we fix the intrinsic camera matrices so that only Rot, Trns, Emat and Fmat are calculated.
# Hence the intrinsic parameters are the same.

criteria_stereo = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

# This step computes the transformation between the two cameras and calculates the Essential and Fundamental matrices
retS, new_mtxL, distL, new_mtxR, distR, Rot, Trns, Emat, Fmat = cv2.stereoCalibrate(
    obj_pts, img_ptsL, img_ptsR, new_mtxL, distL, new_mtxR, distR,
    imgL_gray.shape[::-1], criteria=criteria_stereo, flags=flags)
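One step this commit does not show: the Left/Right_Stereo_Map_x/_y tables read from data/params_py.xml elsewhere in this comparison would typically be derived from these stereo extrinsics via cv2.stereoRectify and cv2.initUndistortRectifyMap. A hedged sketch under that assumption, reusing the names from the snippet above plus the image size:

# Illustrative only: rectification maps from the stereoCalibrate results
rect_l, rect_r, proj_l, proj_r, Q, roi_l, roi_r = cv2.stereoRectify(
    new_mtxL, distL, new_mtxR, distR, imgL_gray.shape[::-1], Rot, Trns)
Left_Stereo_Map = cv2.initUndistortRectifyMap(new_mtxL, distL, rect_l, proj_l,
                                              imgL_gray.shape[::-1], cv2.CV_16SC2)
Right_Stereo_Map = cv2.initUndistortRectifyMap(new_mtxR, distR, rect_r, proj_r,
                                               imgL_gray.shape[::-1], cv2.CV_16SC2)
# Left_Stereo_Map[0] / [1] would be written out as Left_Stereo_Map_x / _y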
test12.py — 25 changes

@@ -1,28 +1,21 @@
  import cv2

- ID1 = 2
- ID2 = 0
- #cam1 = cv2.VideoCapture(ID2, cv2.CAP_V4L2)
- #cam1.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG'))
- #cam2 = cv2.VideoCapture(ID1, cv2.CAP_V4L2)
- #cam2.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG'))
- ret2, frame2 = cv2.VideoCapture(ID1, cv2.CAP_V4L2).read()
- ret1, frame1 = cv2.VideoCapture(ID2, cv2.CAP_V4L2).read()
+ ID1 = 6
+ ID2 = 8
+ cam1 = cv2.VideoCapture(ID1, cv2.CAP_V4L2)
+ cam1.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG'))
+ cam2 = cv2.VideoCapture(ID2, cv2.CAP_V4L2)
+ cam2.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG'))

  while True:
-     ret2, frame2 = cv2.VideoCapture(ID1, cv2.CAP_V4L2).read()
-     ret1, frame1 = cv2.VideoCapture(ID2, cv2.CAP_V4L2).read()
+     ret1, frame1 = cam1.read()
+     ret2, frame2 = cam2.read()
      '''
      print("ret 1 = ", ret1)
      print("ret 2 = ", ret2)
      '''
      cv2.imshow('Camera 1', frame1)
      cv2.imshow('Camera 2', frame2)

      if cv2.waitKey(1) == ord('q'):
          break

+ cam1.release()
+ cam2.release()
  cv2.destroyAllWindows()