session240523

This commit is contained in:
Eliott LAURENT 2023-05-24 17:24:14 +02:00
parent b050c1a5f4
commit 0f22300a45
16 changed files with 224426 additions and 181919 deletions

View File

@ -1,85 +0,0 @@
# Stereo camera calibration from one chessboard view per camera.
# NOTE(review): a single view per camera gives a poorly constrained
# calibration; more images per camera are strongly recommended.
import cv2
import numpy as np

# Chessboard inner-corner grid (columns, rows).
chessboard_size = (8, 5)

# 3D object points of the board corners in the board plane (z = 0),
# expressed in units of one square.
object_points = np.zeros((np.prod(chessboard_size), 3), dtype=np.float32)
object_points[:, :2] = np.mgrid[0:chessboard_size[0], 0:chessboard_size[1]].T.reshape(-1, 2)


def _collect_corners(image_paths):
    """Detect full chessboards in each image.

    Returns (object_points_list, image_points_list, image_size) where the
    two lists have one entry per image in which the whole board was found,
    and image_size is (width, height) of the last grayscale image.
    """
    obj_list, img_list = [], []
    gray = None
    for path in image_paths:
        image = cv2.imread(path)
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        found, corners = cv2.findChessboardCorners(gray, chessboard_size, None)
        if found:
            obj_list.append(object_points)
            img_list.append(corners)
    return obj_list, img_list, gray.shape[::-1]


# --- Camera 1 ----------------------------------------------------------
# Keep separate object/image point lists per camera: cv2.calibrateCamera
# requires len(objectPoints) == len(imagePoints).  (The original script
# reused one growing object-point list for both calls, which made the
# second call fail with mismatched list lengths.)
obj_pts1, img_pts1, image_size = _collect_corners(["/home/ros/Bureau/ca_ur5/1.jpg"])
ret1, camera_matrix1, dist_coeffs1, rvecs1, tvecs1 = cv2.calibrateCamera(
    obj_pts1, img_pts1, image_size, None, None)
print("Camera matrix:")
print(camera_matrix1)
print("Distortion coefficients:")
print(dist_coeffs1)

# --- Camera 2 ----------------------------------------------------------
obj_pts2, img_pts2, image_size = _collect_corners(["/home/ros/Bureau/ca_ur5/2.jpg"])
ret2, camera_matrix2, dist_coeffs2, rvecs2, tvecs2 = cv2.calibrateCamera(
    obj_pts2, img_pts2, image_size, None, None)
print("Camera matrix:")
print(camera_matrix2)
print("Distortion coefficients:")
print(dist_coeffs2)

print("Stereo calib")
# Fix the intrinsic camera matrices so that only Rot, Trns, Emat and Fmat
# are calculated; the intrinsic parameters stay as computed above.
flags = 0
flags |= cv2.CALIB_FIX_INTRINSIC
criteria_stereo = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

# Estimate the transform between the two cameras and the Essential and
# Fundamental matrices.  stereoCalibrate needs the SAME number of views
# from both cameras, so pair up only views found in both.
# (The original call referenced new_mtxL/distL/new_mtxR/distR/imgL_gray,
# which were never defined here, and passed criteria positionally into
# the R output slot — both fixed below.)
n_pairs = min(len(img_pts1), len(img_pts2))
retS, new_mtx1, dist1, new_mtx2, dist2, Rot, Trns, Emat, Fmat = cv2.stereoCalibrate(
    obj_pts1[:n_pairs], img_pts1[:n_pairs], img_pts2[:n_pairs],
    camera_matrix1, dist_coeffs1, camera_matrix2, dist_coeffs2,
    image_size, criteria=criteria_stereo, flags=flags)

View File

@ -1,54 +0,0 @@
# Chessboard corner collection and per-camera intrinsic calibration for a
# stereo pair (left/right image series).
import cv2
import numpy as np
# Fixed: `import tqdm as tqdm` binds the MODULE, and `tqdm(range(...))`
# then raises "TypeError: 'module' object is not callable".  The callable
# progress-bar class must be imported from the package.
from tqdm import tqdm

# NOTE(review): these are used below as directory prefixes
# (pathL + "img%d.png"), yet they point at single .jpg files — the
# resulting paths cannot exist.  Confirm the intended directories.
pathL = "/home/ros/Bureau/ca_ur5/1.jpg"
pathR = "/home/ros/Bureau/ca_ur5/2.jpg"

# Termination criteria for refining the detected corners (cornerSubPix).
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

# Planar chessboard object points: 8x5 inner corners, z = 0.
objp = np.zeros((8 * 5, 3), np.float32)
objp[:, :2] = np.mgrid[0:8, 0:5].T.reshape(-1, 2)

img_ptsL = []
img_ptsR = []
obj_pts = []

for i in tqdm(range(1, 12)):
    imgL = cv2.imread(pathL + "img%d.png" % i)
    imgR = cv2.imread(pathR + "img%d.png" % i)
    imgL_gray = cv2.imread(pathL + "img%d.png" % i, 0)
    imgR_gray = cv2.imread(pathR + "img%d.png" % i, 0)
    outputL = imgL.copy()
    outputR = imgR.copy()
    retR, cornersR = cv2.findChessboardCorners(outputR, (8, 5), None)
    retL, cornersL = cv2.findChessboardCorners(outputL, (8, 5), None)
    # Keep a view only when the full board is found in BOTH images, so the
    # left and right point lists stay paired view-for-view.
    if retR and retL:
        obj_pts.append(objp)
        # cornerSubPix refines the corner arrays in place.
        cv2.cornerSubPix(imgR_gray, cornersR, (11, 11), (-1, -1), criteria)
        cv2.cornerSubPix(imgL_gray, cornersL, (11, 11), (-1, -1), criteria)
        cv2.drawChessboardCorners(outputR, (8, 5), cornersR, retR)
        cv2.drawChessboardCorners(outputL, (8, 5), cornersL, retL)
        cv2.imshow('cornersR', outputR)
        cv2.imshow('cornersL', outputL)
        cv2.waitKey(0)
        img_ptsL.append(cornersL)
        img_ptsR.append(cornersR)

# Calibrating left camera; alpha=1 in getOptimalNewCameraMatrix keeps all
# source pixels and roiL marks the all-valid-pixel region.
retL, mtxL, distL, rvecsL, tvecsL = cv2.calibrateCamera(
    obj_pts, img_ptsL, imgL_gray.shape[::-1], None, None)
hL, wL = imgL_gray.shape[:2]
new_mtxL, roiL = cv2.getOptimalNewCameraMatrix(mtxL, distL, (wL, hL), 1, (wL, hL))

# Calibrating right camera.
retR, mtxR, distR, rvecsR, tvecsR = cv2.calibrateCamera(
    obj_pts, img_ptsR, imgR_gray.shape[::-1], None, None)
hR, wR = imgR_gray.shape[:2]
new_mtxR, roiR = cv2.getOptimalNewCameraMatrix(mtxR, distR, (wR, hR), 1, (wR, hR))

View File

@ -13,14 +13,14 @@ print("Extracting image coordinates of respective 3D pattern ....\n")
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
objp = np.zeros((8*5,3), np.float32)
objp[:,:2] = np.mgrid[0:8,0:5].T.reshape(-1,2)
objp = np.zeros((21*15,3), np.float32)
objp[:,:2] = np.mgrid[0:21,0:15].T.reshape(-1,2)
img_ptsL = []
img_ptsR = []
obj_pts = []
for i in tqdm(range(1,5)):
for i in tqdm(range(1,6)): #number of images
imgL = cv2.imread(pathL+"L%d.jpg"%i)
imgR = cv2.imread(pathR+"R%d.jpg"%i)
imgL_gray = cv2.imread(pathL+"L%d.jpg"%i,0)
@ -30,16 +30,22 @@ for i in tqdm(range(1,5)):
outputL = imgL.copy()
outputR = imgR.copy()
retR, cornersR = cv2.findChessboardCorners(outputR,(8,5),None)
retL, cornersL = cv2.findChessboardCorners(outputL,(8,5),None)
retR, cornersR = cv2.findChessboardCorners(outputR,(21,15),None)
retL, cornersL = cv2.findChessboardCorners(outputL,(21,15),None)
print("ret R is",retR)
print("ret L is",retL)
if retR and retL:
obj_pts.append(objp)
cv2.cornerSubPix(imgR_gray,cornersR,(11,11),(-1,-1),criteria)
cv2.cornerSubPix(imgL_gray,cornersL,(11,11),(-1,-1),criteria)
cv2.drawChessboardCorners(outputR,(8,5),cornersR,retR)
cv2.drawChessboardCorners(outputL,(8,5),cornersL,retL)
cv2.drawChessboardCorners(outputR,(21,15),cornersR,retR)
cv2.drawChessboardCorners(outputL,(21,15),cornersL,retL)
cv2.imshow('cornersR',outputR)
cv2.imshow('cornersL',outputL)
cv2.waitKey(0)

Binary file not shown.

Before

Width:  |  Height:  |  Size: 65 KiB

After

Width:  |  Height:  |  Size: 91 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 69 KiB

After

Width:  |  Height:  |  Size: 93 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 65 KiB

After

Width:  |  Height:  |  Size: 91 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 63 KiB

After

Width:  |  Height:  |  Size: 93 KiB

BIN
damier/Left/L5.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 98 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 65 KiB

After

Width:  |  Height:  |  Size: 98 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 70 KiB

After

Width:  |  Height:  |  Size: 98 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 65 KiB

After

Width:  |  Height:  |  Size: 96 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 64 KiB

After

Width:  |  Height:  |  Size: 100 KiB

BIN
damier/Right/R5.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 95 KiB

File diff suppressed because it is too large Load Diff

View File

@ -4,8 +4,8 @@ import cv2
# Check for left and right camera IDs
# These values can change depending on the system
CamL_id = 2# Camera ID for left camera
CamR_id = 0# Camera ID for right camera
CamL_id = 0# Camera ID for left camera
CamR_id = 2# Camera ID for right camera
#CamL= cv2.VideoCapture(CamL_id)
#CamR= cv2.VideoCapture(CamR_id)
@ -15,7 +15,7 @@ CamR_id = 0# Camera ID for right camera
retL, imgL= cv2.VideoCapture(CamL_id, cv2.CAP_V4L2).read()
retR, imgR= cv2.VideoCapture(CamR_id, cv2.CAP_V4L2).read()
print("les ret c'est",retL, retR)
imgR_gray = cv2.cvtColor(imgR,cv2.COLOR_BGR2GRAY)
imgL_gray = cv2.cvtColor(imgL,cv2.COLOR_BGR2GRAY)
@ -23,7 +23,7 @@ print("la c bon")
# Reading the mapping values for stereo image rectification
cv_file = cv2.FileStorage("data/params_py.xml", cv2.FILE_STORAGE_READ)
Left_Stereo_Map_x = cv_file.getNode("Left_Stereo_Map_x").mat()
print(Left_Stereo_Map_x)
#print(Left_Stereo_Map_x)
Left_Stereo_Map_y = cv_file.getNode("Left_Stereo_Map_y").mat()
Right_Stereo_Map_x = cv_file.getNode("Right_Stereo_Map_x").mat()
Right_Stereo_Map_y = cv_file.getNode("Right_Stereo_Map_y").mat()
@ -132,7 +132,7 @@ while True:
CamL= cv2.VideoCapture(CamL_id)
CamR= cv2.VideoCapture(CamR_id)
print("Saving depth estimation paraeters ......")
print("Saving depth estimation parameters ......")
cv_file = cv2.FileStorage("data/depth_estmation_params_py.xml", cv2.FILE_STORAGE_WRITE)
cv_file.write("blockSize",blockSize)

View File

@ -1,12 +0,0 @@
import cv2
import numpy as np
flags = 0
flags |= cv2.CALIB_FIX_INTRINSIC
# Here we fix the intrinsic camara matrixes so that only Rot, Trns, Emat and Fmat are calculated.
# Hence intrinsic parameters are the same
criteria_stereo= (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
# This step is performed to transformation between the two cameras and calculate Essential and Fundamenatl matrix
retS, new_mtxL, distL, new_mtxR, distR, Rot, Trns, Emat, Fmat = cv2.stereoCalibrate(obj_pts, img_ptsL, img_ptsR, new_mtxL, distL, new_mtxR, distR, imgL_gray.shape[::-1], criteria_stereo, flags)