Vision work added

@@ -0,0 +1,49 @@
import cv2
import os

# Set the camera index (change to the appropriate index if needed)
camera_index = 1

# Create a directory to save captured images
output_dir = 'camera1_images'
os.makedirs(output_dir, exist_ok=True)

# Initialize the camera
cap = cv2.VideoCapture(camera_index)

# Check if the camera is opened successfully
if not cap.isOpened():
    print(f"Error: Could not open camera {camera_index}")
    exit()
i = 0
# Capture and save 12 images
while i < 12:
    # Capture a frame from the camera
    ret, frame = cap.read()

    # Check if the frame is captured successfully
    if not ret:
        print("Error: Could not read frame")
        break

    # Display the captured image
    cv2.imshow('Captured Image', frame)

    # Poll for a key press for up to 5 ms
    key = cv2.waitKey(5) & 0xFF

    # Save the captured image if the 's' key is pressed
    if key == ord('s'):
        img_path = os.path.join(output_dir, f'captured_image_{i+1}.jpg')
        cv2.imwrite(img_path, frame)
        print(f"Image {i+1} saved: {img_path}")
        i += 1

    # If the 'q' key is pressed, exit the loop
    elif key == ord('q'):
        break

# Release the camera and close all OpenCV windows
cap.release()
cv2.destroyAllWindows()
print("Image capture complete.")

@@ -0,0 +1,35 @@
import numpy as np
import cv2 as cv
import glob

# Termination criteria
criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 0.001)

# Prepare object points for a chessboard with 9 x 6 squares (8 x 5 inner corners) of 30 mm
square_size = 30  # in millimeters
objp = np.zeros((5 * 8, 3), np.float32)
objp[:, :2] = np.mgrid[0:8, 0:5].T.reshape(-1, 2) * square_size

# Arrays to store object points and image points from all the images.
objpoints = []  # 3D points in real-world space
imgpoints = []  # 2D points in image plane
images = glob.glob('camera1_images/*.jpg')

for fname in images:
    img = cv.imread(fname)
    gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)

    # Find the chessboard corners
    ret, corners = cv.findChessboardCorners(gray, (8, 5), None)
    # If found, add object points and image points (after refining them)
    if ret:
        objpoints.append(objp)
        corners2 = cv.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
        imgpoints.append(corners2)
        # Draw and display the corners
        cv.drawChessboardCorners(img, (8, 5), corners2, ret)
        cv.imshow('img', img)
        cv.waitKey(400)

cv.destroyAllWindows()
ret, mtx, dist, rvecs, tvecs = cv.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
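
A quick sanity check that is not part of the diff: the mean reprojection error and an undistorted test image tell you whether mtx and dist are usable. The sketch below assumes it runs right after the script above, reusing objpoints, imgpoints, mtx, dist, rvecs, tvecs and the last loaded img; the output filename is just a placeholder.

# Sketch only: reuses variables from the calibration script above.
mean_error = 0
for i in range(len(objpoints)):
    projected, _ = cv.projectPoints(objpoints[i], rvecs[i], tvecs[i], mtx, dist)
    mean_error += cv.norm(imgpoints[i], projected, cv.NORM_L2) / len(projected)
print(f"Mean reprojection error: {mean_error / len(objpoints):.4f} px")

# Undistort the last processed image as a visual check.
h, w = img.shape[:2]
new_mtx, roi = cv.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1, (w, h))
undistorted = cv.undistort(img, mtx, dist, None, new_mtx)
cv.imwrite('undistorted_check.jpg', undistorted)  # placeholder output name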

@@ -0,0 +1,49 @@
#!/usr/bin/python3

import cv2
import threading

# Function to continuously read frames from a camera and display them
def display_frames(cap, window_name):
    while True:
        ret, frame = cap.read()
        if not ret:
            print(f"Error reading frame from {window_name} camera.")
            break

        cv2.imshow(window_name, frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

# Open a video capture object for each camera
cap_left = cv2.VideoCapture(1)  # Adjust the index if needed
cap_right = cv2.VideoCapture(2)  # Adjust the index if needed

# Check if the cameras opened successfully
if not cap_left.isOpened() or not cap_right.isOpened():
    print("Error: Couldn't open one or both cameras.")
    exit()

# Set the width and height of the video capture (adjust as needed)
cap_left.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
cap_left.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
cap_right.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
cap_right.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)

# Create a thread for each camera
thread_left = threading.Thread(target=display_frames, args=(cap_left, 'Left Camera'))
thread_right = threading.Thread(target=display_frames, args=(cap_right, 'Right Camera'))

# Start the threads
thread_left.start()
thread_right.start()

# Wait for the threads to finish (when 'q' is pressed)
thread_left.join()
thread_right.join()

# Release the video capture objects and close the OpenCV windows
cap_left.release()
cap_right.release()
cv2.destroyAllWindows()
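
A note on the hunk above, not part of the diff: OpenCV's HighGUI is not guaranteed to be thread-safe, so calling cv2.imshow / cv2.waitKey from two worker threads can misbehave on some platforms. A minimal sketch of a more conservative pattern, with capture in the threads and all GUI calls in the main thread (frames, lock and running are illustrative names, not from the PR):

import threading
import cv2

frames = {}              # latest frame per window name (illustrative)
lock = threading.Lock()
running = True

def grab_frames(cap, name):
    # Worker: capture only, no GUI calls.
    global running
    while running:
        ret, frame = cap.read()
        if not ret:
            break
        with lock:
            frames[name] = frame

cap_left = cv2.VideoCapture(1)   # same indices as the script above
cap_right = cv2.VideoCapture(2)
threads = [threading.Thread(target=grab_frames, args=(c, n), daemon=True)
           for c, n in ((cap_left, 'Left Camera'), (cap_right, 'Right Camera'))]
for t in threads:
    t.start()

# The main thread owns every imshow/waitKey call.
while True:
    with lock:
        latest = dict(frames)
    for name, frame in latest.items():
        cv2.imshow(name, frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        running = False
        break

for t in threads:
    t.join()
cap_left.release()
cap_right.release()
cv2.destroyAllWindows()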

@@ -0,0 +1,6 @@
#!/usr/bin/python3

from pypot.creatures import PoppyErgoJr

jr = PoppyErgoJr()
jr.m3.goal_position = 30
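
For reference (not in the diff): setting goal_position returns immediately. If a timed, blocking move and a clean shutdown are wanted, pypot motors also expose goto_position and the robot object a close() method; the sketch below assumes that API and uses placeholder values for the target angle and duration.

#!/usr/bin/python3
from pypot.creatures import PoppyErgoJr

jr = PoppyErgoJr()
jr.m3.compliant = False                 # make sure the motor is actively driven
jr.m3.goto_position(30, 2, wait=True)   # assumed API: move to 30 deg over ~2 s, block until done
print(jr.m3.present_position)           # read back the reached position
jr.close()                              # release the serial connection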

[Binary: 24 image files added (70–87 KiB each).]

@@ -0,0 +1,20 @@
import cv2

cam_available = []

for i in range(10):  # Try indices from 0 to 9
    cap = cv2.VideoCapture(i)
    if cap.isOpened():
        print(f"Camera found at index {i}")
        cam_available.append(i)
    cap.release()
    if len(cam_available) > 2:
        break

# Guard against finding fewer than two usable cameras
if len(cam_available) < 2:
    print("Error: fewer than two cameras were found.")
    exit()

# Skip the built-in webcam (index 0) when a third camera is available
if len(cam_available) > 2 and cam_available[0] == 0:
    cam1 = cam_available[1]
    cam2 = cam_available[2]
else:
    cam1 = cam_available[0]
    cam2 = cam_available[1]

[Binary: 24 image files added (76–85 KiB each).]

@@ -0,0 +1,44 @@
#!/usr/bin/python3

import cv2
import numpy as np

# Open a video capture object for each camera
cap_left = cv2.VideoCapture(1)  # Adjust the index if needed
cap_right = cv2.VideoCapture(2)  # Adjust the index if needed

# Check if the cameras opened successfully
if not cap_left.isOpened() or not cap_right.isOpened():
    print("Error: Couldn't open one or both cameras.")
    exit()

# Set the width and height of the video capture (adjust as needed)
cap_left.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
cap_left.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
cap_right.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
cap_right.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)

while True:
    # Read frames from both cameras
    ret_left, frame_left = cap_left.read()
    ret_right, frame_right = cap_right.read()

    # Break the loop if either camera fails to deliver a frame
    if not ret_left or not ret_right:
        print("Error: Couldn't read frames from one or both cameras.")
        break

    # Concatenate the frames side by side for a stereo view
    stereo_frame = cv2.hconcat([frame_left, frame_right])

    # Display the stereo frame
    cv2.imshow('Stereo Camera Feed', stereo_frame)

    # Break the loop if the 'q' key is pressed
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Release the video capture objects and close the OpenCV window
cap_left.release()
cap_right.release()
cv2.destroyAllWindows()
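
A possible next step once both feeds are viewable (not in this diff) is a rough disparity map. The sketch below uses cv2.StereoBM on one grabbed pair; without the rectification produced by the stereo calibration added later it is only a preview, and the camera indices 1 and 2 are the same assumption as above.

import cv2

# Sketch: grab one stereo pair and compute a block-matching disparity map.
cap_left = cv2.VideoCapture(1)
cap_right = cv2.VideoCapture(2)
ret_l, frame_left = cap_left.read()
ret_r, frame_right = cap_right.read()
cap_left.release()
cap_right.release()

if ret_l and ret_r:
    gray_left = cv2.cvtColor(frame_left, cv2.COLOR_BGR2GRAY)
    gray_right = cv2.cvtColor(frame_right, cv2.COLOR_BGR2GRAY)

    # numDisparities must be a multiple of 16; blockSize must be odd.
    stereo = cv2.StereoBM_create(numDisparities=64, blockSize=15)
    disparity = stereo.compute(gray_left, gray_right)  # fixed-point result (disparity * 16)

    disp_vis = cv2.normalize(disparity, None, 0, 255, cv2.NORM_MINMAX).astype('uint8')
    cv2.imshow('Disparity (unrectified preview)', disp_vis)
    cv2.waitKey(0)
    cv2.destroyAllWindows()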

@@ -0,0 +1,243 @@
import numpy as np
import cv2 as cv
import glob
import os

# Here is a little help:
# https://temugeb.github.io/opencv/python/2021/02/02/stereo-camera-calibration-and-triangulation.html

def find_camera(find_flag):
    if find_flag:
        cam_available = []
        for i in range(10):  # Try indices from 0 to 9
            cap = cv.VideoCapture(i)
            if cap.isOpened():
                print(f"Camera found at index {i}")
                cam_available.append(i)
            cap.release()
            if len(cam_available) > 2:
                break

        # Guard against finding fewer than two usable cameras
        if len(cam_available) < 2:
            print("Error: fewer than two cameras were found.")
            exit()

        # Skip the built-in webcam (index 0) when a third camera is available
        if len(cam_available) > 2 and cam_available[0] == 0:
            cam1 = cam_available[1]
            cam2 = cam_available[2]
        else:
            cam1 = cam_available[0]
            cam2 = cam_available[1]
    else:
        cam1 = 1
        cam2 = 2
    return cam1, cam2

def img_capture(camera_num):
    # Create a directory to save captured images
    output_dir = f"camera{camera_num}_images"
    os.makedirs(output_dir, exist_ok=True)

    # Initialize the camera
    cap = cv.VideoCapture(camera_num)

    # Check if the camera is opened successfully
    if not cap.isOpened():
        print(f"Error: Could not open camera {camera_num}")
        exit()
    i = 0
    # Capture and save 12 images
    while i < 12:
        # Capture a frame from the camera
        ret, frame = cap.read()

        # Check if the frame is captured successfully
        if not ret:
            print("Error: Could not read frame")
            break

        # Display the captured image
        cv.imshow('Captured Image', frame)

        # Save the captured image if the 's' key is pressed
        key = cv.waitKey(5) & 0xFF
        if key == ord('s'):
            img_path = os.path.join(output_dir, f'image_{i+1}.jpg')
            cv.imwrite(img_path, frame)
            print(f"Image {i+1} saved: {img_path}")
            i += 1

        # If the 'q' key is pressed, exit the loop
        elif key == ord('q'):
            break

    # Release the camera and close all OpenCV windows
    cap.release()
    cv.destroyAllWindows()
    print("Image capture complete.")

    return

def single_calibration(camera_num, img_cap):
    if img_cap:
        img_capture(camera_num)
    # Termination criteria
    criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 0.001)

    # Prepare object points for a chessboard with 9 x 6 squares (8 x 5 inner corners) of 30 mm
    square_size = 30  # in millimeters
    objp = np.zeros((5 * 8, 3), np.float32)
    objp[:, :2] = np.mgrid[0:8, 0:5].T.reshape(-1, 2) * square_size

    # Arrays to store object points and image points from all the images.
    objpoints = []  # 3D points in real-world space
    imgpoints = []  # 2D points in image plane
    images = glob.glob(f'camera{camera_num}_images/*.jpg')

    for frame in images:
        img = cv.imread(frame)
        gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)

        # Find the chessboard corners
        ret, corners = cv.findChessboardCorners(gray, (8, 5), None)

        # If found, add object points and image points (after refining them)
        if ret:
            objpoints.append(objp)
            corners2 = cv.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
            imgpoints.append(corners2)
            # Draw and display the corners
            cv.drawChessboardCorners(img, (8, 5), corners2, ret)
            cv.imshow('img', img)
            cv.waitKey(400)

    cv.destroyAllWindows()
    ret, mtx, dist, rvecs, tvecs = cv.calibrateCamera(objpoints, imgpoints, (gray.shape[1], gray.shape[0]), None, None)

    return mtx, dist

def stereo_capture():
    # Open a video capture object for each camera
    cap_left = cv.VideoCapture(1)  # Adjust the index if needed
    cap_right = cv.VideoCapture(2)  # Adjust the index if needed

    # Check if the cameras opened successfully
    if not cap_left.isOpened() or not cap_right.isOpened():
        print("Error: Couldn't open one or both cameras.")
        exit()

    # Set the width and height of the video capture (adjust as needed)
    cap_left.set(cv.CAP_PROP_FRAME_WIDTH, 640)
    cap_left.set(cv.CAP_PROP_FRAME_HEIGHT, 480)
    cap_right.set(cv.CAP_PROP_FRAME_WIDTH, 640)
    cap_right.set(cv.CAP_PROP_FRAME_HEIGHT, 480)

    # Create a directory to save images
    output_dir = 'stereo_images'
    os.makedirs(output_dir, exist_ok=True)

    frame_counter = 0
    while frame_counter < 12:
        # Read frames from both cameras
        ret_left, frame_left = cap_left.read()
        ret_right, frame_right = cap_right.read()

        # Break the loop if either camera fails to deliver a frame
        if not ret_left or not ret_right:
            print("Error: Couldn't read frames from one or both cameras.")
            break

        # Display the frames side by side for a stereo view
        stereo_frame = cv.hconcat([frame_left, frame_right])
        cv.imshow('Stereo Camera Feed', stereo_frame)

        # Save the captured pair if the 's' key is pressed
        key = cv.waitKey(5) & 0xFF
        if key == ord('s'):
            # Save the frames from both cameras
            frame_counter += 1
            img_path_left = os.path.join(output_dir, f'{frame_counter}_left_image.jpg')
            img_path_right = os.path.join(output_dir, f'{frame_counter}_right_image.jpg')
            cv.imwrite(img_path_left, frame_left)
            cv.imwrite(img_path_right, frame_right)
            print(f"Image pair {frame_counter} saved")

        # Break the loop if the 'q' key is pressed
        elif key == ord('q'):
            break

    # Release the video capture objects and close the OpenCV window
    cap_left.release()
    cap_right.release()
    cv.destroyAllWindows()

#stereo_capture()

def stereo_calibration(mtx1, dist1, mtx2, dist2, frames_folder):
    # Read the synched frames
    images_names = glob.glob(frames_folder)
    images_names = sorted(images_names)
    # stereo_capture() names the pairs '<n>_left_image.jpg' / '<n>_right_image.jpg',
    # so split the file list by name rather than in half
    c1_images_names = [name for name in images_names if 'left' in name]
    c2_images_names = [name for name in images_names if 'right' in name]

    c1_images = []
    c2_images = []
    for im1, im2 in zip(c1_images_names, c2_images_names):
        _im = cv.imread(im1, 1)
        c1_images.append(_im)

        _im = cv.imread(im2, 1)
        c2_images.append(_im)

    # Change this if the stereo calibration is not good.
    criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 100, 0.0001)

    rows = 5  # number of checkerboard rows (inner corners)
    columns = 8  # number of checkerboard columns (inner corners)
    world_scaling = 1.  # set to the real square size (30 mm above) to get T in millimetres

    # Coordinates of the squares in the checkerboard world space
    objp = np.zeros((rows * columns, 3), np.float32)
    objp[:, :2] = np.mgrid[0:rows, 0:columns].T.reshape(-1, 2)
    objp = world_scaling * objp

    # Frame dimensions. Frames should be the same size.
    width = c1_images[0].shape[1]
    height = c1_images[0].shape[0]

    # Pixel coordinates of the checkerboards
    imgpoints_left = []  # 2D points in image plane
    imgpoints_right = []

    # Coordinates of the checkerboard in checkerboard world space
    objpoints = []  # 3D points in real-world space

    for frame1, frame2 in zip(c1_images, c2_images):
        gray1 = cv.cvtColor(frame1, cv.COLOR_BGR2GRAY)
        gray2 = cv.cvtColor(frame2, cv.COLOR_BGR2GRAY)
        c_ret1, corners1 = cv.findChessboardCorners(gray1, (5, 8), None)
        c_ret2, corners2 = cv.findChessboardCorners(gray2, (5, 8), None)

        if c_ret1 and c_ret2:
            corners1 = cv.cornerSubPix(gray1, corners1, (11, 11), (-1, -1), criteria)
            corners2 = cv.cornerSubPix(gray2, corners2, (11, 11), (-1, -1), criteria)

            cv.drawChessboardCorners(frame1, (5, 8), corners1, c_ret1)
            cv.imshow('img', frame1)

            cv.drawChessboardCorners(frame2, (5, 8), corners2, c_ret2)
            cv.imshow('img2', frame2)
            k = cv.waitKey(500)

            objpoints.append(objp)
            imgpoints_left.append(corners1)
            imgpoints_right.append(corners2)

    # Keep the intrinsics from the single-camera calibrations fixed
    stereocalibration_flags = cv.CALIB_FIX_INTRINSIC
    ret, CM1, dist1, CM2, dist2, R, T, E, F = cv.stereoCalibrate(objpoints, imgpoints_left, imgpoints_right, mtx1, dist1, mtx2, dist2, (width, height), criteria=criteria, flags=stereocalibration_flags)

    print(f"Stereo calibration RMS error: {ret}")
    return R, T

#R, T = stereo_calibration(mtx1, dist1, mtx2, dist2, 'stereo_images/*')


cam1, cam2 = find_camera(find_flag=True)

mtx1, dist1 = single_calibration(camera_num=cam1, img_cap=False)
mtx2, dist2 = single_calibration(camera_num=cam2, img_cap=False)
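
Not part of the file, but once R and T come back from stereo_calibration (the call above is still commented out), the usual next step is rectification. A minimal sketch, assuming mtx1/dist1/mtx2/dist2 from above and the 640 x 480 frame size used during capture:

# Sketch: build rectification maps from the stereo calibration results.
image_size = (640, 480)  # width, height used by stereo_capture()

R1, R2, P1, P2, Q, roi1, roi2 = cv.stereoRectify(mtx1, dist1, mtx2, dist2, image_size, R, T)

map1x, map1y = cv.initUndistortRectifyMap(mtx1, dist1, R1, P1, image_size, cv.CV_32FC1)
map2x, map2y = cv.initUndistortRectifyMap(mtx2, dist2, R2, P2, image_size, cv.CV_32FC1)

# A captured pair (frame_left, frame_right) can then be remapped before matching:
# rect_left = cv.remap(frame_left, map1x, map1y, cv.INTER_LINEAR)
# rect_right = cv.remap(frame_right, map2x, map2y, cv.INTER_LINEAR)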