Dual-CCD distance measurement with OpenCV and Python
Source: Copilot
Dual-CCD (stereo) distance measurement with OpenCV and Python mainly involves the following steps:
01. Stereo calibration: obtain the intrinsics, extrinsics, and distortion coefficients of the left and right cameras.
02. Distortion correction: remove the distortion introduced by the lenses to get more accurate images.
03. Stereo rectification: apply projective transforms to the left and right views so that the two image planes become parallel to the baseline.
04. Stereo matching and disparity computation: for each pixel in the left image, find its corresponding point in the right image and compute the disparity.
05. Depth computation: use the disparity and the camera parameters to compute depth (see the short sketch after this list).
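Step 05 uses the standard pinhole relation for a rectified pair: depth Z = f × B / d, where f is the focal length in pixels, B the baseline between the two cameras, and d the disparity in pixels. A minimal sketch of that arithmetic, with made-up focal-length, baseline, and disparity values chosen purely for illustration:
code
# Depth from disparity for a rectified stereo pair: Z = f * B / d.
# The numbers below are illustrative assumptions, not calibration results.
focal_px = 700.0      # focal length in pixels (from calibration)
baseline_m = 0.06     # distance between the two camera centres, in metres
disparity_px = 35.0   # disparity of a matched pixel, in pixels

depth_m = focal_px * baseline_m / disparity_px
print(f"Estimated depth: {depth_m:.2f} m")   # 700 * 0.06 / 35 = 1.20 m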
Each of these steps is implemented with the corresponding Python code. Below is a simplified example showing the basic flow of stereo distance measurement with OpenCV:
code
import cv2
import numpy as np

# Preprocessing: convert both images to grayscale and equalize their histograms
def preprocess(img1, img2):
    im1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
    im2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
    im1 = cv2.equalizeHist(im1)
    im2 = cv2.equalizeHist(im2)
    return im1, im2

# Undistortion: remove lens distortion using the calibrated camera parameters
def undistortion(image, camera_matrix, dist_coeff):
    undistorted_image = cv2.undistort(image, camera_matrix, dist_coeff)
    return undistorted_image

# Stereo matching: compute a disparity map with the SGBM algorithm
def stereo_matching(img1, img2):
    stereo = cv2.StereoSGBM_create(numDisparities=16, blockSize=15)
    disparity = stereo.compute(img1, img2)
    return disparity

# Main routine
def main():
    # Read the left and right images
    img_left = cv2.imread('left.jpg')
    img_right = cv2.imread('right.jpg')
    # Preprocess
    img1, img2 = preprocess(img_left, img_right)
    # Undistort (camera parameters omitted here)
    # img1 = undistortion(img1, camera_matrix, dist_coeff)
    # img2 = undistortion(img2, camera_matrix, dist_coeff)
    # Stereo matching
    disparity = stereo_matching(img1, img2)
    # Display the disparity map (SGBM output is 16-bit and scaled by 16, so normalize for display)
    disp_vis = cv2.normalize(disparity, None, 0, 255, cv2.NORM_MINMAX).astype(np.uint8)
    cv2.imshow('Disparity', disp_vis)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

if __name__ == '__main__':
    main()
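The undistortion call above is commented out only because the simplified example has no calibration data at hand. As a minimal sketch, assuming the results of cv2.calibrateCamera were saved earlier to a file named calib.npz with the key names used below (both the file name and the keys are hypothetical, not part of the example above), the parameters could be reloaded like this:
code
import numpy as np
import cv2

# Hypothetical earlier step:
# np.savez('calib.npz', camera_matrix=mtx, dist_coeff=dist)
data = np.load('calib.npz')
camera_matrix = data['camera_matrix']   # 3x3 intrinsic matrix
dist_coeff = data['dist_coeff']         # distortion coefficients

img = cv2.imread('left.jpg')
undistorted = cv2.undistort(img, camera_matrix, dist_coeff)
cv2.imwrite('left_undistorted.jpg', undistorted)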
[Please do not omit any steps; give me a detailed, complete example program.]
code
import glob
import numpy as np
import cv2
from matplotlib import pyplot as plt

# Parameters
square_size = 1.0        # size of one chessboard square (in your chosen unit, e.g. cm)
pattern_size = (9, 6)    # number of inner corners per chessboard row and column
path_left = 'left/*.jpg'     # path to the left-camera calibration images
path_right = 'right/*.jpg'   # path to the right-camera calibration images

# Termination criteria for the sub-pixel corner refinement
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

# Prepare object points: (0,0,0), (1,0,0), (2,0,0) ..., (8,5,0)
objp = np.zeros((np.prod(pattern_size), 3), np.float32)
objp[:, :2] = np.indices(pattern_size).T.reshape(-1, 2)
objp *= square_size

# Arrays to store object points and image points
objpoints = []    # 3D points in the real world
imgpoints_l = []  # 2D points in the left images
imgpoints_r = []  # 2D points in the right images

# Read the image pairs and find the chessboard corners
# (sorted so that the left and right file lists stay in matching order)
images_left = sorted(glob.glob(path_left))
images_right = sorted(glob.glob(path_right))
for i, fname in enumerate(images_left):
    img_l = cv2.imread(fname)
    img_r = cv2.imread(images_right[i])
    gray_l = cv2.cvtColor(img_l, cv2.COLOR_BGR2GRAY)
    gray_r = cv2.cvtColor(img_r, cv2.COLOR_BGR2GRAY)
    # Find the chessboard corners
    ret_l, corners_l = cv2.findChessboardCorners(gray_l, pattern_size)
    ret_r, corners_r = cv2.findChessboardCorners(gray_r, pattern_size)
    # If corners were found in both views, refine and store them
    if ret_l and ret_r:
        objpoints.append(objp)
        corners2_l = cv2.cornerSubPix(gray_l, corners_l, (11, 11), (-1, -1), criteria)
        imgpoints_l.append(corners2_l)
        corners2_r = cv2.cornerSubPix(gray_r, corners_r, (11, 11), (-1, -1), criteria)
        imgpoints_r.append(corners2_r)
        # Draw and display the corners
        cv2.drawChessboardCorners(img_l, pattern_size, corners2_l, ret_l)
        cv2.drawChessboardCorners(img_r, pattern_size, corners2_r, ret_r)
        cv2.imshow('img left', img_l)
        cv2.imshow('img right', img_r)
        cv2.waitKey(500)
cv2.destroyAllWindows()
# Calibrate each camera separately
ret_l, mtx_l, dist_l, rvecs_l, tvecs_l = cv2.calibrateCamera(objpoints, imgpoints_l, gray_l.shape[::-1], None, None)
ret_r, mtx_r, dist_r, rvecs_r, tvecs_r = cv2.calibrateCamera(objpoints, imgpoints_r, gray_r.shape[::-1], None, None)
# Stereo calibration: estimate the rotation R and translation T between the two cameras
flags = 0
flags |= cv2.CALIB_FIX_INTRINSIC   # keep the intrinsics from calibrateCamera fixed; the remaining flags are then largely redundant
# flags |= cv2.CALIB_FIX_PRINCIPAL_POINT
flags |= cv2.CALIB_USE_INTRINSIC_GUESS
flags |= cv2.CALIB_FIX_FOCAL_LENGTH
flags |= cv2.CALIB_FIX_ASPECT_RATIO
flags |= cv2.CALIB_ZERO_TANGENT_DIST
flags |= cv2.CALIB_RATIONAL_MODEL
flags |= cv2.CALIB_SAME_FOCAL_LENGTH
flags |= cv2.CALIB_FIX_K3
flags |= cv2.CALIB_FIX_K4
flags |= cv2.CALIB_FIX_K5
stereocalib_criteria = (cv2.TERM_CRITERIA_MAX_ITER +
                        cv2.TERM_CRITERIA_EPS, 100, 1e-5)
ret, mtx_l, dist_l, mtx_r, dist_r, R, T, E, F = cv2.stereoCalibrate(
    objpoints, imgpoints_l, imgpoints_r, mtx_l, dist_l, mtx_r, dist_r,
    gray_l.shape[::-1], criteria=stereocalib_criteria, flags=flags)
# Stereo rectification: compute the rectification transforms and the reprojection matrix Q
R1, R2, P1, P2, Q, validPixROI1, validPixROI2 = cv2.stereoRectify(
    mtx_l, dist_l, mtx_r, dist_r, gray_l.shape[::-1], R, T,
    flags=cv2.CALIB_ZERO_DISPARITY, alpha=-1, newImageSize=(0, 0))
map1_l, map2_l = cv2.initUndistortRectifyMap(
    mtx_l, dist_l, R1, P1, gray_l.shape[::-1], cv2.CV_16SC2)
map1_r, map2_r = cv2.initUndistortRectifyMap(
    mtx_r, dist_r, R2, P2, gray_r.shape[::-1], cv2.CV_16SC2)
# Load a test pair (here simply the first calibration pair)
img_l = cv2.imread(images_left[0])
img_r = cv2.imread(images_right[0])
# Undistort and rectify both views
img_l_rect = cv2.remap(img_l, map1_l, map2_l, cv2.INTER_LINEAR)
img_r_rect = cv2.remap(img_r, map1_r, map2_r, cv2.INTER_LINEAR)
# Compute the disparity map (StereoBM requires 8-bit single-channel input)
gray_l_rect = cv2.cvtColor(img_l_rect, cv2.COLOR_BGR2GRAY)
gray_r_rect = cv2.cvtColor(img_r_rect, cv2.COLOR_BGR2GRAY)
stereo = cv2.StereoBM_create(numDisparities=16, blockSize=15)
disparity = stereo.compute(gray_l_rect, gray_r_rect).astype(np.float32) / 16.0  # fixed-point -> pixel disparities
# Compute depth: reproject the disparity map to 3D with the Q matrix from stereoRectify,
# which is what makes the result metric (in the same unit as square_size).
# The hand-built Q used by the OpenCV sample when no calibration is available is kept
# below for reference only:
# h, w = img_l_rect.shape[:2]
# f = 0.8 * w                              # guessed focal length in pixels
# Q = np.float32([[1, 0, 0, -0.5 * w],
#                 [0, -1, 0, 0.5 * h],     # flip the y axis
#                 [0, 0, 0, -f],
#                 [0, 0, 1, 0]])
points = cv2.reprojectImageTo3D(disparity, Q)
colors = cv2.cvtColor(img_l_rect, cv2.COLOR_BGR2RGB)
mask = disparity > disparity.min()
out_points = points[mask]
out_colors = colors[mask]
# Write the colored point cloud to a PLY file
output_file = 'out.ply'
with open(output_file, 'w') as ply:
    ply.write('ply\n')
    ply.write('format ascii 1.0\n')
    ply.write('element vertex %d\n' % len(out_points))
    ply.write('property float x\n')
    ply.write('property float y\n')
    ply.write('property float z\n')
    ply.write('property uchar red\n')
    ply.write('property uchar green\n')
    ply.write('property uchar blue\n')
    ply.write('end_header\n')
    for p, c in zip(out_points, out_colors):
        # out_colors is already RGB, so write the channels in order
        ply.write('%f %f %f %d %d %d\n' % (p[0], p[1], p[2], c[0], c[1], c[2]))
# Display the disparity map
plt.imshow(disparity, 'gray')
plt.show()
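The point cloud above already contains the measured distances: the Z component of points is the depth of each pixel of the rectified left image, in the same unit as square_size (since the calibrated Q was used). A minimal sketch of reading the distance at one pixel; the coordinates (u, v) below are arbitrary placeholders, not values from the example:
code
# Query the depth (Z) at a specific pixel of the rectified left image.
u, v = 320, 240                 # example pixel coordinates (column, row) -- placeholders
if mask[v, u]:                  # only meaningful where a valid disparity was found
    X, Y, Z = points[v, u]
    print(f"Distance at pixel ({u}, {v}): {Z:.2f} (units of square_size)")
else:
    print("No valid disparity at this pixel")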