
Stereo camera structured light disparity problem

I want to build a 3D scanner with a stereo camera and an LED projector. I calibrated the stereo camera and built the mechanical system. I project Gray code patterns and capture them with both cameras. I decode the images and obtain correspondence images, but I cannot triangulate the decoded points. This is the reference project I use to generate the patterns and decode them.

Reference GitHub project

Here is my code:

import numpy as np
import cv2
import structuredlight as sl


def generate_rectify_data(calib_data, size):
    # Load the stereo calibration parameters from the XML file
    XML_File = cv2.FileStorage(calib_data, cv2.FILE_STORAGE_READ)

    M1 = XML_File.getNode('Camera_Matrix_Left').mat()
    M2 = XML_File.getNode('Camera_Matrix_Right').mat()
    d1 = XML_File.getNode('Camera_Distortion_Left').mat()
    d2 = XML_File.getNode('Camera_Distortion_Right').mat()
    R = XML_File.getNode('Rotation_Matrix').mat()
    t = XML_File.getNode('Translation_Matrix').mat()

    flag = cv2.CALIB_ZERO_DISPARITY

    # Compute rectification rotations and projection matrices for both cameras
    R1, R2, P1, P2, Q = cv2.stereoRectify(
        cameraMatrix1=M1, cameraMatrix2=M2,
        distCoeffs1=d1, distCoeffs2=d2, R=R, T=t,
        flags=flag, alpha=-1, imageSize=size, newImageSize=size)[0:5]

    # Build the undistortion/rectification maps for both cameras
    map_x_l, map_y_l = cv2.initUndistortRectifyMap(M1, d1, R1, P1, size, cv2.CV_32FC1)
    map_x_r, map_y_r = cv2.initUndistortRectifyMap(M2, d2, R2, P2, size, cv2.CV_32FC1)

    return map_x_l, map_y_l, map_x_r, map_y_r, P1, P2, Q


def rectify(img, map_x, map_y):
    # Remap the image into the rectified frame
    res = cv2.remap(img, map_x, map_y, cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)
    return res


"""
***I generate the patterns like this and capture them in another Python script***

W = 240
H = 240

gray = sl.Gray()

imlist_posi_x_pat = gray.generate((W, H))
imlist_posi_y_pat = sl.transpose(gray.generate((H, W)))

"""


if __name__ == '__main__':
    img_size = (1648, 1232)

    map_x_l, map_y_l, map_x_r, map_y_r, P1, P2, Q = generate_rectify_data(
        "C:/Users/XXX/PycharmProjects/Stereo_Structured_Light/Calibration_Data"
        "/Calibration_Parameters_1.xml", size=img_size)
    rect_list_l, rect_list_r = [], []

    imlist_posi_x_cap_R = []
    imlist_posi_y_cap_R = []
    imlist_posi_x_cap_L = []
    imlist_posi_y_cap_L = []

    for i in range(0, 16):
        # Load the captured Gray code images (grayscale) for both cameras
        img_l = cv2.imread("C:/OxO_Scan/Images_1/Left_cam3/L_" + str(i) + ".png", 0)
        img_r = cv2.imread("C:/OxO_Scan/Images_1/Right_cam3/R_" + str(i) + ".png", 0)

        l_rect = rectify(img_l, map_x_l, map_y_l)
        r_rect = rectify(img_r, map_x_r, map_y_r)

        if i < 8:  # 8 for the horizontal, 8 for the vertical pattern images
            imlist_posi_x_cap_R.append(r_rect)
            imlist_posi_x_cap_L.append(l_rect)
        else:
            imlist_posi_y_cap_R.append(r_rect)
            imlist_posi_y_cap_L.append(l_rect)

    # Projector pattern resolution used when generating the Gray codes
    W = 240
    H = 240

    gray = sl.Gray()
    # Decode the captured Gray code images into per-pixel projector coordinates
    img_index_x_R = gray.decode(imlist_posi_x_cap_R, thresh=40)
    img_index_x_L = gray.decode(imlist_posi_x_cap_L, thresh=40)

    img_index_y_R = gray.decode(imlist_posi_y_cap_R, thresh=40)
    img_index_y_L = gray.decode(imlist_posi_y_cap_L, thresh=40)

    # Visualise the decoded indices as BGR images (G = x index / W, R = y index / H)
    img_correspondence_x_r = cv2.merge([0.0 * np.zeros_like(img_index_x_R),
                                        img_index_x_R / W, img_index_y_R / H])
    img_correspondence_r = np.clip(img_correspondence_x_r * 255.0, 0, 255).astype(np.uint8)

    img_correspondence_y_l = cv2.merge([0.0 * np.zeros_like(img_index_x_L),
                                        img_index_x_L / W, img_index_y_L / H])
    img_correspondence_l = np.clip(img_correspondence_y_l * 255.0, 0, 255).astype(np.uint8)

    ####################################

    cv2.imshow("a", cv2.resize(img_correspondence_l, (640, 480)))
    cv2.imshow("b", cv2.resize(img_correspondence_r, (640, 480)))

    cv2.waitKey()
    cv2.destroyAllWindows()

    img_correspondence_L_2 = np.copy(img_correspondence_l)
    img_correspondence_R_2 = np.copy(img_correspondence_r)

    cam_pts_l, cam_pts_r = [], []
    cam_pts_l2, cam_pts_r2 = [], []

    for i in range(img_correspondence_l.shape[0]):
        for j in range(img_correspondence_l.shape[1]):
            if (img_correspondence_l[i][j] != 0).any():
                # Decoded projector x index of this left-camera pixel
                qwert = img_index_x_L[i][j]

                cam_pts_l.append([j, i])
                cam_pts_r.append([j + qwert, i])

    cam_pts_l = np.array(cam_pts_l, dtype=np.float32)[:, np.newaxis, :]
    cam_pts_r = np.array(cam_pts_r, dtype=np.float32)[:, np.newaxis, :]

    # Triangulate the matched pixel pairs with the rectified projection matrices
    pts4D = cv2.triangulatePoints(P1, P2, np.float32(cam_pts_l), np.float32(cam_pts_r)).T
    pts3D = pts4D[:, :3] / pts4D[:, -1:]

I don't know what to do in the loop "for i in range(img_correspondence_l.shape[0]):". For example, I cannot find, in the right camera image, the position that corresponds to a point found with the left correspondence image from the left camera. j + img_index_x_L[i][j] does not give the correct result. What should I do here?
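What I think I might need instead (just a sketch, I have not verified it): since the images are rectified, a matching point should lie on the same row in both cameras, so instead of shifting the left column by the decoded index, I could search each right-image row for the pixel whose decoded projector (x, y) index equals the left pixel's decoded index, and use that column as the right-image match. In the sketch below, match_by_projector_index is my own helper name, and I assume a decoded index of 0 means the pixel could not be decoded:

import numpy as np

def match_by_projector_index(img_index_x_L, img_index_y_L,
                             img_index_x_R, img_index_y_R):
    # For every decoded left pixel, search the same rectified row of the
    # right index maps for the pixel that decoded to the same projector (x, y).
    pts_l, pts_r = [], []
    h, w = img_index_x_L.shape
    for i in range(h):
        row_x_r = img_index_x_R[i]
        row_y_r = img_index_y_R[i]
        for j in range(w):
            px, py = img_index_x_L[i, j], img_index_y_L[i, j]
            if px == 0 and py == 0:  # assumption: 0/0 means "not decoded"
                continue
            # Right-row candidates that decoded to the same projector pixel
            candidates = np.where((row_x_r == px) & (row_y_r == py))[0]
            if candidates.size == 0:
                continue
            j_r = float(candidates.mean())  # average column if several pixels match
            pts_l.append([j, i])
            pts_r.append([j_r, i])
    return (np.array(pts_l, dtype=np.float32),
            np.array(pts_r, dtype=np.float32))

The returned arrays could then be reshaped to (N, 1, 2) and passed to cv2.triangulatePoints with P1 and P2 exactly as in my code above, but I am not sure this is the right approach.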

Thanks in advance for your answers.
