C++ Kinect v2 & freenect2: how to convert depth data to real world coordinates

I am trying to compute real-world XYZ coordinates from depth data using a Kinect v2 camera (on Linux), but my computation gives me wrong results.

Here is the code:

cv::Point3f xyzWorld(0.0f, 0.0f, 0.0f);

// pinhole back-projection: X = (u - cx) * Z / fx, Y = (v - cy) * Z / fy
xyzWorld.z = pointDepth;
xyzWorld.x = ((float)x - depthcx) * xyzWorld.z / depthfx;
xyzWorld.y = ((float)y - depthcy) * xyzWorld.z / depthfy;

return xyzWorld;

I suspect the problem is in the values I am using for the depth-camera intrinsics fx, fy, cx and cy.

Can someone help me?

I am using freenect2.
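For reference, libfreenect2 can do this conversion itself: Registration::getPointXYZ() back-projects a pixel of the undistorted depth frame using the device's factory calibration, which is also readable directly through getIrCameraParams(). A minimal sketch, assuming dev is an already-opened libfreenect2::Freenect2Device* and undistorted is the 512x424 depth frame filled by Registration::apply():

#include <libfreenect2/libfreenect2.hpp>
#include <libfreenect2/registration.h>
#include <opencv2/core.hpp>

cv::Point3f pointFromDepth(libfreenect2::Freenect2Device *dev,
                           const libfreenect2::Frame *undistorted,
                           int row, int col)
{
    // Registration is built from the factory-calibrated depth intrinsics
    // (fx, fy, cx, cy plus distortion coefficients).
    libfreenect2::Registration reg(dev->getIrCameraParams(),
                                   dev->getColorCameraParams());

    float x, y, z;
    reg.getPointXYZ(undistorted, row, col, x, y, z); // meters; NaN if no depth
    return cv::Point3f(x, y, z);
}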

Why not just use the OpenNI implementation?

OniStatus VideoStream::convertDepthToWorldCoordinates(float depthX, float depthY, float depthZ, float* pWorldX, float* pWorldY, float* pWorldZ)
{
    if (m_pSensorInfo->sensorType != ONI_SENSOR_DEPTH)
    {
        m_errorLogger.Append("convertDepthToWorldCoordinates: Stream is not from DEPTH\n");
        return ONI_STATUS_NOT_SUPPORTED;
    }

    float normalizedX = depthX / m_worldConvertCache.resolutionX - .5f;
    float normalizedY = .5f - depthY / m_worldConvertCache.resolutionY;

    OniVideoMode videoMode;
    int size = sizeof(videoMode);
    getProperty(ONI_STREAM_PROPERTY_VIDEO_MODE, &videoMode, &size);

    float const convertToMillimeters = (videoMode.pixelFormat == ONI_PIXEL_FORMAT_DEPTH_100_UM) ? 10.f : 1.f;
    *pWorldX = (normalizedX * depthZ * m_worldConvertCache.xzFactor) / convertToMillimeters;
    *pWorldY = (normalizedY * depthZ * m_worldConvertCache.yzFactor) / convertToMillimeters;
    *pWorldZ = depthZ / convertToMillimeters;

    return ONI_STATUS_OK;
}

and

OniStatus VideoStream::convertWorldToDepthCoordinates(float worldX, float worldY, float worldZ, float* pDepthX, float* pDepthY, float* pDepthZ)
{
    if (m_pSensorInfo->sensorType != ONI_SENSOR_DEPTH)
    {
        m_errorLogger.Append("convertWorldToDepthCoordinates: Stream is not from DEPTH\n");
        return ONI_STATUS_NOT_SUPPORTED;
    }

    *pDepthX = m_worldConvertCache.coeffX * worldX / worldZ + m_worldConvertCache.halfResX;
    *pDepthY = m_worldConvertCache.halfResY - m_worldConvertCache.coeffY * worldY / worldZ;
    *pDepthZ = worldZ;
    return ONI_STATUS_OK;
}

and the world conversion cache:

void VideoStream::refreshWorldConversionCache()
{
    if (m_pSensorInfo->sensorType != ONI_SENSOR_DEPTH)
    {
        return;
    }

    OniVideoMode videoMode;
    int size = sizeof(videoMode);
    getProperty(ONI_STREAM_PROPERTY_VIDEO_MODE, &videoMode, &size);

    size = sizeof(float);
    float horizontalFov;
    float verticalFov;
    getProperty(ONI_STREAM_PROPERTY_HORIZONTAL_FOV, &horizontalFov, &size);
    getProperty(ONI_STREAM_PROPERTY_VERTICAL_FOV, &verticalFov, &size);

    m_worldConvertCache.xzFactor = tan(horizontalFov / 2) * 2;
    m_worldConvertCache.yzFactor = tan(verticalFov / 2) * 2;
    m_worldConvertCache.resolutionX = videoMode.resolutionX;
    m_worldConvertCache.resolutionY = videoMode.resolutionY;
    m_worldConvertCache.halfResX = m_worldConvertCache.resolutionX / 2;
    m_worldConvertCache.halfResY = m_worldConvertCache.resolutionY / 2;
    m_worldConvertCache.coeffX = m_worldConvertCache.resolutionX / m_worldConvertCache.xzFactor;
    m_worldConvertCache.coeffY = m_worldConvertCache.resolutionY / m_worldConvertCache.yzFactor;
}

struct WorldConversionCache
{
    float xzFactor;
    float yzFactor;
    float coeffX;
    float coeffY;
    int resolutionX;
    int resolutionY;
    int halfResX;
    int halfResY;
} m_worldConvertCache;

All taken from the OpenNI GitHub repository.
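If you port this into your own code, a round trip through the two functions is a quick sanity check; a sketch, assuming stream is a VideoStream delivering depth in millimeters (ONI_PIXEL_FORMAT_DEPTH_1_MM):

// depth -> world -> depth should reproduce the original pixel
// up to floating-point error
float wx, wy, wz, dx, dy, dz;
stream.convertDepthToWorldCoordinates(256.0f, 212.0f, 1500.0f, &wx, &wy, &wz);
stream.convertWorldToDepthCoordinates(wx, wy, wz, &dx, &dy, &dz);
// expect dx ~ 256, dy ~ 212, dz ~ 1500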

You can get the horizontal and vertical FOV directly from the description of each frame.
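Note that this FOV-based formula is the same pinhole model you started from: coeffX = resolutionX / (2 * tan(horizontalFov / 2)) is exactly fx, halfResX is cx, and the conversion reduces to X = (u - cx) * Z / fx and Y = -(v - cy) * Z / fy (the sign flips because OpenNI's world Y axis points up while image rows increase downward). Since libfreenect2 reports fx/fy/cx/cy via getIrCameraParams() rather than a FOV, you can convert between the two parameterizations; a sketch with hypothetical helpers, assuming the 512x424 Kinect v2 depth resolution:

#include <cmath>

// Convert between a pinhole focal length and a field of view (radians).
float fovFromFocal(float focal, int res)   // e.g. horizontalFov from fx
{
    return 2.0f * std::atan(res / (2.0f * focal));
}

float focalFromFov(float fov, int res)     // e.g. fx from horizontalFov
{
    return res / (2.0f * std::tan(fov / 2.0f));
}

So as long as fx, fy, cx and cy come from your device's own calibration, your original formula should produce correct results, up to the Y-axis sign convention.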
