CUDA/OpenGL interop, draw to OpenGL texture with CUDA

Submitted by 你说的曾经没有我的故事 on 2019-11-29 02:42:38

Question


I am writing a rendering system in CUDA and want the results to be displayed quickly via OpenGL, without a round trip through main memory. I basically do the following:

Create and initialize an OpenGL texture, and register it with CUDA as a cudaGraphicsResource:

GLuint viewGLTexture;
cudaGraphicsResource_t viewCudaResource;

void initialize() {
    glEnable(GL_TEXTURE_2D);
    glGenTextures(1, &viewGLTexture);

    glBindTexture(GL_TEXTURE_2D, viewGLTexture); 
    {
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, view.getWidth(), view.getHeight(), 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
    } 
    glBindTexture(GL_TEXTURE_2D, 0);

    cudaGraphicsGLRegisterImage(&viewCudaResource, viewGLTexture, GL_TEXTURE_2D, cudaGraphicsRegisterFlagsWriteDiscard);
}
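
Every CUDA runtime call here returns a cudaError_t, and checking it makes failures like the one described below much easier to localize. A minimal sketch, using a hypothetical checkCuda helper that is not part of the original code:

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Hypothetical helper: print a readable message and abort if a CUDA call fails.
inline void checkCuda(cudaError_t result, const char* what) {
    if (result != cudaSuccess) {
        fprintf(stderr, "%s failed: %s\n", what, cudaGetErrorString(result));
        abort();
    }
}

// Example usage on the registration call above:
// checkCuda(cudaGraphicsGLRegisterImage(&viewCudaResource, viewGLTexture,
//               GL_TEXTURE_2D, cudaGraphicsRegisterFlagsWriteDiscard),
//           "cudaGraphicsGLRegisterImage");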

Whenever the view is resized, I resize the viewport and the texture image accordingly:

void resize() {
    glViewport(0, 0, view.getWidth(), view.getHeight());

    glBindTexture(GL_TEXTURE_2D, viewGLTexture); 
    {
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, view.getWidth(), view.getHeight(), 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
    } 
    glBindTexture(GL_TEXTURE_2D, 0);
}

Then, each frame, I map the graphics resource, get its cudaArray, wrap it in a cudaSurfaceObject, invoke the rendering kernel on it, unmap and synchronize, and let OpenGL draw a fullscreen quad with this texture:

void renderFrame() {
    cudaGraphicsMapResources(1, &viewCudaResource); 
    {
        cudaArray_t viewCudaArray;
        cudaGraphicsSubResourceGetMappedArray(&viewCudaArray, viewCudaResource, 0, 0);
        cudaResourceDesc viewCudaArrayResourceDesc = {}; // zero-initialize: unused fields must be 0
        {
            viewCudaArrayResourceDesc.resType = cudaResourceTypeArray;
            viewCudaArrayResourceDesc.res.array.array = viewCudaArray;
        }
        cudaSurfaceObject_t viewCudaSurfaceObject;
        cudaCreateSurfaceObject(&viewCudaSurfaceObject, &viewCudaArrayResourceDesc); 
        {
            invokeRenderingKernel(viewCudaSurfaceObject);
        } 
        cudaDestroySurfaceObject(viewCudaSurfaceObject);
    } 
    cudaGraphicsUnmapResources(1, &viewCudaResource);

    cudaStreamSynchronize(0);

    glBindTexture(GL_TEXTURE_2D, viewGLTexture); 
    {
        glBegin(GL_QUADS); 
        {
            glTexCoord2f(0.0f, 0.0f); glVertex2f(-1.0f, -1.0f);
            glTexCoord2f(1.0f, 0.0f); glVertex2f(+1.0f, -1.0f);
            glTexCoord2f(1.0f, 1.0f); glVertex2f(+1.0f, +1.0f);
            glTexCoord2f(0.0f, 1.0f); glVertex2f(-1.0f, +1.0f);
        } 
        glEnd();
    }
    glBindTexture(GL_TEXTURE_2D, 0);

    glFinish();
}
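
invokeRenderingKernel is not shown in the question; here is a minimal sketch of what it might launch. The fill pattern (a simple gradient) and the 16×16 launch configuration are assumptions — the essential part is that a surface backed by a GL_RGBA/GL_UNSIGNED_BYTE texture is written with surf2Dwrite, whose x coordinate is in bytes:

// Assumed kernel: fills the surface with a gradient (placeholder for real rendering).
__global__ void renderKernel(cudaSurfaceObject_t surface, int width, int height) {
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= width || y >= height) return;
    uchar4 color = make_uchar4(x % 256, y % 256, 128, 255);
    // surf2Dwrite addresses x in bytes, hence the sizeof(uchar4) scaling.
    surf2Dwrite(color, surface, x * sizeof(uchar4), y);
}

void invokeRenderingKernel(cudaSurfaceObject_t surface) {
    dim3 block(16, 16);
    dim3 grid((view.getWidth()  + block.x - 1) / block.x,
              (view.getHeight() + block.y - 1) / block.y);
    renderKernel<<<grid, block>>>(surface, view.getWidth(), view.getHeight());
}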

The problem is: whenever the view is resized, all CUDA calls start spewing out "unknown error"s, and visually it looks like the texture is not actually resized, just stretched across the whole view. Why is this happening, and how do I fix it?


Answer 1:


It seems interop requires the texture to be re-registered with CUDA whenever it is resized. The following works:

void resize() {
    glViewport(0, 0, view.getWidth(), view.getHeight());

    // unregister
    cudaGraphicsUnregisterResource(viewCudaResource);
    // resize
    glBindTexture(GL_TEXTURE_2D, viewGLTexture);
    {
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, view.getWidth(), view.getHeight(), 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
    }
    glBindTexture(GL_TEXTURE_2D, 0);
    // register back
    cudaGraphicsGLRegisterImage(&viewCudaResource, viewGLTexture, GL_TEXTURE_2D, cudaGraphicsRegisterFlagsWriteDiscard);
}
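
Presumably this is because registration references the texture's data store: after glTexImage2D reallocates it, the old cudaGraphicsResource_t points at stale storage, which would explain the "unknown error"s above. Registration is also a comparatively heavyweight call, so keeping it in the (rare) resize path rather than in renderFrame is the right trade-off; the per-frame cost stays at just map/unmap.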


Source: https://stackoverflow.com/questions/19244191/cuda-opengl-interop-draw-to-opengl-texture-with-cuda
