我正在尝试释放我在CUDA + OpenGL互操作代码中分配的设备内存OpenGL。在进行错误检查时,我收到了Invalid Device Pointer错误,程序在cudaFree(dev_inp);函数结束时停止了执行。一切都很好,但我担心的是内存泄漏。
问题:
为什么我不能释放我分配的本地设备内存?我将cuda_resource从像素缓冲区对象中取消映射,并取消资源注册。
摘自数据自动化系统C方案编制指南B.17节:
Memory allocated via malloc() cannot be freed using the runtime (i.e. by calling any of the free memory functions from Sections 3.2.2).
这就引出了另外两个问题:
我在内核中没有malloced内存,因为我没有。因此,利用cudaFree函数应该(技术上?)在这里工作对吗?是由程序员来释放提供给本地定义的指针的内存,还是nvcc编译器在程序退出时或在程序超出本地范围时处理解除分配?我不想在代码中出现内存泄漏,所以通过处理以前分配的内存,我感到更安全。
c.在cudaDeviceReset()函数的末尾调用renderScene()是否谨慎,从而破坏了主CUDA上下文(以及它的变量和指针,如CUDA C编程指南所示)?我已经看到NVidia Visual也提到了这一点:当我调用它时,呈现速度似乎比平常慢。如果我可以简单地在这里cudaFree内存,那就太好了,但我似乎无法让它工作起来。
完整代码:
#define GET_PROC_ADDRESS( str ) wglGetProcAddress( str )
GLuint tex;
GLuint pbo;
struct cudaGraphicsResource *cuda_resource;
PFNGLBINDBUFFERARBPROC glBindBuffer = NULL;
PFNGLDELETEBUFFERSARBPROC glDeleteBuffers = NULL;
PFNGLGENBUFFERSARBPROC glGenBuffers = NULL;
PFNGLBUFFERDATAARBPROC glBufferData = NULL;
// ==========================================================================================
// CUDA ERROR CHECKING CODE
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) getchar();
}
}
// ==========================================================================================
void initCUDADevice() {
gpuErrchk(cudaGLSetGLDevice( cutGetMaxGflopsDeviceId() ));
}
// ==========================================================================================
void changeSize(int w, int h) {
//cudaDeviceReset();
//initCUDADevice();
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glLoadIdentity();
// Prevent a divide by zero, when window is too short
// (you cant make a window of zero width).
if (h == 0)
h = 1;
float ratio = w * 1.0 / h;
// Use the Projection Matrix
glMatrixMode(GL_PROJECTION);
// Reset Matrix
//glLoadIdentity();
//// Set the viewport to be the entire window
glViewport(0, 0, w, h);
//// Get Back to the Modelview
glMatrixMode(GL_MODELVIEW);
}
// ==========================================================================================
void renderScene(void) {
// Clear Color and Depth Buffers
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// Reset transformations
glLoadIdentity();
// ====================================================================================
// initiate GPU by setting it correctly
//initCUDADevice();
// ====================================================================================
// read the image that needs to be textured
Mat image, flipped;
image = imread("K:/Ultrasound experiment images/PA_175.png", CV_LOAD_IMAGE_GRAYSCALE); // Read the file from disk
if(!image.data) // Check for invalid input
{
cout << "Could not open or find the image" << std::endl ;
}
cv::flip(image, flipped, 0);
imshow("OpenCV - image", image); // displays output
// ====================================================================================
// allocate the PBO, texture, and CUDA resource
glBindBuffer = (PFNGLBINDBUFFERARBPROC)GET_PROC_ADDRESS("glBindBuffer");
glDeleteBuffers = (PFNGLDELETEBUFFERSARBPROC)GET_PROC_ADDRESS("glDeleteBuffers");
glGenBuffers = (PFNGLGENBUFFERSARBPROC)GET_PROC_ADDRESS("glGenBuffers");
glBufferData = (PFNGLBUFFERDATAARBPROC)GET_PROC_ADDRESS("glBufferData");
// ====================================================================================
// generate the pixel buffer object (PBO)
// Generate a buffer ID called a PBO (Pixel Buffer Object)
glGenBuffers(1, &pbo);
// Make this the current UNPACK buffer (OpenGL is state-based)
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pbo);
// Allocate data for the buffer. 4-channel 8-bit image
glBufferData(GL_PIXEL_UNPACK_BUFFER, sizeof(unsigned char) * flipped.rows * flipped.cols, NULL, GL_STREAM_DRAW);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
gpuErrchk(cudaGraphicsGLRegisterBuffer(&cuda_resource, pbo, cudaGraphicsMapFlagsNone));
// ====================================================================================
// create the texture object
// enable 2D texturing
glEnable(GL_TEXTURE_2D);
// generate and bind the texture
glGenTextures(1, &tex);
glBindTexture(GL_TEXTURE_2D, tex);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
// put flipped.data at the end for cpu rendering
glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, image.cols, image.rows, 0, GL_LUMINANCE, GL_UNSIGNED_BYTE, 0 );
// put tex at the end for cpu rendering
glBindTexture(GL_TEXTURE_2D, 0);
// ====================================================================================
// copy OpenCV flipped image data into the device pointer
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
unsigned char *dev_inp;
gpuErrchk( cudaMalloc((void**)&dev_inp, sizeof(unsigned char)*flipped.rows*flipped.cols) );
gpuErrchk( cudaGraphicsMapResources(1, &cuda_resource, 0) );
size_t size;
gpuErrchk( cudaGraphicsResourceGetMappedPointer((void **)&dev_inp, &size, cuda_resource) );
gpuErrchk( cudaMemcpy(dev_inp, flipped.data, sizeof(unsigned char)*flipped.rows*flipped.cols, cudaMemcpyHostToDevice) );
gpuErrchk( cudaGraphicsUnmapResources(1, &cuda_resource, 0) );
// ====================================================================================
// bind pbo and texture to render data now
glBindBuffer( GL_PIXEL_UNPACK_BUFFER, pbo);
//
glBindTexture(GL_TEXTURE_2D, tex);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, flipped.cols, flipped.rows, GL_LUMINANCE, GL_UNSIGNED_BYTE, NULL);
gpuErrchk( cudaGraphicsUnregisterResource(cuda_resource));
gpuErrchk( cudaThreadSynchronize());
//gpuErrchk(cudaFree(dev_inp));
// ====================================================================================
// map the texture coords to the vertex coords
glBegin(GL_QUADS);
// Front Face
glTexCoord2f(0.0f, 0.0f); glVertex3f(-1.0f, -1.0f, 1.0f); // Bottom Left Of The Texture and Quad
glTexCoord2f(1.0f, 0.0f); glVertex3f( 1.0f, -1.0f, 1.0f); // Bottom Right Of The Texture and Quad
glTexCoord2f(1.0f, 1.0f); glVertex3f( 1.0f, 1.0f, 1.0f); // Top Right Of The Texture and Quad
glTexCoord2f(0.0f, 1.0f); glVertex3f(-1.0f, 1.0f, 1.0f); // Top Left Of The Texture and Quad
glEnd();
glFlush(); // force rendering
glDisable(GL_TEXTURE_2D);
//glutSwapBuffers();
gpuErrchk(cudaFree(dev_inp)); // <--- Error here
//cudaGraphicsUnregisterResource(cuda_resource);
}
// ==========================================================================================
int main(int argc, char **argv) {
// init GLUT and create window
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_DEPTH | GLUT_RGB );
glutInitWindowPosition(100,100);
glutInitWindowSize(1024,256);
glutCreateWindow("CUDA + OpenGL interop");
// register callbacks
glutDisplayFunc(renderScene);
glutReshapeFunc(changeSize);
//glutIdleFunc(renderScene);
// enter GLUT event processing cycle
glutMainLoop();
return 1;
}发布于 2014-08-29 14:53:44
这一行不是必需的,应该从代码中删除:
gpuErrchk( cudaMalloc((void**)&dev_inp, sizeof(unsigned char)*flipped.rows*flipped.cols) );这一行创建一个设备分配,并将该分配的指针分配给dev_inp。
这里出现的问题是:
gpuErrchk( cudaGraphicsResourceGetMappedPointer((void **)&dev_inp, &size, cuda_resource) );这一行获取一个从cuda_resource对象派生到另一个不同分配的新指针,并将该指针放置到dev_inp,覆盖以前分配的指针的(从cudaMalloc)。在此行中获得的新指针已经具有基础设备分配。在这一点上,您不需要单独/另外分配它。
此时,如果您试图释放dev_inp
gpuErrchk(cudaFree(dev_inp)); // <--- Error here您正在尝试释放程序未显式分配的数据(通过cudaMalloc),并且是持久性(此时) cuda_resource对象的必要组件。你不会想那样做的。不幸的是,放置在dev_inp中的原始指针现在丢失了(覆盖),因此在您的程序中无法“释放”它,而且只要程序正在执行,您就会有内存泄漏。
解决方案不是执行额外的、不需要的分配:
gpuErrchk( cudaMalloc((void**)&dev_inp, sizeof(unsigned char)*flipped.rows*flipped.cols) );这就意味着应该删除相应的cudaFree操作:
gpuErrchk(cudaFree(dev_inp)); // <--- Error here在程序实际退出之前,我不会在任何地方使用cudaDeviceReset代码,特别是CUDA/OpenGL代码。在其他一些非常特殊的情况下,您可能希望在实际退出程序之前使用cudaDeviceReset,但在这里不适用。
https://stackoverflow.com/questions/25559600
复制相似问题