我在使用 CUDA 将类传递给内核时遇到了一些问题。我写了一些在 GPU 上为类分配内存的函数,通过它们访问类的成员时一切正常。不过,有一个方法就是行不通,而且我注意到只有在成员是数组(指针)时才会出现这个问题。下面是一个例子。
File1.hh
#ifndef PROVA1_HH
#define PROVA1_HH
#include <cstdio>
// Demo class under discussion: holds a pointer member `x` and a scalar `y`.
// NOTE(review): `x` is allocated with malloc() on the HOST (see the
// constructor below), so its value is a host address that device code
// must never dereference.
class cls {
public:
int *x, y;      // x: pointer to a 2-int host heap buffer; y: plain scalar
cls();          // host-only: allocates x with malloc and initializes x/y
void kernel();  // copies *this to the device and launches kernel1
};
#endif

File1.cu
#include "Prova1.hh"
// Device kernel operating on a cls object that was shallow-copied from the host.
// BUG (the subject of this question): c->x still holds a HOST heap address,
// because copying the struct copies only the pointer value, not the buffer.
// Dereferencing it on the device below is an out-of-bounds device access —
// this is exactly the "Warp Illegal Address" / CUDA_EXCEPTION_14 shown in
// the debugger output. c->y works because it is stored by value in the struct.
__global__ void kernel1(cls* c){
printf("%d\n", c->y);   // OK: y was copied by value into device memory
c->y=2;
printf("%d\n", c->y);
c->x[0]=0; c->x[1]=1;   // ILLEGAL: dereferences a host pointer on the GPU
printf("%d %d\n", c->x[0], c->x[1]);
}
// Host wrapper: shallow-copies *this to the device, runs kernel1, copies back.
// NOTE(review): the copy is shallow — it transfers only the POINTER value of
// x, never the two ints it points to; no device buffer is ever allocated for
// the array. No CUDA call's return value is checked either, so the kernel's
// illegal-address fault is silently swallowed and the final printf simply
// shows the untouched host data (hence the "(1, 2) ... (1, 2)" output).
void cls::kernel(){
cls* dev_c; cudaMalloc(&dev_c, sizeof(cls));
cudaMemcpy(dev_c, this, sizeof(cls), cudaMemcpyHostToDevice);  // shallow: x still points at host memory
printf("(%d, %d)\n", x[0], x[1]);
kernel1<<<1, 1>>> (dev_c);
cudaDeviceSynchronize();  // return value ignored — it would report the fault
cudaMemcpy(this, dev_c, sizeof(cls), cudaMemcpyDeviceToHost);  // also clobbers this->x with the copied-back pointer value
printf("(%d, %d)\n", x[0], x[1]);
}
// Constructor: y = 3 and x = a 2-int buffer on the HOST heap, set to {1, 2}.
cls::cls(){
y=3;
x=(int*) malloc(sizeof(int)*2);  // host allocation — unreachable from GPU code
x[0]=1; x[1]=2;
}
File.cu
#include<cstdio>
#include "Prova1.hh"
// Driver: default-constructs a cls on the host and runs the (buggy) demo.
int main(){
cls c=cls();
c.kernel();
return 0;
}
我使用以下命令进行编译:
nvcc -std=c++11 -arch=sm_35 -rdc=true -c -o File1.o File1.cu
nvcc -std=c++11 -arch=sm_35 -rdc=true -g -G -o File.out File1.o File.cu

当我直接运行它时,输出是:
(1, 2)
3
2
(1, 2)

当我调试它时,得到的是:
Starting program:
[Thread debugging using libthread_db enabled]
Using host libthread_db library "/lib/aarch64-linux-gnu/libthread_db.so.1".
[New Thread 0x7fb10eb1e0 (LWP 806)]
(1, 2)
CUDA Exception: Warp Illegal Address
The exception was triggered at PC 0x84fa10
Thread 1 "File.out" received signal CUDA_EXCEPTION_14, Warp Illegal Address.
[Switching focus to CUDA kernel 0, grid 1, block (0,0,0), thread (0,0,0), device 0, sm 0, warp 0, lane 0]
0x000000000084fad0 in kernel1(ciao*)<<<(1,1,1),(1,1,1)>>> ()

有谁知道我错在哪里吗?
发布于 2018-08-07 20:37:03
您发布的代码中有不少错误,但问题的根源在于您在内核中访问了一个主机指针(从未在设备上为 x 分配内存,也没有把它指向的值复制过去)。除非使用托管内存(managed memory),否则这样做显然行不通。
您可以将您的示例重新编写为如下所示:
#include <cstdio>
// Fixed design: the class no longer owns its buffer. The caller supplies
// both the pointer and the scalar, so a "device view" of the object — one
// whose x member holds a DEVICE pointer — can be constructed in host memory
// and then copied to the device as a plain bit pattern.
class cls {
public:
int *x, y;                                // x: externally owned buffer; y: scalar
__host__ __device__
cls(int *x_, int y_) : x(x_), y(y_) {};   // callable from both host and device code
void kernel();                            // host-side demo: deep-copy to device, launch, copy back
};
// Prints y, overwrites it with 2 and prints it again, then writes {0, 1}
// into the array and prints both elements. Safe here because the caller
// constructed the object so that obj->x already holds a device address.
__global__ void kernel1(cls* obj){
    int* data = obj->x;          // device buffer supplied by the host wrapper
    printf("%d\n", obj->y);
    obj->y = 2;
    printf("%d\n", obj->y);
    data[0] = 0;
    data[1] = 1;
    printf("%d %d\n", data[0], data[1]);
}
// Host wrapper implementing the deep-copy pattern:
//  1. allocate a device buffer for the array and copy the host data into it,
//  2. build a host-side "device view" of the object whose x member holds the
//     DEVICE pointer,
//  3. copy that view to the device and launch the kernel,
//  4. copy y and the array contents back, then release the device memory.
// Fixes over the original: the two device allocations are now freed (they
// were leaked), and launch/execution errors are actually checked.
void cls::kernel(){
    int* dev_x; cudaMalloc(&dev_x, sizeof(int)*2);
    cudaMemcpy(dev_x, x, sizeof(int)*2, cudaMemcpyHostToDevice);
    cls h_dev_c(dev_x, y);                 // device view, built in host memory
    cls* dev_c; cudaMalloc(&dev_c, sizeof(cls));
    cudaMemcpy(dev_c, &h_dev_c, sizeof(cls), cudaMemcpyHostToDevice);
    printf("(%d)\n", y);
    printf("(%d, %d)\n", x[0], x[1]);
    kernel1<<<1, 1>>> (dev_c);
    cudaError_t err = cudaGetLastError();  // catches bad launch configuration
    if (err == cudaSuccess)
        err = cudaDeviceSynchronize();     // catches in-kernel faults
    if (err != cudaSuccess)
        fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(err));
    // &(dev_c->y) is plain pointer arithmetic on a device address — legal on
    // the host as long as the result is only passed to cudaMemcpy, never
    // dereferenced here.
    cudaMemcpy(&y, &(dev_c->y), sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(x, dev_x, sizeof(int)*2, cudaMemcpyDeviceToHost);
    printf("(%d)\n", y);
    printf("(%d, %d)\n", x[0], x[1]);
    cudaFree(dev_x);                       // fix: original leaked both buffers
    cudaFree(dev_c);
}
// Driver: the host owns the array; the object is built from existing state.
// Fixes over the original: the malloc'd buffer is freed (it was leaked) and
// allocation failure is handled instead of dereferencing a null pointer.
// NOTE(review): malloc/free require <cstdlib>; add it next to <cstdio> above.
int main(){
    int y=3;
    int* x=(int*) malloc(sizeof(int)*2);
    if (x == NULL) return 1;    // robustness: allocation can fail
    x[0]=1; x[1]=2;
    cls c(x,y);
    c.kernel();
    free(x);                    // fix: original leaked the host buffer
    return 0;
}

请注意,您基本上必须在主机内存中构建类的设备副本,然后将其复制到设备以使其正常工作(这是指针数组或包含指针的结构和类的一个非常常见的设计模式,尽管出于复杂性和性能原因,几乎从不推荐这样做)。
https://stackoverflow.com/questions/51724024
复制相似问题