首页
学习
活动
专区
圈层
工具
发布
社区首页 >问答首页 >CUDA BFS巨图(seg.fault)

CUDA BFS巨图(seg.fault)
EN

Stack Overflow用户
提问于 2011-11-08 00:42:09
回答 1查看 860关注 0票数 2

我在CUDA上做了一个BFS算法的测试(我知道它有一些同步问题,但无论如何测试它都是我工作的一部分),但我在使用(或创建?) 1M+大小的图时遇到了问题。

下面是我用来创建它们的代码:

代码语言:javascript
复制
#include <stdio.h>
#include <stdlib.h>
#include <time.h>


#define GRAPHSIZE 1000000

struct Node 
{
    int begin;     // comeco da sub-string de vizinhos
    int num;    // tamanho da sub-string de vizinhos
};



int getSize()
{
int size,arcs;

    printf("Size of the graph: \nNodes:\n>>");
    scanf ("%d", &size);

return size;
}



void createEdges(int graphSize, int* Edges)
{

int j,value, aux, rndIdx;

int edgesSize = 2*GRAPHSIZE;

srand(time(NULL));



printf ("\nGS : %d\n", graphSize);

j = 1;

    for (int i=0; i < edgesSize; i++) //first it creates an ordered array of edges
    {

        if (j < GRAPHSIZE)
        {
        Edges [i] = j;
        j++;
        }
            else
            {
            j=1;        
            Edges [i] = j; 
            j++;
            }

    }



    for (int i=0; i < edgesSize; i++) //now, it randomly swaps the edges array
    {

    rndIdx = rand()%graphSize;

    aux = Edges[rndIdx];
    Edges[rndIdx] = Edges [i];
    Edges [i] = aux;

    }

} 


int main ()
{

int size,graphAtts[2];

int edgesSize = 2*GRAPHSIZE;

int Edges[edgesSize];

struct Node node[GRAPHSIZE];

FILE *file;



printf("____________________________\nRandom graph generator in compact format, optmized for CUDA algorithms by Ianuarivs Severvs.\nFor details about this format read the README. \n");

//size = getSize(graphAtts);

//printf ("%d,%d",size,arcs);

createEdges(GRAPHSIZE,Edges); // or size?

/*
    for (int i = 0; i < edgesSize ; i ++)
    {
    printf ("-- %d --", Edges[i]);
    }   

*/

printf("\nEdges:\n");
for (int i=0; i < edgesSize; i++) 
printf("%d,",Edges[i]);


    for (int i=0,j=0 ; i < GRAPHSIZE; i++,j+=2) // now, completes the graph
    {
    node[i].begin=j;
    node[i].num=2;
    printf ("\n node %d : begin = %d, num = 2",i,j); 
    }

printf("\n");

//writes file:
file = fopen ("graph1M.g","wb");
fwrite (&Edges, edgesSize * sizeof(int),1,file);
fwrite (&node, GRAPHSIZE * sizeof(struct Node),1,file);
fclose(file);


    for (int i = 0; i < edgesSize ; i ++)
    {
    printf ("-- %d --", Edges[i]);
    }   


    for (int i = 0; i < GRAPHSIZE ; i ++)
    {
    printf ("* %d *", i);
    }


}

下面是我的BFS代码(在CUDA上):

代码语言:javascript
复制
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda.h>
#include <cutil.h> 

#define GRAPHSIZE 1000000

struct Node 
{
    int begin;     // begining of the substring
    int num;    // size of the sub-string 
};

__global__ void BFS (Node *Va, int *Ea, bool *Fa, bool *Xa, int *Ca, bool *parada) // memory races on both Xa and Ca
{

    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid > GRAPHSIZE)
           *parada=true;


        if (Fa[tid] == true && Xa[tid] == false)
        {
        Fa[tid] = false; 
        Xa[tid] = true;
        //__syncthreads(); // this solves the memrace problem as long as the threads are all on the same block
                   for (int i = Va[tid].begin;  i < (Va[tid].begin + Va[tid].num); i++) // Va begin is where it's edges' subarray begins, Va is it's                                                                        number of elements
            {             
                int nid = Ea[i];

                if (Xa[nid] == false)
                {
                Ca[nid] = Ca[tid] + 1;               
                Fa[nid] = true;             
                *parada = true;
                }

            }                   

        }

}

// The BFS frontier corresponds to all the nodes being processed at the current level.


int main()
{

    // for the time couting:
    cudaEvent_t start, stop;
    float time;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);


    FILE * file;

    printf("\nLoading graph file...\n");


    struct Node node[GRAPHSIZE];
    int edgesSize = 2*GRAPHSIZE;
    int edges[edgesSize];


    file = fopen ("graph1M.g","rb");
    printf("abriu");    
    fread (&edges, edgesSize * sizeof(int),1,file);
    fread (&node, GRAPHSIZE * sizeof(struct Node),1,file);
    fclose(file);

 //For file read test propouses only:

/*
    for (int i = 0; i < edgesSize ; i ++)
    {
    printf ("-- %d --", edges[i]);
    }   


    for (int i = 0; i < GRAPHSIZE ; i ++)
    {
    printf ("* %d *", i);
    }
*/  



    bool frontier[GRAPHSIZE]={false}; 
    bool visited[GRAPHSIZE]={false}; 
    int custo[GRAPHSIZE]={0}; 

    int source=0; 
    frontier[source]=true; 

    Node* Va; 
    cudaMalloc((void**)&Va,sizeof(Node)*GRAPHSIZE); 
    cudaMemcpy(Va,node,sizeof(Node)*GRAPHSIZE,cudaMemcpyHostToDevice); 

    int* Ea; 
    cudaMalloc((void**)&Ea,sizeof(Node)*GRAPHSIZE); 
    cudaMemcpy(Ea,edges,sizeof(Node)*GRAPHSIZE,cudaMemcpyHostToDevice); 

    bool* Fa; 
    cudaMalloc((void**)&Fa,sizeof(bool)*GRAPHSIZE); 
    cudaMemcpy(Fa,frontier,sizeof(bool)*GRAPHSIZE,cudaMemcpyHostToDevice); 

    bool* Xa; 
    cudaMalloc((void**)&Xa,sizeof(bool)*GRAPHSIZE); 
    cudaMemcpy(Xa,visited,sizeof(bool)*GRAPHSIZE,cudaMemcpyHostToDevice); 

    int* Ca; 
    cudaMalloc((void**)&Ca,sizeof(int)*GRAPHSIZE); 
    cudaMemcpy(Ca,custo,sizeof(int)*GRAPHSIZE,cudaMemcpyHostToDevice); 


    dim3 grid(100,100,1); //blocks per grid
    dim3 threads(100,1,1);  // threads per block




    bool para; 
    bool* parada; 
    cudaMalloc((void**)&parada,sizeof(bool)); 
    printf("_____________________________________________\n");
    int count=0;

    cudaEventRecord(start, 0);
    do{ 
        count ++;
        para=false; 
        cudaMemcpy(parada,&para,sizeof(bool),cudaMemcpyHostToDevice);       
        BFS <<<grid,threads,0>>>(Va,Ea,Fa,Xa,Ca,parada);    
        CUT_CHECK_ERROR("kernel1 execution failed"); 
        cudaMemcpy(&para,parada,sizeof(bool),cudaMemcpyDeviceToHost); 


    }while(para); 

    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);


    //printf("\nFinal:\n");
    cudaMemcpy(custo,Ca,sizeof(int)*GRAPHSIZE,cudaMemcpyDeviceToHost); 
 /*
    printf("\n_____________________________________________\n");
    for(int i=0;i<GRAPHSIZE;i++) 
        printf("%d  ",custo[i]); 
    printf("\n");

    printf("_____________________________________________\n");
*/

    cudaEventElapsedTime(&time, start, stop);
    printf ("\nTime for the kernel: %lf s \n", time/1000);
    printf ("Number of kernel calls : %d \n", count);


    file = fopen ("graph125MPar","w");


    for(int i=0;i<GRAPHSIZE;i++) 
        fprintf(file,"%d    ",custo[i]); 
    fprintf(file,"\n");
    fclose(file);



}

在尝试为1M+图形运行它时,我遇到了一个分段错误(请注意,我在linux上使用了‘ulimit -s 16384’命令更改了系统的堆栈大小)

有人能帮帮忙吗?

EN

回答 1

Stack Overflow用户

回答已采纳

发布于 2011-11-08 01:11:54

不要为图形使用静态分配的主机数组,而应使用动态内存分配。您的ulimit命令将堆栈大小设置为16384 kb,但是您需要对每个图条目使用类似于5*sizeof(int) + 2*sizeof(bool)的内容,每个条目可能为22字节。很容易看出,在有一百万个条目的情况下,堆栈空间将在哪里耗尽。

票数 3
EN
页面原文内容由Stack Overflow提供。腾讯云小微IT领域专用引擎提供翻译支持
原文链接:

https://stackoverflow.com/questions/8039631

复制
相关文章

相似问题

领券
问题归档专栏文章快讯文章归档关键词归档开发者手册归档开发者手册 Section 归档