首页
学习
活动
专区
圈层
工具
发布
社区首页 >问答首页 >cudaMallocManaged (统一内存)和cuBLAS

cudaMallocManaged (统一内存)和cuBLAS
EN

Stack Overflow用户
提问于 2020-12-30 03:08:29
回答 1查看 584关注 0票数 1

我试图在cudaMallocManaged()和cuBLAS库中使用统一内存。我正在执行一个简单的矩阵向量乘法作为一个简单的例子,并将结果存储在一个数组results中。然而,当打印results数组时,我会得到所有的0,而不是矩阵mat乘以矢量vec的结果。

我使用的流程是:

用cudaMallocManaged()

  • Initializing分配内存用data

  • Allocating的数组cuBLAS handle

  • Calling cublasDgemv执行在results

中存储结果的乘法

当使用new,然后使用cublasSetMatrix()cublasSetVector()时,效果很好。

如何在cuBLAS中使用统一内存?

以下是最低限度的工作实例:

统一内存尝试(这将返回results中的所有0):

代码语言:javascript
复制
#include <cuda.h>
#include <cuda_runtime.h>
#include <iostream>
#include <ctime>
#include "cublas_v2.h"

#define cudaErrChk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

static const char *cublasErrChk(cublasStatus_t error)
{
    switch (error)
    {
        case CUBLAS_STATUS_SUCCESS:
            return "CUBLAS_STATUS_SUCCESS";

        case CUBLAS_STATUS_NOT_INITIALIZED:
            return "CUBLAS_STATUS_NOT_INITIALIZED";

        case CUBLAS_STATUS_ALLOC_FAILED:
            return "CUBLAS_STATUS_ALLOC_FAILED";

        case CUBLAS_STATUS_INVALID_VALUE:
            return "CUBLAS_STATUS_INVALID_VALUE";

        case CUBLAS_STATUS_ARCH_MISMATCH:
            return "CUBLAS_STATUS_ARCH_MISMATCH";

        case CUBLAS_STATUS_MAPPING_ERROR:
            return "CUBLAS_STATUS_MAPPING_ERROR";

        case CUBLAS_STATUS_EXECUTION_FAILED:
            return "CUBLAS_STATUS_EXECUTION_FAILED";

        case CUBLAS_STATUS_INTERNAL_ERROR:
            return "CUBLAS_STATUS_INTERNAL_ERROR";
    }

    return "<unknown>";
}

int main() {

    size_t dims = 4;

    double *vec, *mat, *results;

    cudaErrChk( cudaMallocManaged(&vec, dims * sizeof(double)) );
    cudaErrChk( cudaMallocManaged(&mat, dims * dims * sizeof(double)) );
    cudaErrChk( cudaMallocManaged(&results, dims * sizeof(double)) );

    printf("Vector:\n");
    for (int i = 1; i < dims + 1; i++) {
        vec[i] = 0.5 * i;
        printf("%.2lf ", vec[i]);
    } 
    printf("\n\nMatrix:\n");

    for (int i = 1; i < dims * dims + 1; i++) {
        mat[i] = 1.0 * i;
        printf("%.2lf ", mat[i]);

        if (i % dims == 0)
            printf("\n");
    }
    printf("\n");

    cublasHandle_t handle;
    cublasErrChk( cublasCreate(&handle) );

    double alpha = 1.f, beta = 1.f;

    // multiply mat by vec to get results
    cublasErrChk(
        cublasDgemv(
            handle, CUBLAS_OP_N,
            dims, dims,
            &alpha,
            mat, dims,
            vec, 1,
            &beta,
            results, 1
        )
    );

    for (int i = 0; i < dims; i++)
        printf("%.2lf ", results[i]);
    printf("\n");

    cudaErrChk( cudaFree(vec) );
    cudaErrChk( cudaFree(mat) );
    cudaErrChk( cudaFree(results) );

    return 0;
}

普通malloc/setMatrix()尝试:

代码语言:javascript
复制
#include <cuda.h>
#include <cuda_runtime.h>
#include <iostream>
#include <ctime>
#include "cublas_v2.h"

#define cudaErrChk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

static const char *cublasErrChk(cublasStatus_t error)
{
    switch (error)
    {
        case CUBLAS_STATUS_SUCCESS:
            return "CUBLAS_STATUS_SUCCESS";

        case CUBLAS_STATUS_NOT_INITIALIZED:
            return "CUBLAS_STATUS_NOT_INITIALIZED";

        case CUBLAS_STATUS_ALLOC_FAILED:
            return "CUBLAS_STATUS_ALLOC_FAILED";

        case CUBLAS_STATUS_INVALID_VALUE:
            return "CUBLAS_STATUS_INVALID_VALUE";

        case CUBLAS_STATUS_ARCH_MISMATCH:
            return "CUBLAS_STATUS_ARCH_MISMATCH";

        case CUBLAS_STATUS_MAPPING_ERROR:
            return "CUBLAS_STATUS_MAPPING_ERROR";

        case CUBLAS_STATUS_EXECUTION_FAILED:
            return "CUBLAS_STATUS_EXECUTION_FAILED";

        case CUBLAS_STATUS_INTERNAL_ERROR:
            return "CUBLAS_STATUS_INTERNAL_ERROR";
    }

    return "<unknown>";
}

int main() {

    size_t dims = 4;

    double *h_vec, *h_mat, *h_results;

    h_vec = new double[dims];
    h_mat = new double[dims * dims];
    h_results = new double[dims];

    printf("Vector:\n");
    for (int i = 1; i < dims + 1; i++) {
        h_vec[i] = 0.5 * i;
        printf("%.2lf ", h_vec[i]);
    } 
    printf("\n\nMatrix:\n");

    for (int i = 1; i < dims * dims + 1; i++) {
        h_mat[i] = 1.0 * i;
        printf("%.2lf ", h_mat[i]);

        if (i % dims == 0)
            printf("\n");
    }
    printf("\n");

    double *d_vec, *d_mat, *d_results;

    cudaErrChk( cudaMalloc(&d_vec, dims * sizeof(double)) );
    cudaErrChk( cudaMalloc(&d_mat, dims * dims * sizeof(double)) );
    cudaErrChk( cudaMalloc(&d_results, dims * sizeof(double)) );

    cublasHandle_t handle;
    cublasErrChk( cublasCreate(&handle) );

    // copy the data manually to the GPUs
    cublasErrChk( cublasSetVector(dims, sizeof(*d_vec), h_vec, 1, d_vec, 1) );
    cublasErrChk( cublasSetMatrix(dims, dims, sizeof(double), h_mat, dims, d_mat, dims) );

    double alpha = 1.f, beta = 1.f;

    // // multiply mat by vec to get results
    cublasErrChk(
        cublasDgemv(
            handle, CUBLAS_OP_N,
            dims, dims,
            &alpha,
            d_mat, dims,
            d_vec, 1,
            &beta,
            d_results, 1
        )
    );

    cublasErrChk( cublasGetVector(dims, sizeof(*h_results), d_results, 1, h_results, 1) );

    for (int i = 0; i < dims; i++)
        printf("%.2lf ", h_results[i]);
    printf("\n");

    cudaErrChk( cudaFree(d_vec) );
    cudaErrChk( cudaFree(d_mat) );
    cudaErrChk( cudaFree(d_results) );

    delete [] h_vec;
    delete [] h_mat;
    delete [] h_results;

    return 0;
}

nvcc -o main main.cu -lcublas

EN

回答 1

Stack Overflow用户

发布于 2020-12-31 03:44:15

正如@talonmies所指出的,问题在于我使用的是异步调用,而没有及时获得结果。这是通过在cudaDeviceSynchronize()调用之后添加cublasDgemv()来修正的:

代码语言:javascript
复制
#include <cuda.h>
#include <cuda_runtime.h>
#include <iostream>
#include <ctime>
#include "cublas_v2.h"

#define cudaErrChk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

static const char *cublasErrChk(cublasStatus_t error)
{
    switch (error)
    {
        case CUBLAS_STATUS_SUCCESS:
            return "CUBLAS_STATUS_SUCCESS";

        case CUBLAS_STATUS_NOT_INITIALIZED:
            return "CUBLAS_STATUS_NOT_INITIALIZED";

        case CUBLAS_STATUS_ALLOC_FAILED:
            return "CUBLAS_STATUS_ALLOC_FAILED";

        case CUBLAS_STATUS_INVALID_VALUE:
            return "CUBLAS_STATUS_INVALID_VALUE";

        case CUBLAS_STATUS_ARCH_MISMATCH:
            return "CUBLAS_STATUS_ARCH_MISMATCH";

        case CUBLAS_STATUS_MAPPING_ERROR:
            return "CUBLAS_STATUS_MAPPING_ERROR";

        case CUBLAS_STATUS_EXECUTION_FAILED:
            return "CUBLAS_STATUS_EXECUTION_FAILED";

        case CUBLAS_STATUS_INTERNAL_ERROR:
            return "CUBLAS_STATUS_INTERNAL_ERROR";
    }

    return "<unknown>";
}

int main() {

    size_t dims = 4;

    double *vec, *mat, *results;

    cudaErrChk( cudaMallocManaged(&vec, dims * sizeof(double)) );
    cudaErrChk( cudaMallocManaged(&mat, dims * dims * sizeof(double)) );
    cudaErrChk( cudaMallocManaged(&results, dims * sizeof(double)) );

    printf("Vector:\n");
    for (int i = 1; i < dims + 1; i++) {
        vec[i] = 0.5 * i;
        printf("%.2lf ", vec[i]);
    } 
    printf("\n\nMatrix:\n");

    for (int i = 1; i < dims * dims + 1; i++) {
        mat[i] = 1.0 * i;
        printf("%.2lf ", mat[i]);

        if (i % dims == 0)
            printf("\n");
    }
    printf("\n");

    cublasHandle_t handle;
    cublasErrChk( cublasCreate(&handle) );

    double alpha = 1.f, beta = 1.f;

    // multiply mat by vec to get results
    cublasErrChk(
        cublasDgemv(
            handle, CUBLAS_OP_N,
            dims, dims,
            &alpha,
            mat, dims,
            vec, 1,
            &beta,
            results, 1
        )
    );
    cudaDeviceSynchronize();

    for (int i = 0; i < dims; i++)
        printf("%.2lf ", results[i]);
    printf("\n");

    cudaErrChk( cudaFree(vec) );
    cudaErrChk( cudaFree(mat) );
    cudaErrChk( cudaFree(results) );

    return 0;
}
票数 3
EN
页面原文内容由Stack Overflow提供。腾讯云小微IT领域专用引擎提供翻译支持
原文链接:

https://stackoverflow.com/questions/65501537

复制
相关文章

相似问题

领券
问题归档专栏文章快讯文章归档关键词归档开发者手册归档开发者手册 Section 归档