安装 CUDA：网上有很多详细的安装教程，此处不再赘述。
vim gpu.cu
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <sys/time.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#define Row 1024
#define Col 1024
// Computes P = M * N for square width x width matrices of int
// (row-major). Thread (i, j) of a 2D launch computes element P[j][i];
// i indexes the column, j the row.
//
// The guard below makes the kernel correct for ANY grid size: the host
// rounds the grid up with a ceil-divide, so for widths that are not a
// multiple of the block dimension some threads fall outside the matrix
// and must do nothing (otherwise they read/write out of bounds).
__global__ void matrix_mul_gpu(const int* __restrict__ M,
                               const int* __restrict__ N,
                               int* __restrict__ P, int width)
{
    int i = threadIdx.x + blockDim.x * blockIdx.x;  // column index
    int j = threadIdx.y + blockDim.y * blockIdx.y;  // row index
    if (i >= width || j >= width)
        return;  // thread is past the matrix edge

    int sum = 0;
    for (int k = 0; k < width; ++k)
        sum += M[j * width + k] * N[k * width + i];
    P[j * width + i] = sum;
}
// Abort with a readable message if a CUDA runtime call fails.
// Kernel launches do not return an error directly, so the launch is
// followed by cudaGetLastError() below.
#define CUDA_CHECK(call)                                                  \
    do {                                                                  \
        cudaError_t err_ = (call);                                        \
        if (err_ != cudaSuccess) {                                        \
            fprintf(stderr, "CUDA error %s:%d: %s\n", __FILE__, __LINE__, \
                    cudaGetErrorString(err_));                            \
            exit(EXIT_FAILURE);                                           \
        }                                                                 \
    } while (0)

// Multiplies two Row x Col constant matrices (A = 90, B = 10) on the
// GPU, prints one element of the result (expected 90*10*1024 = 921600),
// and reports total wall-clock time including allocation and transfers.
int main()
{
    struct timeval start, end;
    gettimeofday(&start, NULL);

    const size_t bytes = sizeof(int) * Row * Col;
    int *A = (int*)malloc(bytes);
    int *B = (int*)malloc(bytes);
    int *C = (int*)malloc(bytes);
    if (A == NULL || B == NULL || C == NULL) {
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }

    int *d_dataA, *d_dataB, *d_dataC;
    CUDA_CHECK(cudaMalloc((void**)&d_dataA, bytes));
    CUDA_CHECK(cudaMalloc((void**)&d_dataB, bytes));
    CUDA_CHECK(cudaMalloc((void**)&d_dataC, bytes));

    // Constant fill so the result is trivially checkable:
    // every element of C should be 90 * 10 * Col.
    for (int i = 0; i < Row * Col; ++i) {
        A[i] = 90;
        B[i] = 10;
    }
    CUDA_CHECK(cudaMemcpy(d_dataA, A, bytes, cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(d_dataB, B, bytes, cudaMemcpyHostToDevice));

    // 16x16 threads per block; ceil-divide so the grid covers the
    // whole matrix even when Row/Col are not multiples of 16.
    dim3 threadPerBlock(16, 16);
    dim3 blockNumber((Col + threadPerBlock.x - 1) / threadPerBlock.x,
                     (Row + threadPerBlock.y - 1) / threadPerBlock.y);
    matrix_mul_gpu<<<blockNumber, threadPerBlock>>>(d_dataA, d_dataB,
                                                    d_dataC, Col);
    CUDA_CHECK(cudaGetLastError());  // catch bad launch configuration

    // cudaMemcpy is blocking, so it also synchronizes with the kernel
    // and surfaces any asynchronous execution error.
    CUDA_CHECK(cudaMemcpy(C, d_dataC, bytes, cudaMemcpyDeviceToHost));

    std::cout << C[512 * 1024 + 512] << std::endl;  // expect 921600

    free(A);
    free(B);
    free(C);
    CUDA_CHECK(cudaFree(d_dataA));
    CUDA_CHECK(cudaFree(d_dataB));
    CUDA_CHECK(cudaFree(d_dataC));

    gettimeofday(&end, NULL);
    // long avoids overflow of the microsecond total on long runs;
    // '\n' (not '\r') so the line is not overwritten; "total" typo fixed.
    long timeuse = 1000000L * (end.tv_sec - start.tv_sec)
                 + (end.tv_usec - start.tv_usec);
    printf("total time is %f ms\n", timeuse / 1000.0);
    return 0;
}
保存(上代码仅供测试)
vim CMakeLists.txt
cmake_minimum_required(VERSION 2.6)
project(cudatest)

find_package(CUDA)
if(NOT CUDA_FOUND)
    # FATAL_ERROR stops configuration here; the original only printed a
    # STATUS note and then tried to add the CUDA target anyway.
    message(FATAL_ERROR "CUDA not found. Project will not be built.")
endif()
# Note: the closing endif() must carry no argument (or repeat the if()
# condition exactly); `endif(CUDA_FOUND)` after `if(NOT CUDA_FOUND)` is
# a mismatched block and CMake rejects it.

cuda_add_executable(${PROJECT_NAME} gpu.cu)
保存
cmake .
cmake --build .
./cudatest   # 可执行文件名与 project(cudatest) 一致，并非 gputest
版权声明:本文为tlglove326原创文章,遵循CC 4.0 BY-SA版权协议,转载请附上原文出处链接和本声明。