cuda.c (forked from HLRA-JHPCN/comm_bench)

#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <mpi.h>
#include <cuda_runtime.h>

/* Host<->device cudaMemcpy microbenchmark, run by a single MPI rank. */
int main(int argc, char **argv)
{
    int i;
    int myid, nprocs, ierr, provided;
    int N = 1000, loops;
    double time, t_min = 999999.99, t_max = 0.0, t_sum = 0.0;
    double *data, *d_data;
    int gpu = -1;

    if (argc != 4) {
        printf("usage: %s length loops gpuid\n", argv[0]);
        return -1;
    }
    N     = atoi(argv[1]);
    loops = atoi(argv[2]);
    gpu   = atoi(argv[3]);

    //ierr = MPI_Init_thread(&argc, &argv, MPI_THREAD_FUNNELED, &provided);
    //if (provided != MPI_THREAD_FUNNELED) printf("MPI_THREAD_FUNNELED is not provided.\n");
    ierr = MPI_Init(&argc, &argv);
    ierr = MPI_Comm_rank(MPI_COMM_WORLD, &myid);
    ierr = MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
    if (nprocs != 1) {
        printf("1 process is required.\n");
        MPI_Finalize();
        return -1;
    }

    /* Host buffer, initialized so the copies move defined data. */
    data = (double *)malloc(sizeof(double) * N);
    for (i = 0; i < N; i++) data[i] = (double)i;

    printf("cudaSetDevice(%d)\n", gpu);
    cudaSetDevice(gpu);
    if (cudaMalloc((void **)&d_data, sizeof(double) * N) != cudaSuccess) {
        printf("cudaMalloc failed.\n");
        MPI_Finalize();
        return -1;
    }

    /* Warm-up copies: the first transfers include one-time CUDA context/setup costs. */
    for (i = 0; i < 10; i++) {
        cudaMemcpy(d_data, data, sizeof(double) * N, cudaMemcpyHostToDevice);
        cudaMemcpy(data, d_data, sizeof(double) * N, cudaMemcpyDeviceToHost);
    }

    /* Timed loop: one host-to-device plus one device-to-host copy per iteration.
       cudaMemcpy on pageable memory is synchronous with respect to the host, so
       MPI_Wtime() brackets the full transfer without an explicit synchronize. */
    for (i = 0; i < loops; i++) {
        time = MPI_Wtime();
        cudaMemcpy(d_data, data, sizeof(double) * N, cudaMemcpyHostToDevice);
        cudaMemcpy(data, d_data, sizeof(double) * N, cudaMemcpyDeviceToHost);
        time = MPI_Wtime() - time;
        if (time > t_max) t_max = time;
        if (time < t_min) t_min = time;
        t_sum += time;
    }

    printf("TIME %d : %e sec total (average %e msec, min %e msec, max %e msec)\n",
           myid, t_sum,
           t_sum / (double)loops * 1000.0,
           t_min * 1000.0,
           t_max * 1000.0);

    cudaFree(d_data);
    free(data);
    ierr = MPI_Finalize();
    return 0;
}
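
/* Build-and-run sketch (an assumption, not part of the repository: the compiler
 * wrapper, output name "cuda_bench", and CUDA library paths vary by system):
 *
 *   mpicc cuda.c -o cuda_bench -lcudart      # may need -I/-L flags pointing at the CUDA toolkit
 *   mpirun -np 1 ./cuda_bench 1000000 100 0  # length = 1000000 doubles, 100 timed loops, GPU id 0
 *
 * The three positional arguments are the vector length in doubles, the number of
 * timed iterations, and the CUDA device id passed to cudaSetDevice().
 */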