darknet  v3
gemm.c
#include "gemm.h"
#include "utils.h"
#include "cuda.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <time.h>   /* clock(), clock_t, CLOCKS_PER_SEC used by the timing helpers below */

/* Binary-weight GEMM: each entry of A is a sign bit (nonzero = +1, zero = -1),
 * so every row of B is either added to or subtracted from the matching row of C. */
void gemm_bin(int M, int N, int K, float ALPHA,
        char *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    int i,j,k;
    for(i = 0; i < M; ++i){
        for(k = 0; k < K; ++k){
            char A_PART = A[i*lda+k];
            if(A_PART){
                for(j = 0; j < N; ++j){
                    C[i*ldc+j] += B[k*ldb+j];
                }
            } else {
                for(j = 0; j < N; ++j){
                    C[i*ldc+j] -= B[k*ldb+j];
                }
            }
        }
    }
}

float *random_matrix(int rows, int cols)
{
    int i;
    float *m = calloc(rows*cols, sizeof(float));
    for(i = 0; i < rows*cols; ++i){
        m[i] = (float)rand()/RAND_MAX;
    }
    return m;
}

void time_random_matrix(int TA, int TB, int m, int k, int n)
{
    float *a;
    if(!TA) a = random_matrix(m,k);
    else a = random_matrix(k,m);
    int lda = (!TA)?k:m;    // leading dimension = number of stored columns (row-major)
    float *b;
    if(!TB) b = random_matrix(k,n);
    else b = random_matrix(n,k);
    int ldb = (!TB)?n:k;

    float *c = random_matrix(m,n);
    int i;
    clock_t start = clock(), end;
    for(i = 0; i<10; ++i){
        gemm_cpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n);
    }
    end = clock();
    printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s\n",m,k,k,n, TA, TB, (float)(end-start)/CLOCKS_PER_SEC);
    free(a);
    free(b);
    free(c);
}


/* C = ALPHA * op(A) * op(B) + BETA * C, row-major storage.
 * TA/TB select whether A/B are used transposed; this wrapper forwards to the CPU path. */
void gemm(int TA, int TB, int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float BETA,
        float *C, int ldc)
{
    gemm_cpu( TA, TB, M, N, K, ALPHA,A,lda, B, ldb,BETA,C,ldc);
}
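
/* Illustrative usage sketch (not part of the original file): multiply a 2x3
 * matrix A by a 3x2 matrix B into a 2x2 result C, no transposes, C overwritten
 * because BETA = 0. Each leading dimension is the row stride (number of stored
 * columns) of its row-major matrix.
 *
 *     float A[2*3] = {1, 2, 3,
 *                     4, 5, 6};
 *     float B[3*2] = { 7,  8,
 *                      9, 10,
 *                     11, 12};
 *     float C[2*2] = {0};
 *     gemm(0, 0, 2, 2, 3, 1, A, 3, B, 2, 0, C, 2);
 *     // C now holds {58, 64, 139, 154}
 */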

void gemm_nn(int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    int i,j,k;
    #pragma omp parallel for
    for(i = 0; i < M; ++i){
        for(k = 0; k < K; ++k){
            register float A_PART = ALPHA*A[i*lda+k];
            for(j = 0; j < N; ++j){
                C[i*ldc+j] += A_PART*B[k*ldb+j];
            }
        }
    }
}

void gemm_nt(int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    int i,j,k;
    #pragma omp parallel for
    for(i = 0; i < M; ++i){
        for(j = 0; j < N; ++j){
            register float sum = 0;
            for(k = 0; k < K; ++k){
                sum += ALPHA*A[i*lda+k]*B[j*ldb + k];
            }
            C[i*ldc+j] += sum;
        }
    }
}

void gemm_tn(int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    int i,j,k;
    #pragma omp parallel for
    for(i = 0; i < M; ++i){
        for(k = 0; k < K; ++k){
            register float A_PART = ALPHA*A[k*lda+i];
            for(j = 0; j < N; ++j){
                C[i*ldc+j] += A_PART*B[k*ldb+j];
            }
        }
    }
}

void gemm_tt(int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    int i,j,k;
    #pragma omp parallel for
    for(i = 0; i < M; ++i){
        for(j = 0; j < N; ++j){
            register float sum = 0;
            for(k = 0; k < K; ++k){
                sum += ALPHA*A[i+k*lda]*B[k+j*ldb];
            }
            C[i*ldc+j] += sum;
        }
    }
}
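
/* gemm_cpu scales C by BETA once, then dispatches on (TA, TB) to one of the
 * four kernels above. The kernels differ only in how A and B are indexed:
 * gemm_nn uses the cache-friendly i-k-j loop order with ALPHA*A hoisted out of
 * the inner loop, while the nt/tn/tt variants read the transposed operand(s)
 * with swapped strides. */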

void gemm_cpu(int TA, int TB, int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float BETA,
        float *C, int ldc)
{
    //printf("cpu: %d %d %d %d %d %f %d %d %f %d\n",TA, TB, M, N, K, ALPHA, lda, ldb, BETA, ldc);
    int i, j;
    for(i = 0; i < M; ++i){
        for(j = 0; j < N; ++j){
            C[i*ldc + j] *= BETA;
        }
    }
    if(!TA && !TB)
        gemm_nn(M, N, K, ALPHA,A,lda, B, ldb,C,ldc);
    else if(TA && !TB)
        gemm_tn(M, N, K, ALPHA,A,lda, B, ldb,C,ldc);
    else if(!TA && TB)
        gemm_nt(M, N, K, ALPHA,A,lda, B, ldb,C,ldc);
    else
        gemm_tt(M, N, K, ALPHA,A,lda, B, ldb,C,ldc);
}

#ifdef GPU

#include <math.h>

/* cuBLAS is column-major while darknet stores matrices row-major, so this call
 * asks cuBLAS for the transpose of the desired product: stored column-major,
 * that transpose has exactly the row-major layout of op(A)*op(B). That is why
 * B/A and N/M are passed in swapped order. */
void gemm_gpu(int TA, int TB, int M, int N, int K, float ALPHA,
        float *A_gpu, int lda,
        float *B_gpu, int ldb,
        float BETA,
        float *C_gpu, int ldc)
{
    cublasHandle_t handle = blas_handle();
    cudaError_t status = cublasSgemm(handle, (TB ? CUBLAS_OP_T : CUBLAS_OP_N),
            (TA ? CUBLAS_OP_T : CUBLAS_OP_N), N, M, K, &ALPHA, B_gpu, ldb, A_gpu, lda, &BETA, C_gpu, ldc);
    check_error(status);
}
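
/* Illustrative host-side sketch (not from the original file): gemm_gpu expects
 * device pointers, so inputs must be staged on the GPU first, as time_gpu below
 * does with cuda_make_array. Copying the result back is assumed to use
 * darknet's cuda_pull_array helper; substitute the matching copy-back call if
 * that name differs in your tree. Given host arrays A (M*K), B (K*N), C (M*N):
 *
 *     float *A_gpu = cuda_make_array(A, M*K);
 *     float *B_gpu = cuda_make_array(B, K*N);
 *     float *C_gpu = cuda_make_array(C, M*N);
 *     gemm_gpu(0, 0, M, N, K, 1, A_gpu, K, B_gpu, N, 0, C_gpu, N);
 *     cuda_pull_array(C_gpu, C, M*N);   // copy the result back to the host
 *     cuda_free(A_gpu); cuda_free(B_gpu); cuda_free(C_gpu);
 */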

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

/* Note: this variant passes host buffers straight to gemm_gpu (cublasSgemm
 * expects device pointers); time_gpu below stages the operands on the device first. */
void time_gpu_random_matrix(int TA, int TB, int m, int k, int n)
{
    float *a;
    if(!TA) a = random_matrix(m,k);
    else a = random_matrix(k,m);
    int lda = (!TA)?k:m;
    float *b;
    if(!TB) b = random_matrix(k,n);
    else b = random_matrix(n,k);
    int ldb = (!TB)?n:k;

    float *c = random_matrix(m,n);
    int i;
    clock_t start = clock(), end;
    for(i = 0; i<32; ++i){
        gemm_gpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n);
    }
    end = clock();
    printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s\n",m,k,k,n, TA, TB, (float)(end-start)/CLOCKS_PER_SEC);
    free(a);
    free(b);
    free(c);
}

void time_gpu(int TA, int TB, int m, int k, int n)
{
    int iter = 10;
    float *a = random_matrix(m,k);
    float *b = random_matrix(k,n);

    int lda = (!TA)?k:m;
    int ldb = (!TB)?n:k;

    float *c = random_matrix(m,n);

    float *a_cl = cuda_make_array(a, m*k);
    float *b_cl = cuda_make_array(b, k*n);
    float *c_cl = cuda_make_array(c, m*n);

    int i;
    clock_t start = clock(), end;
    for(i = 0; i<iter; ++i){
        gemm_gpu(TA,TB,m,n,k,1,a_cl,lda,b_cl,ldb,1,c_cl,n);
        cudaThreadSynchronize();
    }
    // 2*k floating-point operations per output element (k multiplies + k adds), plus 2 for scaling
    double flop = ((double)m)*n*(2.*k + 2.)*iter;
    double gflop = flop/pow(10., 9);
    end = clock();
    double seconds = sec(end-start);   // sec() (utils.c) converts clock ticks to seconds
    printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s, %lf GFLOPS\n",m,k,k,n, TA, TB, seconds, gflop/seconds);
    cuda_free(a_cl);
    cuda_free(b_cl);
    cuda_free(c_cl);
    free(a);
    free(b);
    free(c);
}


void test_gpu_accuracy(int TA, int TB, int m, int k, int n)
{
    srand(0);
    float *a;
    if(!TA) a = random_matrix(m,k);
    else a = random_matrix(k,m);
    int lda = (!TA)?k:m;
    float *b;
    if(!TB) b = random_matrix(k,n);
    else b = random_matrix(n,k);
    int ldb = (!TB)?n:k;

    float *c = random_matrix(m,n);
    float *c_gpu = random_matrix(m,n);
    memset(c, 0, m*n*sizeof(float));
    memset(c_gpu, 0, m*n*sizeof(float));
    int i;
    //pm(m,k,b);
    gemm_gpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c_gpu,n);
    //printf("GPU\n");
    //pm(m, n, c_gpu);

    gemm_cpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n);
    //printf("\n\nCPU\n");
    //pm(m, n, c);
    double sse = 0;
    for(i = 0; i < m*n; ++i) {
        //printf("%f %f\n", c[i], c_gpu[i]);
        sse += pow(c[i]-c_gpu[i], 2);
    }
    // sse/(m*n) is the mean squared error between the CPU and GPU results
    printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %g MSE\n",m,k,k,n, TA, TB, sse/(m*n));
    free(a);
    free(b);
    free(c);
    free(c_gpu);
}

int test_gpu_blas()
{
    /*
       test_gpu_accuracy(0,0,10,576,75);

       test_gpu_accuracy(0,0,17,10,10);
       test_gpu_accuracy(1,0,17,10,10);
       test_gpu_accuracy(0,1,17,10,10);
       test_gpu_accuracy(1,1,17,10,10);

       test_gpu_accuracy(0,0,1000,10,100);
       test_gpu_accuracy(1,0,1000,10,100);
       test_gpu_accuracy(0,1,1000,10,100);
       test_gpu_accuracy(1,1,1000,10,100);

       test_gpu_accuracy(0,0,10,10,10);

       time_gpu(0,0,64,2916,363);
       time_gpu(0,0,64,2916,363);
       time_gpu(0,0,64,2916,363);
       time_gpu(0,0,192,729,1600);
       time_gpu(0,0,384,196,1728);
       time_gpu(0,0,256,196,3456);
       time_gpu(0,0,256,196,2304);
       time_gpu(0,0,128,4096,12544);
       time_gpu(0,0,128,4096,4096);
     */
    time_gpu(0,0,64,75,12544);
    time_gpu(0,0,64,75,12544);
    time_gpu(0,0,64,75,12544);
    time_gpu(0,0,64,576,12544);
    time_gpu(0,0,256,2304,784);
    time_gpu(1,1,2304,256,784);
    time_gpu(0,0,512,4608,196);
    time_gpu(1,1,4608,512,196);

    return 0;
}
#endif