darknet v3
blas_kernels.cu
1 #include "cuda_runtime.h"
2 #include "curand.h"
3 #include "cublas_v2.h"
4 #include <assert.h>
5 
6 extern "C" {
7 #include "blas.h"
8 #include "cuda.h"
9 #include "utils.h"
10 }
11 
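// scale_bias: multiplies every spatial element of filter f in each batch image by
// biases[f] (the per-filter scale factor, e.g. the batch-norm gamma). The launch uses
// a 3-D grid: x covers the spatial size in BLOCK-wide chunks, y indexes filters, z the batch.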
12 __global__ void scale_bias_kernel(float *output, float *biases, int n, int size)
13 {
14  int offset = blockIdx.x * blockDim.x + threadIdx.x;
15  int filter = blockIdx.y;
16  int batch = blockIdx.z;
17 
18  if(offset < size) output[(batch*n+filter)*size + offset] *= biases[filter];
19 }
20 
21 void scale_bias_gpu(float *output, float *biases, int batch, int n, int size)
22 {
23  dim3 dimGrid((size-1)/BLOCK + 1, n, batch);
24  dim3 dimBlock(BLOCK, 1, 1);
25 
26  scale_bias_kernel<<<dimGrid, dimBlock>>>(output, biases, n, size);
27  check_error(cudaPeekAtLastError());
28 }
29 
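// backward_scale: gradient of the loss w.r.t. the per-filter scales. One block per
// filter; each thread accumulates delta*x_norm over a strided slice of the spatial
// positions into shared memory, and thread 0 reduces the BLOCK partial sums into
// scale_updates[filter].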
30 __global__ void backward_scale_kernel(float *x_norm, float *delta, int batch, int n, int size, float *scale_updates)
31 {
32  __shared__ float part[BLOCK];
33  int i,b;
34  int filter = blockIdx.x;
35  int p = threadIdx.x;
36  float sum = 0;
37  for(b = 0; b < batch; ++b){
38  for(i = 0; i < size; i += BLOCK){
39  int index = p + i + size*(filter + n*b);
40  sum += (p+i < size) ? delta[index]*x_norm[index] : 0;
41  }
42  }
43  part[p] = sum;
44  __syncthreads();
45  if (p == 0) {
46  for(i = 0; i < BLOCK; ++i) scale_updates[filter] += part[i];
47  }
48 }
49 
50 void backward_scale_gpu(float *x_norm, float *delta, int batch, int n, int size, float *scale_updates)
51 {
52  backward_scale_kernel<<<n, BLOCK>>>(x_norm, delta, batch, n, size, scale_updates);
53  check_error(cudaPeekAtLastError());
54 }
55 
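// add_bias: adds biases[j] to every element of filter j in every batch image
// (one thread per output element).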
56 __global__ void add_bias_kernel(float *output, float *biases, int batch, int n, int size)
57 {
58  int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
59  if (index >= n*size*batch) return;
60  int i = index % size;
61  index /= size;
62  int j = index % n;
63  index /= n;
64  int k = index;
65 
66  output[(k*n+j)*size + i] += biases[j];
67 }
68 
69 void add_bias_gpu(float *output, float *biases, int batch, int n, int size)
70 {
71  int num = n*size*batch;
72 
73  add_bias_kernel<<<cuda_gridsize(num), BLOCK>>>(output, biases, batch, n, size);
74  check_error(cudaPeekAtLastError());
75 }
76 
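// backward_bias: accumulates the bias gradient, i.e. the sum of delta over batch and
// spatial positions for each filter. The *_conn_ variant handles size == 1 (fully
// connected layers) with one thread per output; the general variant uses the same
// block-per-filter shared-memory reduction as backward_scale.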
77 __global__ void backward_bias_conn_kernel(float *bias_updates, float *delta, int batch, int n)
78 {
79  int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
80  if (index >= n) return;
81  int b;
82  float sum = 0;
83  for(b = 0; b < batch; ++b){
84  int i = b*n + index;
85  sum += delta[i];
86  }
87  bias_updates[index] += sum;
88 }
89 
90 __global__ void backward_bias_kernel(float *bias_updates, float *delta, int batch, int n, int size)
91 {
92  __shared__ float part[BLOCK];
93  int i,b;
94  int filter = blockIdx.x;
95  int p = threadIdx.x;
96  float sum = 0;
97  for(b = 0; b < batch; ++b){
98  for(i = 0; i < size; i += BLOCK){
99  int index = p + i + size*(filter + n*b);
100  sum += (p+i < size) ? delta[index] : 0;
101  }
102  }
103  part[p] = sum;
104  __syncthreads();
105  if (p == 0) {
106  for(i = 0; i < BLOCK; ++i) bias_updates[filter] += part[i];
107  }
108 }
109 
110 void backward_bias_gpu(float *bias_updates, float *delta, int batch, int n, int size)
111 {
112  if(size == 1){
113  backward_bias_conn_kernel<<<cuda_gridsize(n), BLOCK>>>(bias_updates, delta, batch, n);
114  }else{
115  backward_bias_kernel<<<n, BLOCK>>>(bias_updates, delta, batch, n, size);
116  }
117  check_error(cudaPeekAtLastError());
118 }
119 
120 /*
121 __global__ void dot_kernel(float *output, float scale, int batch, int n, int size, float *delta)
122 {
123  int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
124  int f1 = index / n;
125  int f2 = index % n;
126  if (f2 <= f1) return;
127 
128  float sum = 0;
129  float norm1 = 0;
130  float norm2 = 0;
131  int b, i;
132  for(b = 0; b < batch; ++b){
133  for(i = 0; i < size; ++i){
134  int i1 = b * size * n + f1 * size + i;
135  int i2 = b * size * n + f2 * size + i;
136  sum += output[i1] * output[i2];
137  norm1 += output[i1] * output[i1];
138  norm2 += output[i2] * output[i2];
139  }
140  }
141  norm1 = sqrt(norm1);
142  norm2 = sqrt(norm2);
143  float norm = norm1 * norm2;
144  sum = sum / norm;
145  for(b = 0; b < batch; ++b){
146  for(i = 0; i < size; ++i){
147  int i1 = b * size * n + f1 * size + i;
148  int i2 = b * size * n + f2 * size + i;
149  delta[i1] += - scale * sum * output[i2] / norm;
150  delta[i2] += - scale * sum * output[i1] / norm;
151  }
152  }
153 }
154 
155 void dot_error_gpu(layer l)
156 {
157  dot_kernel<<<cuda_gridsize(l.n*l.n), BLOCK>>>(l.output_gpu, l.dot, l.batch, l.n, l.out_w * l.out_h, l.delta_gpu);
158  check_error(cudaPeekAtLastError());
159 }
160 */
161 
162 
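// Adam optimizer. adam_kernel applies the bias-corrected update
//   mhat = m / (1 - B1^t),  vhat = v / (1 - B2^t),  x += rate * mhat / (sqrt(vhat) + eps).
// adam_update_gpu folds weight decay into the gradient d, updates the moment estimates
// m = B1*m + (1-B1)*d and v = B2*v + (1-B2)*d*d, calls adam_gpu, then clears d for the
// next batch.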
163 __global__ void adam_kernel(int N, float *x, float *m, float *v, float B1, float B2, float rate, float eps, int t)
164 {
165  int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
166  if (index >= N) return;
167 
168  float mhat = m[index] / (1.f - powf(B1, t));
169  float vhat = v[index] / (1.f - powf(B2, t));
170 
171  x[index] = x[index] + rate * mhat / (sqrtf(vhat) + eps);
172 }
173 
174 extern "C" void adam_gpu(int n, float *x, float *m, float *v, float B1, float B2, float rate, float eps, int t)
175 {
176  adam_kernel<<<cuda_gridsize(n), BLOCK>>>(n, x, m, v, B1, B2, rate, eps, t);
177  check_error(cudaPeekAtLastError());
178 }
179 
180 extern "C" void adam_update_gpu(float *w, float *d, float *m, float *v, float B1, float B2, float eps, float decay, float rate, int n, int batch, int t)
181 {
182  scal_gpu(n, B1, m, 1);
183  scal_gpu(n, B2, v, 1);
184  axpy_gpu(n, -decay*batch, w, 1, d, 1);
185 
186  axpy_gpu(n, (1-B1), d, 1, m, 1);
187  mul_gpu(n, d, 1, d, 1);
188  axpy_gpu(n, (1-B2), d, 1, v, 1);
189 
190  adam_gpu(n, w, m, v, B1, B2, rate, eps, t);
191  fill_gpu(n, 0, d, 1);
192 }
193 
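// normalize: batch-norm style normalization, x = (x - mean[f]) / sqrt(variance[f] + 1e-5),
// where f is the filter (channel) index of each element.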
194 __global__ void normalize_kernel(int N, float *x, float *mean, float *variance, int batch, int filters, int spatial)
195 {
196  int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
197  if (index >= N) return;
198  int f = (index/spatial)%filters;
199 
200  x[index] = (x[index] - mean[f])/(sqrtf(variance[f] + .00001f));
201 }
202 
203 __global__ void normalize_delta_kernel(int N, float *x, float *mean, float *variance, float *mean_delta, float *variance_delta, int batch, int filters, int spatial, float *delta)
204 {
205  int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
206  if (index >= N) return;
207  int f = (index/spatial)%filters;
208 
209  delta[index] = delta[index] * 1.f/(sqrtf(variance[f] + .00001f)) + variance_delta[f] * 2.f * (x[index] - mean[f]) / (spatial * batch) + mean_delta[f]/(spatial*batch);
210 }
211 
212 extern "C" void normalize_delta_gpu(float *x, float *mean, float *variance, float *mean_delta, float *variance_delta, int batch, int filters, int spatial, float *delta)
213 {
214  size_t N = batch*filters*spatial;
215  normalize_delta_kernel<<<cuda_gridsize(N), BLOCK>>>(N, x, mean, variance, mean_delta, variance_delta, batch, filters, spatial, delta);
216  check_error(cudaPeekAtLastError());
217 }
218 
219 __global__ void variance_delta_kernel(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta)
220 {
221  int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
222  if (i >= filters) return;
223  int j,k;
224  variance_delta[i] = 0;
225  for(j = 0; j < batch; ++j){
226  for(k = 0; k < spatial; ++k){
227  int index = j*filters*spatial + i*spatial + k;
228  variance_delta[i] += delta[index]*(x[index] - mean[i]);
229  }
230  }
231  variance_delta[i] *= -.5f * powf(variance[i] + .00001f, (float)(-3.f/2.f));
232 }
233 
234 __global__ void accumulate_kernel(float *x, int n, int groups, float *sum)
235 {
236  int k;
237  int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
238  if (i >= groups) return;
239  sum[i] = 0;
240  for(k = 0; k < n; ++k){
241  sum[i] += x[k*groups + i];
242  }
243 }
244 
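// The fast_* kernels below all follow the same pattern: one block per filter, each
// thread sums a strided slice of the batch*spatial elements into shared memory, and
// thread 0 reduces the BLOCK partial sums into the per-filter result.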
245 __global__ void fast_mean_delta_kernel(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta)
246 {
247  const int threads = BLOCK;
248  __shared__ float local[threads];
249 
250  int id = threadIdx.x;
251  local[id] = 0;
252 
253  int filter = blockIdx.x;
254 
255  int i, j;
256  for(j = 0; j < batch; ++j){
257  for(i = 0; i < spatial; i += threads){
258  int index = j*spatial*filters + filter*spatial + i + id;
259  local[id] += (i+id < spatial) ? delta[index] : 0;
260  }
261  }
262 
263  __syncthreads();
264 
265  if(id == 0){
266  mean_delta[filter] = 0;
267  for(i = 0; i < threads; ++i){
268  mean_delta[filter] += local[i];
269  }
270  mean_delta[filter] *= (-1.f/sqrtf(variance[filter] + .00001f));
271  }
272 }
273 
274 __global__ void fast_variance_delta_kernel(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta)
275 {
276  const int threads = BLOCK;
277  __shared__ float local[threads];
278 
279  int id = threadIdx.x;
280  local[id] = 0;
281 
282  int filter = blockIdx.x;
283 
284  int i, j;
285  for(j = 0; j < batch; ++j){
286  for(i = 0; i < spatial; i += threads){
287  int index = j*spatial*filters + filter*spatial + i + id;
288 
289  local[id] += (i+id < spatial) ? delta[index]*(x[index] - mean[filter]) : 0;
290  }
291  }
292 
293  __syncthreads();
294 
295  if(id == 0){
296  variance_delta[filter] = 0;
297  for(i = 0; i < threads; ++i){
298  variance_delta[filter] += local[i];
299  }
300  variance_delta[filter] *= -.5f * powf(variance[filter] + .00001f, (float)(-3.f/2.f));
301  }
302 }
303 
304 
305 __global__ void mean_delta_kernel(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta)
306 {
307  int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
308  if (i >= filters) return;
309  int j,k;
310  mean_delta[i] = 0;
311  for (j = 0; j < batch; ++j) {
312  for (k = 0; k < spatial; ++k) {
313  int index = j*filters*spatial + i*spatial + k;
314  mean_delta[i] += delta[index];
315  }
316  }
317  mean_delta[i] *= (-1.f/sqrtf(variance[i] + .00001f));
318 }
319 
320 extern "C" void mean_delta_gpu(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta)
321 {
322  mean_delta_kernel<<<cuda_gridsize(filters), BLOCK>>>(delta, variance, batch, filters, spatial, mean_delta);
323  check_error(cudaPeekAtLastError());
324 }
325 
326 extern "C" void fast_mean_delta_gpu(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta)
327 {
328  fast_mean_delta_kernel<<<filters, BLOCK>>>(delta, variance, batch, filters, spatial, mean_delta);
329  check_error(cudaPeekAtLastError());
330 }
331 
332 extern "C" void fast_variance_delta_gpu(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta)
333 {
334  fast_variance_delta_kernel<<<filters, BLOCK>>>(x, delta, mean, variance, batch, filters, spatial, variance_delta);
335  check_error(cudaPeekAtLastError());
336 }
337 
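// Naive per-filter statistics: one thread per filter loops over all batch*spatial
// elements. mean scales by 1/(batch*spatial); variance uses the sample estimate
// 1/(batch*spatial - 1).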
338 __global__ void mean_kernel(float *x, int batch, int filters, int spatial, float *mean)
339 {
340  float scale = 1.f/(batch * spatial);
341  int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
342  if (i >= filters) return;
343  int j,k;
344  mean[i] = 0;
345  for(j = 0; j < batch; ++j){
346  for(k = 0; k < spatial; ++k){
347  int index = j*filters*spatial + i*spatial + k;
348  mean[i] += x[index];
349  }
350  }
351  mean[i] *= scale;
352 }
353 
354 __global__ void variance_kernel(float *x, float *mean, int batch, int filters, int spatial, float *variance)
355 {
356  float scale = 1.f/(batch * spatial - 1);
357  int j,k;
358  int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
359  if (i >= filters) return;
360  variance[i] = 0;
361  for(j = 0; j < batch; ++j){
362  for(k = 0; k < spatial; ++k){
363  int index = j*filters*spatial + i*spatial + k;
364  variance[i] += powf((x[index] - mean[i]), 2);
365  }
366  }
367  variance[i] *= scale;
368 }
369 
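// reorg: the index permutation behind the reorg layer. It maps a (c, h, w) tensor onto
// a (c/(stride*stride), h*stride, w*stride) tensor by spreading groups of channels over
// stride x stride spatial blocks; forward copies x[in_index] -> out[out_index],
// otherwise the mapping is inverted.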
370 __global__ void reorg_kernel(int N, float *x, int w, int h, int c, int batch, int stride, int forward, float *out)
371 {
372  int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
373  if(i >= N) return;
374  int in_index = i;
375  int in_w = i%w;
376  i = i/w;
377  int in_h = i%h;
378  i = i/h;
379  int in_c = i%c;
380  i = i/c;
381  int b = i%batch;
382 
383  int out_c = c/(stride*stride);
384 
385  int c2 = in_c % out_c;
386  int offset = in_c / out_c;
387  int w2 = in_w*stride + offset % stride;
388  int h2 = in_h*stride + offset / stride;
389  //printf("%d\n", offset);
390  int out_index = w2 + w*stride*(h2 + h*stride*(c2 + out_c*b));
391 
392  // printf("%d %d %d\n", w2, h2, c2);
393  //printf("%d %d\n", in_index, out_index);
394  //if(out_index >= N || out_index < 0) printf("bad bad bad \n");
395 
396  if(forward) out[out_index] = x[in_index];
397  else out[in_index] = x[out_index];
398  //if(forward) out[1] = x[1];
399  //else out[0] = x[0];
400 }
401 
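// Element-wise BLAS-style kernels (one thread per element). INCX/INCY are strides and
// OFFX/OFFY offsets, mirroring the conventions of the CPU versions declared in blas.h.
/* Typical host-side usage (a_gpu and b_gpu are hypothetical device buffers of length n):
 *   fill_gpu(n, 0.f, a_gpu, 1);             // a = 0
 *   axpy_gpu(n, 2.f, b_gpu, 1, a_gpu, 1);   // a += 2*b
 */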
402 __global__ void axpy_kernel(int N, float ALPHA, float *X, int OFFX, int INCX, float *Y, int OFFY, int INCY)
403 {
404  int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
405  if(i < N) Y[OFFY+i*INCY] += ALPHA*X[OFFX+i*INCX];
406 }
407 
408 __global__ void pow_kernel(int N, float ALPHA, float *X, int INCX, float *Y, int INCY)
409 {
410  int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
411  if(i < N) Y[i*INCY] = pow(X[i*INCX], ALPHA);
412 }
413 
414 __global__ void const_kernel(int N, float ALPHA, float *X, int INCX)
415 {
416  int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
417  if(i < N) X[i*INCX] = ALPHA;
418 }
419 
420 __global__ void constrain_kernel(int N, float ALPHA, float *X, int INCX)
421 {
422  int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
423  if(i < N) X[i*INCX] = fminf(ALPHA, fmaxf(-ALPHA, X[i*INCX]));
424 }
425 
426 __global__ void supp_kernel(int N, float ALPHA, float *X, int INCX)
427 {
428  int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
429  if(i < N) {
430  if((X[i*INCX] * X[i*INCX]) < (ALPHA * ALPHA)) X[i*INCX] = 0;
431  }
432 }
433 
434 __global__ void add_kernel(int N, float ALPHA, float *X, int INCX)
435 {
436  int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
437  if(i < N) X[i*INCX] += ALPHA;
438 }
439 
440 __global__ void scal_kernel(int N, float ALPHA, float *X, int INCX)
441 {
442  int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
443  if(i < N) X[i*INCX] *= ALPHA;
444 }
445 
446 __global__ void fill_kernel(int N, float ALPHA, float *X, int INCX)
447 {
448  int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
449  if(i < N) X[i*INCX] = ALPHA;
450 }
451 
452 __global__ void copy_kernel(int N, float *X, int OFFX, int INCX, float *Y, int OFFY, int INCY)
453 {
454  int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
455  if(i < N) Y[i*INCY + OFFY] = X[i*INCX + OFFX];
456 }
457 
458 __global__ void mul_kernel(int N, float *X, int INCX, float *Y, int INCY)
459 {
460  int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
461  if(i < N) Y[i*INCY] *= X[i*INCX];
462 }
463 
464 
465 extern "C" void normalize_gpu(float *x, float *mean, float *variance, int batch, int filters, int spatial)
466 {
467  size_t N = batch*filters*spatial;
468  normalize_kernel<<<cuda_gridsize(N), BLOCK>>>(N, x, mean, variance, batch, filters, spatial);
469  check_error(cudaPeekAtLastError());
470 }
471 
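// l2normalize: for every (batch, spatial) position, divides the vector of filter
// activations by its L2 norm (clamped to 1 when the norm is 0) and writes the
// approximate per-element gradient (1 - x_normalized) / norm into dx.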
472 __global__ void l2norm_kernel(int N, float *x, float *dx, int batch, int filters, int spatial)
473 {
474  int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
475  if (index >= N) return;
476  int b = index / spatial;
477  int i = index % spatial;
478  int f;
479  float sum = 0;
480  for(f = 0; f < filters; ++f){
481  int index = b*filters*spatial + f*spatial + i;
482  sum += powf(x[index], 2);
483  }
484  sum = sqrtf(sum);
485  if(sum == 0) sum = 1;
486  //printf("%f\n", sum);
487  for(f = 0; f < filters; ++f){
488  int index = b*filters*spatial + f*spatial + i;
489  x[index] /= sum;
490  dx[index] = (1 - x[index]) / sum;
491  }
492 }
493 
494 extern "C" void l2normalize_gpu(float *x, float *dx, int batch, int filters, int spatial)
495 {
496  size_t N = batch*spatial;
497  l2norm_kernel<<<cuda_gridsize(N), BLOCK>>>(N, x, dx, batch, filters, spatial);
498  check_error(cudaPeekAtLastError());
499 }
500 
501 __global__ void fast_mean_kernel(float *x, int batch, int filters, int spatial, float *mean)
502 {
503  const int threads = BLOCK;
504  __shared__ float local[threads];
505 
506  int id = threadIdx.x;
507  local[id] = 0;
508 
509  int filter = blockIdx.x;
510 
511  int i, j;
512  for(j = 0; j < batch; ++j){
513  for(i = 0; i < spatial; i += threads){
514  int index = j*spatial*filters + filter*spatial + i + id;
515  local[id] += (i+id < spatial) ? x[index] : 0;
516  }
517  }
518 
519  __syncthreads();
520 
521  if(id == 0){
522  mean[filter] = 0;
523  for(i = 0; i < threads; ++i){
524  mean[filter] += local[i];
525  }
526  mean[filter] /= spatial * batch;
527  }
528 }
529 
530 __global__ void fast_variance_kernel(float *x, float *mean, int batch, int filters, int spatial, float *variance)
531 {
532  const int threads = BLOCK;
533  __shared__ float local[threads];
534 
535  int id = threadIdx.x;
536  local[id] = 0;
537 
538  int filter = blockIdx.x;
539 
540  int i, j;
541  for(j = 0; j < batch; ++j){
542  for(i = 0; i < spatial; i += threads){
543  int index = j*spatial*filters + filter*spatial + i + id;
544 
545  local[id] += (i+id < spatial) ? powf((x[index] - mean[filter]), 2) : 0;
546  }
547  }
548 
549  __syncthreads();
550 
551  if(id == 0){
552  variance[filter] = 0;
553  for(i = 0; i < threads; ++i){
554  variance[filter] += local[i];
555  }
556  variance[filter] /= (spatial * batch - 1);
557  }
558 }
559 
560 extern "C" void fast_mean_gpu(float *x, int batch, int filters, int spatial, float *mean)
561 {
562  fast_mean_kernel<<<filters, BLOCK>>>(x, batch, filters, spatial, mean);
563  check_error(cudaPeekAtLastError());
564 }
565 
566 extern "C" void fast_variance_gpu(float *x, float *mean, int batch, int filters, int spatial, float *variance)
567 {
568  fast_variance_kernel<<<filters, BLOCK>>>(x, mean, batch, filters, spatial, variance);
569  check_error(cudaPeekAtLastError());
570 }
571 
572 
573 extern "C" void mean_gpu(float *x, int batch, int filters, int spatial, float *mean)
574 {
575  mean_kernel<<<cuda_gridsize(filters), BLOCK>>>(x, batch, filters, spatial, mean);
576  check_error(cudaPeekAtLastError());
577 }
578 
579 extern "C" void variance_gpu(float *x, float *mean, int batch, int filters, int spatial, float *variance)
580 {
581  variance_kernel<<<cuda_gridsize(filters), BLOCK>>>(x, mean, batch, filters, spatial, variance);
582  check_error(cudaPeekAtLastError());
583 }
584 
585 extern "C" void axpy_gpu(int N, float ALPHA, float * X, int INCX, float * Y, int INCY)
586 {
587  axpy_gpu_offset(N, ALPHA, X, 0, INCX, Y, 0, INCY);
588 }
589 
590 extern "C" void pow_gpu(int N, float ALPHA, float * X, int INCX, float * Y, int INCY)
591 {
592  pow_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX, Y, INCY);
593  check_error(cudaPeekAtLastError());
594 }
595 
596 extern "C" void axpy_gpu_offset(int N, float ALPHA, float * X, int OFFX, int INCX, float * Y, int OFFY, int INCY)
597 {
598  axpy_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, OFFX, INCX, Y, OFFY, INCY);
599  check_error(cudaPeekAtLastError());
600 }
601 
602 extern "C" void copy_gpu(int N, float * X, int INCX, float * Y, int INCY)
603 {
604  copy_gpu_offset(N, X, 0, INCX, Y, 0, INCY);
605 }
606 
607 extern "C" void mul_gpu(int N, float * X, int INCX, float * Y, int INCY)
608 {
609  mul_kernel<<<cuda_gridsize(N), BLOCK>>>(N, X, INCX, Y, INCY);
610  check_error(cudaPeekAtLastError());
611 }
612 
613 extern "C" void copy_gpu_offset(int N, float * X, int OFFX, int INCX, float * Y, int OFFY, int INCY)
614 {
615  copy_kernel<<<cuda_gridsize(N), BLOCK>>>(N, X, OFFX, INCX, Y, OFFY, INCY);
616  check_error(cudaPeekAtLastError());
617 }
618 
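// flatten: transposes each image between channel-major (layers x spatial) and
// interleaved (spatial x layers) layouts; forward writes the interleaved order.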
619 __global__ void flatten_kernel(int N, float *x, int spatial, int layers, int batch, int forward, float *out)
620 {
621  int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
622  if(i >= N) return;
623  int in_s = i%spatial;
624  i = i/spatial;
625  int in_c = i%layers;
626  i = i/layers;
627  int b = i;
628 
629  int i1 = b*layers*spatial + in_c*spatial + in_s;
630  int i2 = b*layers*spatial + in_s*layers + in_c;
631 
632  if (forward) out[i2] = x[i1];
633  else out[i1] = x[i2];
634 }
635 
636 extern "C" void flatten_gpu(float *x, int spatial, int layers, int batch, int forward, float *out)
637 {
638  int size = spatial*batch*layers;
639  flatten_kernel<<<cuda_gridsize(size), BLOCK>>>(size, x, spatial, layers, batch, forward, out);
640  check_error(cudaPeekAtLastError());
641 }
642 
643 extern "C" void reorg_gpu(float *x, int w, int h, int c, int batch, int stride, int forward, float *out)
644 {
645  int size = w*h*c*batch;
646  reorg_kernel<<<cuda_gridsize(size), BLOCK>>>(size, x, w, h, c, batch, stride, forward, out);
647  check_error(cudaPeekAtLastError());
648 }
649 
650 __global__ void mask_kernel(int n, float *x, float mask_num, float *mask, float val)
651 {
652  int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
653  if(i < n && mask[i] == mask_num) x[i] = val;
654 }
655 
656 extern "C" void mask_gpu(int N, float * X, float mask_num, float * mask, float val)
657 {
658  mask_kernel<<<cuda_gridsize(N), BLOCK>>>(N, X, mask_num, mask, val);
659  check_error(cudaPeekAtLastError());
660 }
661 
662 __global__ void scale_mask_kernel(int n, float *x, float mask_num, float *mask, float scale)
663 {
664  int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
665  if(i < n && mask[i] == mask_num) x[i] *= scale;
666 }
667 
668 extern "C" void scale_mask_gpu(int N, float * X, float mask_num, float * mask, float scale)
669 {
670  scale_mask_kernel<<<cuda_gridsize(N), BLOCK>>>(N, X, mask_num, mask, scale);
671  check_error(cudaPeekAtLastError());
672 }
673 
674 extern "C" void const_gpu(int N, float ALPHA, float * X, int INCX)
675 {
676  const_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX);
677  check_error(cudaPeekAtLastError());
678 }
679 
680 extern "C" void constrain_gpu(int N, float ALPHA, float * X, int INCX)
681 {
682  constrain_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX);
683  check_error(cudaPeekAtLastError());
684 }
685 
686 
687 extern "C" void add_gpu(int N, float ALPHA, float * X, int INCX)
688 {
689  add_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX);
690  check_error(cudaPeekAtLastError());
691 }
692 
693 extern "C" void scal_gpu(int N, float ALPHA, float * X, int INCX)
694 {
695  scal_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX);
696  check_error(cudaPeekAtLastError());
697 }
698 
699 extern "C" void supp_gpu(int N, float ALPHA, float * X, int INCX)
700 {
701  supp_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX);
702  check_error(cudaPeekAtLastError());
703 }
704 
705 extern "C" void fill_gpu(int N, float ALPHA, float * X, int INCX)
706 {
707  fill_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX);
708  check_error(cudaPeekAtLastError());
709 }
710 
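// shortcut: residual-style connection, out = s1*out + s2*add, where `add` comes from a
// layer of possibly different width/height/channels. stride and sample step through the
// larger tensor so only the overlapping min(w,h,c) region is combined.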
711 __global__ void shortcut_kernel(int size, int minw, int minh, int minc, int stride, int sample, int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float s1, float s2, float *out)
712 {
713  int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
714  if (id >= size) return;
715  int i = id % minw;
716  id /= minw;
717  int j = id % minh;
718  id /= minh;
719  int k = id % minc;
720  id /= minc;
721  int b = id % batch;
722 
723  int out_index = i*sample + w2*(j*sample + h2*(k + c2*b));
724  int add_index = i*stride + w1*(j*stride + h1*(k + c1*b));
725  out[out_index] = s1*out[out_index] + s2*add[add_index];
726  //out[out_index] += add[add_index];
727 }
728 
729 extern "C" void shortcut_gpu(int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float s1, float s2, float *out)
730 {
731  int minw = (w1 < w2) ? w1 : w2;
732  int minh = (h1 < h2) ? h1 : h2;
733  int minc = (c1 < c2) ? c1 : c2;
734 
735  int stride = w1/w2;
736  int sample = w2/w1;
737  assert(stride == h1/h2);
738  assert(sample == h2/h1);
739  if(stride < 1) stride = 1;
740  if(sample < 1) sample = 1;
741 
742  int size = batch * minw * minh * minc;
743  shortcut_kernel<<<cuda_gridsize(size), BLOCK>>>(size, minw, minh, minc, stride, sample, batch, w1, h1, c1, add, w2, h2, c2, s1, s2, out);
744  check_error(cudaPeekAtLastError());
745 }
746 
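// Loss kernels: each writes a per-element error value and a gradient `delta` using
// darknet's sign convention (e.g. delta = truth - pred for the L2 and cross-entropy
// kernels, and the sign of the difference for L1 / smooth-L1 outside the quadratic zone).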
747 __global__ void smooth_l1_kernel(int n, float *pred, float *truth, float *delta, float *error)
748 {
749  int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
750  if(i < n){
751  float diff = truth[i] - pred[i];
752  float abs_val = fabsf(diff);
753  if(abs_val < 1) {
754  error[i] = diff * diff;
755  delta[i] = diff;
756  }
757  else {
758  error[i] = 2*abs_val - 1;
759  delta[i] = (diff > 0) ? 1 : -1;
760  }
761  }
762 }
763 
764 extern "C" void smooth_l1_gpu(int n, float *pred, float *truth, float *delta, float *error)
765 {
766  smooth_l1_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error);
767  check_error(cudaPeekAtLastError());
768 }
769 
770 __global__ void softmax_x_ent_kernel(int n, float *pred, float *truth, float *delta, float *error)
771 {
772  int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
773  if(i < n){
774  float t = truth[i];
775  float p = pred[i];
776  error[i] = (t) ? -log(p) : 0;
777  delta[i] = t-p;
778  }
779 }
780 
781 extern "C" void softmax_x_ent_gpu(int n, float *pred, float *truth, float *delta, float *error)
782 {
783  softmax_x_ent_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error);
784  check_error(cudaPeekAtLastError());
785 }
786 
787 __global__ void logistic_x_ent_kernel(int n, float *pred, float *truth, float *delta, float *error)
788 {
789  int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
790  if(i < n){
791  float t = truth[i];
792  float p = pred[i];
793  error[i] = -t*log(p+.0000001) - (1-t)*log(1-p+.0000001);
794  delta[i] = t-p;
795  }
796 }
797 
798 extern "C" void logistic_x_ent_gpu(int n, float *pred, float *truth, float *delta, float *error)
799 {
800  logistic_x_ent_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error);
801  check_error(cudaPeekAtLastError());
802 }
803 
804 __global__ void l2_kernel(int n, float *pred, float *truth, float *delta, float *error)
805 {
806  int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
807  if(i < n){
808  float diff = truth[i] - pred[i];
809  error[i] = diff * diff; //I know this is technically wrong, deal with it.
810  delta[i] = diff;
811  }
812 }
813 
814 extern "C" void l2_gpu(int n, float *pred, float *truth, float *delta, float *error)
815 {
816  l2_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error);
817  check_error(cudaPeekAtLastError());
818 }
819 
820 __global__ void l1_kernel(int n, float *pred, float *truth, float *delta, float *error)
821 {
822  int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
823  if(i < n){
824  float diff = truth[i] - pred[i];
825  error[i] = fabsf(diff);
826  delta[i] = (diff > 0) ? 1 : -1;
827  }
828 }
829 
830 extern "C" void l1_gpu(int n, float *pred, float *truth, float *delta, float *error)
831 {
832  l1_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error);
833  check_error(cudaPeekAtLastError());
834 }
835 
836 __global__ void wgan_kernel(int n, float *pred, float *truth, float *delta, float *error)
837 {
838  int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
839  if(i < n){
840  error[i] = truth[i] ? -pred[i] : pred[i];
841  delta[i] = (truth[i] > 0) ? 1 : -1;
842  }
843 }
844 
845 extern "C" void wgan_gpu(int n, float *pred, float *truth, float *delta, float *error)
846 {
847  wgan_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error);
848  check_error(cudaPeekAtLastError());
849 }
850 
851 
852 
853 
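// weighted_sum: c = s*a + (1-s)*b (b may be NULL); weighted_delta backpropagates through
// that blend. inter packs each batch element's X (length NX) and Y (length NY) back to
// back into OUT; deinter accumulates OUT back into X and Y.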
854 __global__ void weighted_sum_kernel(int n, float *a, float *b, float *s, float *c)
855 {
856  int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
857  if(i < n){
858  c[i] = s[i]*a[i] + (1-s[i])*(b ? b[i] : 0);
859  }
860 }
861 
862 __global__ void deinter_kernel(int NX, float *X, int NY, float *Y, int B, float *OUT)
863 {
864  int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
865  if(i < (NX+NY)*B){
866  int b = i / (NX+NY);
867  int j = i % (NX+NY);
868  if (j < NX){
869  if(X) X[b*NX + j] += OUT[i];
870  } else {
871  if(Y) Y[b*NY + j - NX] += OUT[i];
872  }
873  }
874 }
875 
876 extern "C" void deinter_gpu(int NX, float *X, int NY, float *Y, int B, float *OUT)
877 {
878  deinter_kernel<<<cuda_gridsize((NX+NY)*B), BLOCK>>>(NX, X, NY, Y, B, OUT);
879  check_error(cudaPeekAtLastError());
880 }
881 
882 __global__ void inter_kernel(int NX, float *X, int NY, float *Y, int B, float *OUT)
883 {
884  int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
885  if(i < (NX+NY)*B){
886  int b = i / (NX+NY);
887  int j = i % (NX+NY);
888  if (j < NX){
889  OUT[i] = X[b*NX + j];
890  } else {
891  OUT[i] = Y[b*NY + j - NX];
892  }
893  }
894 }
895 
896 extern "C" void inter_gpu(int NX, float *X, int NY, float *Y, int B, float *OUT)
897 {
898  inter_kernel<<<cuda_gridsize((NX+NY)*B), BLOCK>>>(NX, X, NY, Y, B, OUT);
899  check_error(cudaPeekAtLastError());
900 }
901 
902 extern "C" void weighted_sum_gpu(float *a, float *b, float *s, int num, float *c)
903 {
904  weighted_sum_kernel<<<cuda_gridsize(num), BLOCK>>>(num, a, b, s, c);
905  check_error(cudaPeekAtLastError());
906 }
907 
908 __global__ void weighted_delta_kernel(int n, float *a, float *b, float *s, float *da, float *db, float *ds, float *dc)
909 {
910  int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
911  if(i < n){
912  if(da) da[i] += dc[i] * s[i];
913  if(db) db[i] += dc[i] * (1-s[i]);
914  ds[i] += dc[i] * (a[i] - b[i]);
915  }
916 }
917 
918 extern "C" void weighted_delta_gpu(float *a, float *b, float *s, float *da, float *db, float *ds, int num, float *dc)
919 {
920  weighted_delta_kernel<<<cuda_gridsize(num), BLOCK>>>(num, a, b, s, da, db, ds, dc);
921  check_error(cudaPeekAtLastError());
922 }
923 
924 __global__ void mult_add_into_kernel(int n, float *a, float *b, float *c)
925 {
926  int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
927  if(i < n){
928  c[i] += a[i]*b[i];
929  }
930 }
931 
932 extern "C" void mult_add_into_gpu(int num, float *a, float *b, float *c)
933 {
934  mult_add_into_kernel<<<cuda_gridsize(num), BLOCK>>>(num, a, b, c);
935  check_error(cudaPeekAtLastError());
936 }
937 
938 
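// softmax_device: numerically stable softmax with temperature `temp` over n strided
// elements; the running maximum is kept in a float and subtracted inside the exponent.
// softmax_kernel applies it to one (batch, group) slice per thread.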
939 __device__ void softmax_device(float *input, int n, float temp, int stride, float *output)
940 {
941  int i;
942  float sum = 0;
943  float largest = -INFINITY;
944  for(i = 0; i < n; ++i){
945  float val = input[i*stride];
946  largest = (val>largest) ? val : largest;
947  }
948  for(i = 0; i < n; ++i){
949  float e = expf(input[i*stride]/temp - largest/temp);
950  sum += e;
951  output[i*stride] = e;
952  }
953  for(i = 0; i < n; ++i){
954  output[i*stride] /= sum;
955  }
956 }
957 
958 
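// softmax_tree: hierarchical softmax. Each tree group g (group_size[g] classes starting
// at group_offset[g]) gets its own softmax at every spatial position of every batch
// image; the group tables are copied to the GPU per call and freed afterwards.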
959 __global__ void softmax_tree_kernel(float *input, int spatial, int batch, int stride, float temp, float *output, int groups, int *group_size, int *group_offset)
960 {
961  int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
962  if (id >= spatial*batch*groups) return;
963  int s = id % spatial;
964  id = id / spatial;
965  int g = id % groups;
966  int b = id / groups;
967  int goff = group_offset[g]*spatial;
968  int boff = b*stride;
969  softmax_device(input + goff + boff + s, group_size[g], temp, spatial, output + goff + boff + s);
970 }
971 
972 extern "C" void softmax_tree(float *input, int spatial, int batch, int stride, float temp, float *output, tree hier)
973 {
974  int *tree_groups_size = cuda_make_int_array(hier.group_size, hier.groups);
975  int *tree_groups_offset = cuda_make_int_array(hier.group_offset, hier.groups);
976  /*
977  static int *tree_groups_size = 0;
978  static int *tree_groups_offset = 0;
979  if(!tree_groups_size){
980  tree_groups_size = cuda_make_int_array(hier.group_size, hier.groups);
981  tree_groups_offset = cuda_make_int_array(hier.group_offset, hier.groups);
982  }
983  */
984  int num = spatial*batch*hier.groups;
985  softmax_tree_kernel<<<cuda_gridsize(num), BLOCK>>>(input, spatial, batch, stride, temp, output, hier.groups, tree_groups_size, tree_groups_offset);
986  check_error(cudaPeekAtLastError());
987  cuda_free((float *)tree_groups_size);
988  cuda_free((float *)tree_groups_offset);
989 }
990 
991 __global__ void softmax_kernel(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output)
992 {
993  int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
994  if (id >= batch*groups) return;
995  int b = id / groups;
996  int g = id % groups;
997  softmax_device(input + b*batch_offset + g*group_offset, n, temp, stride, output + b*batch_offset + g*group_offset);
998 }
999 
1000 extern "C" void softmax_gpu(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output)
1001 {
1002  softmax_kernel<<<cuda_gridsize(batch*groups), BLOCK>>>(input, n, batch, batch_offset, groups, group_offset, stride, temp, output);
1003  check_error(cudaPeekAtLastError());
1004 }
1005 
1006 
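// upsample: nearest-neighbor upsampling by `stride`. Forward accumulates scale * x[in]
// into out; backward atomically accumulates scale * out[out] into x, so the gradients
// from all stride*stride output cells reach the same input cell.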
1007 __global__ void upsample_kernel(size_t N, float *x, int w, int h, int c, int batch, int stride, int forward, float scale, float *out)
1008 {
1009  size_t i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
1010  if(i >= N) return;
1011  int out_index = i;
1012  int out_w = i%(w*stride);
1013  i = i/(w*stride);
1014  int out_h = i%(h*stride);
1015  i = i/(h*stride);
1016  int out_c = i%c;
1017  i = i/c;
1018  int b = i%batch;
1019 
1020  int in_w = out_w / stride;
1021  int in_h = out_h / stride;
1022  int in_c = out_c;
1023 
1024  int in_index = b*w*h*c + in_c*w*h + in_h*w + in_w;
1025 
1026 
1027  if(forward) out[out_index] += scale * x[in_index];
1028  else atomicAdd(x+in_index, scale * out[out_index]);
1029 }
1030 extern "C" void upsample_gpu(float *in, int w, int h, int c, int batch, int stride, int forward, float scale, float *out)
1031 {
1032  size_t size = w*h*c*batch*stride*stride;
1033  upsample_kernel<<<cuda_gridsize(size), BLOCK>>>(size, in, w, h, c, batch, stride, forward, scale, out);
1034  check_error(cudaPeekAtLastError());
1035 }