#include "cuda_runtime.h"
#include <assert.h>

extern "C" {
#include "blas.h"
#include "cuda.h"
}

__global__ void scale_bias_kernel(float *output, float *biases, int n, int size)
{
    int offset = blockIdx.x * blockDim.x + threadIdx.x;
    int filter = blockIdx.y;
    int batch = blockIdx.z;

    if(offset < size) output[(batch*n+filter)*size + offset] *= biases[filter];
}

void scale_bias_gpu(float *output, float *biases, int batch, int n, int size)
{
    dim3 dimGrid((size-1)/BLOCK + 1, n, batch);
    dim3 dimBlock(BLOCK, 1, 1);

    scale_bias_kernel<<<dimGrid, dimBlock>>>(output, biases, n, size);
    check_error(cudaPeekAtLastError());
}
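/* Usage sketch (illustrative, not part of the original file): scaling the output of a
 * hypothetical batch-normalized layer l by its per-filter scales. The field names
 * (l.output_gpu, l.scales_gpu, l.out_c, l.out_w, l.out_h) mirror darknet's conventions
 * but are assumptions here.
 *
 *     scale_bias_gpu(l.output_gpu, l.scales_gpu, l.batch, l.out_c, l.out_w*l.out_h);
 *
 * The grid is 3-D: gridDim.x covers the spatial size in BLOCK-sized chunks, gridDim.y is
 * the filter index, gridDim.z the batch index, and the `offset < size` guard drops the
 * overhang when size is not a multiple of BLOCK.
 */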
__global__ void backward_scale_kernel(float *x_norm, float *delta, int batch, int n, int size, float *scale_updates)
{
    __shared__ float part[BLOCK];
    int i,b;
    int filter = blockIdx.x;
    int p = threadIdx.x;
    float sum = 0;
    // Each thread accumulates a strided partial sum over this filter's spatial positions.
    for(b = 0; b < batch; ++b){
        for(i = 0; i < size; i += BLOCK){
            int index = p + i + size*(filter + n*b);
            sum += (p+i < size) ? delta[index]*x_norm[index] : 0;
        }
    }
    part[p] = sum;
    __syncthreads();
    // Thread 0 folds the BLOCK partial sums into the scale update for this filter.
    if (p == 0) {
        for(i = 0; i < BLOCK; ++i) scale_updates[filter] += part[i];
    }
}

void backward_scale_gpu(float *x_norm, float *delta, int batch, int n, int size, float *scale_updates)
{
    backward_scale_kernel<<<n, BLOCK>>>(x_norm, delta, batch, n, size, scale_updates);
    check_error(cudaPeekAtLastError());
}
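/* Reduction pattern sketch (illustrative only): the backward_scale/backward_bias/fast_*
 * kernels in this file share one shape -- one block per filter, each thread accumulates a
 * strided partial sum into shared memory, then thread 0 folds the BLOCK partials after a
 * barrier. A minimal standalone version of that pattern, assuming a launch of
 * <<<rows, BLOCK>>>:
 *
 *     __global__ void block_sum_kernel(float *x, int size, float *out)
 *     {
 *         __shared__ float part[BLOCK];
 *         float sum = 0;
 *         for(int i = threadIdx.x; i < size; i += BLOCK) sum += x[blockIdx.x*size + i];
 *         part[threadIdx.x] = sum;
 *         __syncthreads();
 *         if(threadIdx.x == 0){
 *             float total = 0;
 *             for(int i = 0; i < BLOCK; ++i) total += part[i];
 *             out[blockIdx.x] = total;
 *         }
 *     }
 */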
__global__ void add_bias_kernel(float *output, float *biases, int batch, int n, int size)
{
    int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (index >= n*size*batch) return;
    // Decompose the flat index into spatial position i, filter j and batch k.
    int i = index % size;
    index /= size;
    int j = index % n;
    index /= n;
    int k = index;

    output[(k*n+j)*size + i] += biases[j];
}

void add_bias_gpu(float *output, float *biases, int batch, int n, int size)
{
    int num = n*size*batch;

    add_bias_kernel<<<cuda_gridsize(num), BLOCK>>>(output, biases, batch, n, size);
    check_error(cudaPeekAtLastError());
}
__global__ void backward_bias_conn_kernel(float *bias_updates, float *delta, int batch, int n)
{
    int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (index >= n) return;
    int b;
    float sum = 0;
    for(b = 0; b < batch; ++b){
        sum += delta[b*n + index];
    }
    bias_updates[index] += sum;
}

__global__ void backward_bias_kernel(float *bias_updates, float *delta, int batch, int n, int size)
{
    __shared__ float part[BLOCK];
    int i,b;
    int filter = blockIdx.x;
    int p = threadIdx.x;
    float sum = 0;
    for(b = 0; b < batch; ++b){
        for(i = 0; i < size; i += BLOCK){
            int index = p + i + size*(filter + n*b);
            sum += (p+i < size) ? delta[index] : 0;
        }
    }
    part[p] = sum;
    __syncthreads();
    if (p == 0) {
        for(i = 0; i < BLOCK; ++i) bias_updates[filter] += part[i];
    }
}

void backward_bias_gpu(float *bias_updates, float *delta, int batch, int n, int size)
{
    // Fully connected layers (size == 1) use the simple per-output kernel,
    // convolutional layers use the per-filter reduction kernel.
    if(size == 1){
        backward_bias_conn_kernel<<<cuda_gridsize(n), BLOCK>>>(bias_updates, delta, batch, n);
    }else{
        backward_bias_kernel<<<n, BLOCK>>>(bias_updates, delta, batch, n, size);
    }
    check_error(cudaPeekAtLastError());
}
__global__ void adam_kernel(int N, float *x, float *m, float *v, float B1, float B2, float rate, float eps, int t)
{
    int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (index >= N) return;

    float mhat = m[index] / (1.f - powf(B1, t));
    float vhat = v[index] / (1.f - powf(B2, t));

    x[index] = x[index] + rate * mhat / (sqrtf(vhat) + eps);
}

extern "C" void adam_gpu(int n, float *x, float *m, float *v, float B1, float B2, float rate, float eps, int t)
{
    adam_kernel<<<cuda_gridsize(n), BLOCK>>>(n, x, m, v, B1, B2, rate, eps, t);
    check_error(cudaPeekAtLastError());
}
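/* The kernel above applies the bias-corrected Adam step elementwise:
 *     mhat = m / (1 - B1^t),   vhat = v / (1 - B2^t)
 *     x   += rate * mhat / (sqrt(vhat) + eps)
 * Note the `+` rather than the textbook descent `-`: the gradient sign is handled
 * upstream, since darknet accumulates its update buffers as negative gradients.
 */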
180 extern "C" void adam_update_gpu(
float *w,
float *d,
float *m,
float *v,
float B1,
float B2,
float eps,
float decay,
float rate,
int n,
int batch,
int t)
184 axpy_gpu(n, -decay*batch, w, 1, d, 1);
190 adam_gpu(n, w, m, v, B1, B2, rate, eps, t);
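/* Usage sketch (illustrative, not part of the original file): applying an Adam step to the
 * weights of a hypothetical connected layer. The field names below follow darknet's style
 * but are assumptions; `a` would hold the optimizer hyperparameters and step counter.
 *
 *     adam_update_gpu(l.weights_gpu, l.weight_updates_gpu, l.m_gpu, l.v_gpu,
 *                     a.B1, a.B2, a.eps, a.decay, a.learning_rate,
 *                     l.inputs*l.outputs, a.batch, a.t);
 */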
__global__ void normalize_kernel(int N, float *x, float *mean, float *variance, int batch, int filters, int spatial)
{
    int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (index >= N) return;
    int f = (index/spatial)%filters;

    x[index] = (x[index] - mean[f])/(sqrtf(variance[f] + .00001f));
}

__global__ void normalize_delta_kernel(int N, float *x, float *mean, float *variance, float *mean_delta, float *variance_delta, int batch, int filters, int spatial, float *delta)
{
    int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (index >= N) return;
    int f = (index/spatial)%filters;

    delta[index] = delta[index] * 1.f/(sqrtf(variance[f] + .00001f)) + variance_delta[f] * 2.f * (x[index] - mean[f]) / (spatial * batch) + mean_delta[f]/(spatial*batch);
}

extern "C" void normalize_delta_gpu(float *x, float *mean, float *variance, float *mean_delta, float *variance_delta, int batch, int filters, int spatial, float *delta)
{
    size_t N = batch*filters*spatial;
    normalize_delta_kernel<<<cuda_gridsize(N), BLOCK>>>(N, x, mean, variance, mean_delta, variance_delta, batch, filters, spatial, delta);
    check_error(cudaPeekAtLastError());
}
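/* The delta rewrite in normalize_delta_kernel is the batch-norm chain rule, per element of
 * filter f, with eps = 1e-5:
 *     dL/dx = dL/dxhat * 1/sqrt(var_f + eps)
 *           + dL/dvar_f  * 2*(x - mean_f)/(spatial*batch)
 *           + dL/dmean_f * 1/(spatial*batch)
 * where mean_delta and variance_delta come from the *_delta kernels below.
 */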
__global__ void variance_delta_kernel(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta)
{
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (i >= filters) return;
    int j,k;
    variance_delta[i] = 0;
    for(j = 0; j < batch; ++j){
        for(k = 0; k < spatial; ++k){
            int index = j*filters*spatial + i*spatial + k;
            variance_delta[i] += delta[index]*(x[index] - mean[i]);
        }
    }
    variance_delta[i] *= -.5f * powf(variance[i] + .00001f, (float)(-3.f/2.f));
}

__global__ void accumulate_kernel(float *x, int n, int groups, float *sum)
{
    int k;
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (i >= groups) return;
    sum[i] = 0;
    for(k = 0; k < n; ++k){
        sum[i] += x[k*groups + i];
    }
}
__global__ void fast_mean_delta_kernel(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta)
{
    const int threads = BLOCK;
    __shared__ float local[threads];

    int id = threadIdx.x;
    local[id] = 0;

    int filter = blockIdx.x;

    int i, j;
    for(j = 0; j < batch; ++j){
        for(i = 0; i < spatial; i += threads){
            int index = j*spatial*filters + filter*spatial + i + id;
            local[id] += (i+id < spatial) ? delta[index] : 0;
        }
    }

    __syncthreads();

    if(id == 0){
        mean_delta[filter] = 0;
        for(i = 0; i < threads; ++i){
            mean_delta[filter] += local[i];
        }
        mean_delta[filter] *= (-1.f/sqrtf(variance[filter] + .00001f));
    }
}

__global__ void fast_variance_delta_kernel(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta)
{
    const int threads = BLOCK;
    __shared__ float local[threads];

    int id = threadIdx.x;
    local[id] = 0;

    int filter = blockIdx.x;

    int i, j;
    for(j = 0; j < batch; ++j){
        for(i = 0; i < spatial; i += threads){
            int index = j*spatial*filters + filter*spatial + i + id;

            local[id] += (i+id < spatial) ? delta[index]*(x[index] - mean[filter]) : 0;
        }
    }

    __syncthreads();

    if(id == 0){
        variance_delta[filter] = 0;
        for(i = 0; i < threads; ++i){
            variance_delta[filter] += local[i];
        }
        variance_delta[filter] *= -.5f * powf(variance[filter] + .00001f, (float)(-3.f/2.f));
    }
}
__global__ void mean_delta_kernel(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta)
{
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (i >= filters) return;
    int j,k;
    mean_delta[i] = 0;
    for (j = 0; j < batch; ++j) {
        for (k = 0; k < spatial; ++k) {
            int index = j*filters*spatial + i*spatial + k;
            mean_delta[i] += delta[index];
        }
    }
    mean_delta[i] *= (-1.f/sqrtf(variance[i] + .00001f));
}

extern "C" void mean_delta_gpu(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta)
{
    mean_delta_kernel<<<cuda_gridsize(filters), BLOCK>>>(delta, variance, batch, filters, spatial, mean_delta);
    check_error(cudaPeekAtLastError());
}

extern "C" void fast_mean_delta_gpu(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta)
{
    fast_mean_delta_kernel<<<filters, BLOCK>>>(delta, variance, batch, filters, spatial, mean_delta);
    check_error(cudaPeekAtLastError());
}

extern "C" void fast_variance_delta_gpu(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta)
{
    fast_variance_delta_kernel<<<filters, BLOCK>>>(x, delta, mean, variance, batch, filters, spatial, variance_delta);
    check_error(cudaPeekAtLastError());
}
__global__ void mean_kernel(float *x, int batch, int filters, int spatial, float *mean)
{
    float scale = 1.f/(batch * spatial);
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (i >= filters) return;
    int j,k;
    mean[i] = 0;
    for(j = 0; j < batch; ++j){
        for(k = 0; k < spatial; ++k){
            int index = j*filters*spatial + i*spatial + k;
            mean[i] += x[index];
        }
    }
    mean[i] *= scale;
}

__global__ void variance_kernel(float *x, float *mean, int batch, int filters, int spatial, float *variance)
{
    float scale = 1.f/(batch * spatial - 1);
    int j,k;
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (i >= filters) return;
    variance[i] = 0;
    for(j = 0; j < batch; ++j){
        for(k = 0; k < spatial; ++k){
            int index = j*filters*spatial + i*spatial + k;
            variance[i] += powf((x[index] - mean[i]), 2);
        }
    }
    variance[i] *= scale;
}
__global__ void reorg_kernel(int N, float *x, int w, int h, int c, int batch, int stride, int forward, float *out)
{
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if(i >= N) return;
    // Decompose the flat input index into (w, h, c, batch) coordinates.
    int in_index = i;
    int in_w = i%w;
    i = i/w;
    int in_h = i%h;
    i = i/h;
    int in_c = i%c;
    i = i/c;
    int b = i%batch;

    int out_c = c/(stride*stride);

    int c2 = in_c % out_c;
    int offset = in_c / out_c;
    int w2 = in_w*stride + offset % stride;
    int h2 = in_h*stride + offset / stride;

    int out_index = w2 + w*stride*(h2 + h*stride*(c2 + out_c*b));

    if(forward) out[out_index] = x[in_index];
    else out[in_index] = x[out_index];
}
__global__ void axpy_kernel(int N, float ALPHA, float *X, int OFFX, int INCX, float *Y, int OFFY, int INCY)
{
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if(i < N) Y[OFFY+i*INCY] += ALPHA*X[OFFX+i*INCX];
}

__global__ void pow_kernel(int N, float ALPHA, float *X, int INCX, float *Y, int INCY)
{
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if(i < N) Y[i*INCY] = pow(X[i*INCX], ALPHA);
}

__global__ void const_kernel(int N, float ALPHA, float *X, int INCX)
{
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if(i < N) X[i*INCX] = ALPHA;
}

__global__ void constrain_kernel(int N, float ALPHA, float *X, int INCX)
{
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if(i < N) X[i*INCX] = fminf(ALPHA, fmaxf(-ALPHA, X[i*INCX]));
}

__global__ void supp_kernel(int N, float ALPHA, float *X, int INCX)
{
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if(i < N){
        if((X[i*INCX] * X[i*INCX]) < (ALPHA * ALPHA)) X[i*INCX] = 0;
    }
}

__global__ void add_kernel(int N, float ALPHA, float *X, int INCX)
{
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if(i < N) X[i*INCX] += ALPHA;
}

__global__ void scal_kernel(int N, float ALPHA, float *X, int INCX)
{
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if(i < N) X[i*INCX] *= ALPHA;
}

__global__ void fill_kernel(int N, float ALPHA, float *X, int INCX)
{
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if(i < N) X[i*INCX] = ALPHA;
}

__global__ void copy_kernel(int N, float *X, int OFFX, int INCX, float *Y, int OFFY, int INCY)
{
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if(i < N) Y[i*INCY + OFFY] = X[i*INCX + OFFX];
}

__global__ void mul_kernel(int N, float *X, int INCX, float *Y, int INCY)
{
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if(i < N) Y[i*INCY] *= X[i*INCX];
}
465 extern "C" void normalize_gpu(
float *x,
float *mean,
float *variance,
int batch,
int filters,
int spatial)
467 size_t N = batch*filters*spatial;
468 normalize_kernel<<<cuda_gridsize(N), BLOCK>>>(N, x, mean, variance, batch, filters, spatial);
469 check_error(cudaPeekAtLastError());
472 __global__
void l2norm_kernel(
int N,
float *x,
float *dx,
int batch,
int filters,
int spatial)
474 int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
475 if (index >= N)
return;
476 int b = index / spatial;
477 int i = index % spatial;
480 for(f = 0; f < filters; ++f){
481 int index = b*filters*spatial + f*spatial + i;
482 sum += powf(x[index], 2);
485 if(sum == 0) sum = 1;
487 for(f = 0; f < filters; ++f){
488 int index = b*filters*spatial + f*spatial + i;
490 dx[index] = (1 - x[index]) / sum;
494 extern "C" void l2normalize_gpu(
float *x,
float *dx,
int batch,
int filters,
int spatial)
496 size_t N = batch*spatial;
497 l2norm_kernel<<<cuda_gridsize(N), BLOCK>>>(N, x, dx, batch, filters, spatial);
498 check_error(cudaPeekAtLastError());
__global__ void fast_mean_kernel(float *x, int batch, int filters, int spatial, float *mean)
{
    const int threads = BLOCK;
    __shared__ float local[threads];

    int id = threadIdx.x;
    local[id] = 0;

    int filter = blockIdx.x;

    int i, j;
    for(j = 0; j < batch; ++j){
        for(i = 0; i < spatial; i += threads){
            int index = j*spatial*filters + filter*spatial + i + id;
            local[id] += (i+id < spatial) ? x[index] : 0;
        }
    }

    __syncthreads();

    if(id == 0){
        mean[filter] = 0;
        for(i = 0; i < threads; ++i){
            mean[filter] += local[i];
        }
        mean[filter] /= spatial * batch;
    }
}

__global__ void fast_variance_kernel(float *x, float *mean, int batch, int filters, int spatial, float *variance)
{
    const int threads = BLOCK;
    __shared__ float local[threads];

    int id = threadIdx.x;
    local[id] = 0;

    int filter = blockIdx.x;

    int i, j;
    for(j = 0; j < batch; ++j){
        for(i = 0; i < spatial; i += threads){
            int index = j*spatial*filters + filter*spatial + i + id;

            local[id] += (i+id < spatial) ? powf((x[index] - mean[filter]), 2) : 0;
        }
    }

    __syncthreads();

    if(id == 0){
        variance[filter] = 0;
        for(i = 0; i < threads; ++i){
            variance[filter] += local[i];
        }
        variance[filter] /= (spatial * batch - 1);
    }
}

extern "C" void fast_mean_gpu(float *x, int batch, int filters, int spatial, float *mean)
{
    fast_mean_kernel<<<filters, BLOCK>>>(x, batch, filters, spatial, mean);
    check_error(cudaPeekAtLastError());
}

extern "C" void fast_variance_gpu(float *x, float *mean, int batch, int filters, int spatial, float *variance)
{
    fast_variance_kernel<<<filters, BLOCK>>>(x, mean, batch, filters, spatial, variance);
    check_error(cudaPeekAtLastError());
}
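/* Usage sketch (illustrative, not part of the original file): a batch-norm forward pass
 * would combine these helpers roughly as follows; the layer field names are assumptions
 * that follow darknet's naming.
 *
 *     fast_mean_gpu(l.output_gpu, l.batch, l.out_c, l.out_h*l.out_w, l.mean_gpu);
 *     fast_variance_gpu(l.output_gpu, l.mean_gpu, l.batch, l.out_c, l.out_h*l.out_w, l.variance_gpu);
 *     normalize_gpu(l.output_gpu, l.mean_gpu, l.variance_gpu, l.batch, l.out_c, l.out_h*l.out_w);
 *     scale_bias_gpu(l.output_gpu, l.scales_gpu, l.batch, l.out_c, l.out_h*l.out_w);
 *     add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.out_c, l.out_h*l.out_w);
 */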
573 extern "C" void mean_gpu(
float *x,
int batch,
int filters,
int spatial,
float *mean)
575 mean_kernel<<<cuda_gridsize(filters), BLOCK>>>(x, batch, filters, spatial, mean);
576 check_error(cudaPeekAtLastError());
579 extern "C" void variance_gpu(
float *x,
float *mean,
int batch,
int filters,
int spatial,
float *variance)
581 variance_kernel<<<cuda_gridsize(filters), BLOCK>>>(x, mean, batch, filters, spatial, variance);
582 check_error(cudaPeekAtLastError());
585 extern "C" void axpy_gpu(
int N,
float ALPHA,
float * X,
int INCX,
float * Y,
int INCY)
590 extern "C" void pow_gpu(
int N,
float ALPHA,
float * X,
int INCX,
float * Y,
int INCY)
592 pow_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX, Y, INCY);
593 check_error(cudaPeekAtLastError());
596 extern "C" void axpy_gpu_offset(
int N,
float ALPHA,
float * X,
int OFFX,
int INCX,
float * Y,
int OFFY,
int INCY)
598 axpy_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, OFFX, INCX, Y, OFFY, INCY);
599 check_error(cudaPeekAtLastError());
602 extern "C" void copy_gpu(
int N,
float * X,
int INCX,
float * Y,
int INCY)
607 extern "C" void mul_gpu(
int N,
float * X,
int INCX,
float * Y,
int INCY)
609 mul_kernel<<<cuda_gridsize(N), BLOCK>>>(N, X, INCX, Y, INCY);
610 check_error(cudaPeekAtLastError());
613 extern "C" void copy_gpu_offset(
int N,
float * X,
int OFFX,
int INCX,
float * Y,
int OFFY,
int INCY)
615 copy_kernel<<<cuda_gridsize(N), BLOCK>>>(N, X, OFFX, INCX, Y, OFFY, INCY);
616 check_error(cudaPeekAtLastError());
__global__ void flatten_kernel(int N, float *x, int spatial, int layers, int batch, int forward, float *out)
{
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if(i >= N) return;
    int in_s = i%spatial;
    i = i/spatial;
    int in_c = i%layers;
    i = i/layers;
    int b = i;

    int i1 = b*layers*spatial + in_c*spatial + in_s;
    int i2 = b*layers*spatial + in_s*layers + in_c;

    if (forward) out[i2] = x[i1];
    else out[i1] = x[i2];
}

extern "C" void flatten_gpu(float *x, int spatial, int layers, int batch, int forward, float *out)
{
    int size = spatial*batch*layers;
    flatten_kernel<<<cuda_gridsize(size), BLOCK>>>(size, x, spatial, layers, batch, forward, out);
    check_error(cudaPeekAtLastError());
}

extern "C" void reorg_gpu(float *x, int w, int h, int c, int batch, int stride, int forward, float *out)
{
    int size = w*h*c*batch;
    reorg_kernel<<<cuda_gridsize(size), BLOCK>>>(size, x, w, h, c, batch, stride, forward, out);
    check_error(cudaPeekAtLastError());
}
__global__ void mask_kernel(int n, float *x, float mask_num, float *mask, float val)
{
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if(i < n && mask[i] == mask_num) x[i] = val;
}

extern "C" void mask_gpu(int N, float * X, float mask_num, float * mask, float val)
{
    mask_kernel<<<cuda_gridsize(N), BLOCK>>>(N, X, mask_num, mask, val);
    check_error(cudaPeekAtLastError());
}

__global__ void scale_mask_kernel(int n, float *x, float mask_num, float *mask, float scale)
{
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if(i < n && mask[i] == mask_num) x[i] *= scale;
}

extern "C" void scale_mask_gpu(int N, float * X, float mask_num, float * mask, float scale)
{
    scale_mask_kernel<<<cuda_gridsize(N), BLOCK>>>(N, X, mask_num, mask, scale);
    check_error(cudaPeekAtLastError());
}

extern "C" void const_gpu(int N, float ALPHA, float * X, int INCX)
{
    const_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX);
    check_error(cudaPeekAtLastError());
}

extern "C" void constrain_gpu(int N, float ALPHA, float * X, int INCX)
{
    constrain_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX);
    check_error(cudaPeekAtLastError());
}

extern "C" void add_gpu(int N, float ALPHA, float * X, int INCX)
{
    add_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX);
    check_error(cudaPeekAtLastError());
}

extern "C" void scal_gpu(int N, float ALPHA, float * X, int INCX)
{
    scal_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX);
    check_error(cudaPeekAtLastError());
}

extern "C" void supp_gpu(int N, float ALPHA, float * X, int INCX)
{
    supp_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX);
    check_error(cudaPeekAtLastError());
}

extern "C" void fill_gpu(int N, float ALPHA, float * X, int INCX)
{
    fill_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX);
    check_error(cudaPeekAtLastError());
}
__global__ void shortcut_kernel(int size, int minw, int minh, int minc, int stride, int sample, int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float s1, float s2, float *out)
{
    int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (id >= size) return;
    // Decompose the flat index over the overlapping (minw, minh, minc, batch) volume.
    int i = id % minw;
    id /= minw;
    int j = id % minh;
    id /= minh;
    int k = id % minc;
    id /= minc;
    int b = id % batch;

    int out_index = i*sample + w2*(j*sample + h2*(k + c2*b));
    int add_index = i*stride + w1*(j*stride + h1*(k + c1*b));
    out[out_index] = s1*out[out_index] + s2*add[add_index];
}

extern "C" void shortcut_gpu(int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float s1, float s2, float *out)
{
    int minw = (w1 < w2) ? w1 : w2;
    int minh = (h1 < h2) ? h1 : h2;
    int minc = (c1 < c2) ? c1 : c2;

    int stride = w1/w2;
    int sample = w2/w1;
    assert(stride == h1/h2);
    assert(sample == h2/h1);
    if(stride < 1) stride = 1;
    if(sample < 1) sample = 1;

    int size = batch * minw * minh * minc;
    shortcut_kernel<<<cuda_gridsize(size), BLOCK>>>(size, minw, minh, minc, stride, sample, batch, w1, h1, c1, add, w2, h2, c2, s1, s2, out);
    check_error(cudaPeekAtLastError());
}
__global__ void smooth_l1_kernel(int n, float *pred, float *truth, float *delta, float *error)
{
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if(i < n){
        float diff = truth[i] - pred[i];
        float abs_val = fabsf(diff);
        if(abs_val < 1) {
            error[i] = diff * diff;
            delta[i] = diff;
        }
        else {
            error[i] = 2*abs_val - 1;
            delta[i] = (diff > 0) ? 1 : -1;
        }
    }
}

extern "C" void smooth_l1_gpu(int n, float *pred, float *truth, float *delta, float *error)
{
    smooth_l1_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error);
    check_error(cudaPeekAtLastError());
}

__global__ void softmax_x_ent_kernel(int n, float *pred, float *truth, float *delta, float *error)
{
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if(i < n){
        float t = truth[i];
        float p = pred[i];
        error[i] = (t) ? -log(p) : 0;
        delta[i] = t-p;
    }
}

extern "C" void softmax_x_ent_gpu(int n, float *pred, float *truth, float *delta, float *error)
{
    softmax_x_ent_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error);
    check_error(cudaPeekAtLastError());
}

__global__ void logistic_x_ent_kernel(int n, float *pred, float *truth, float *delta, float *error)
{
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if(i < n){
        float t = truth[i];
        float p = pred[i];
        error[i] = -t*log(p+.0000001) - (1-t)*log(1-p+.0000001);
        delta[i] = t-p;
    }
}

extern "C" void logistic_x_ent_gpu(int n, float *pred, float *truth, float *delta, float *error)
{
    logistic_x_ent_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error);
    check_error(cudaPeekAtLastError());
}

__global__ void l2_kernel(int n, float *pred, float *truth, float *delta, float *error)
{
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if(i < n){
        float diff = truth[i] - pred[i];
        error[i] = diff * diff;
        delta[i] = diff;
    }
}

extern "C" void l2_gpu(int n, float *pred, float *truth, float *delta, float *error)
{
    l2_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error);
    check_error(cudaPeekAtLastError());
}

__global__ void l1_kernel(int n, float *pred, float *truth, float *delta, float *error)
{
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if(i < n){
        float diff = truth[i] - pred[i];
        error[i] = abs(diff);
        delta[i] = (diff > 0) ? 1 : -1;
    }
}

extern "C" void l1_gpu(int n, float *pred, float *truth, float *delta, float *error)
{
    l1_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error);
    check_error(cudaPeekAtLastError());
}

__global__ void wgan_kernel(int n, float *pred, float *truth, float *delta, float *error)
{
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if(i < n){
        error[i] = truth[i] ? -pred[i] : pred[i];
        delta[i] = (truth[i] > 0) ? 1 : -1;
    }
}

extern "C" void wgan_gpu(int n, float *pred, float *truth, float *delta, float *error)
{
    wgan_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error);
    check_error(cudaPeekAtLastError());
}
__global__ void weighted_sum_kernel(int n, float *a, float *b, float *s, float *c)
{
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if(i < n){
        c[i] = s[i]*a[i] + (1-s[i])*(b ? b[i] : 0);
    }
}

__global__ void deinter_kernel(int NX, float *X, int NY, float *Y, int B, float *OUT)
{
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if(i < (NX+NY)*B){
        int b = i / (NX+NY);
        int j = i % (NX+NY);
        if (j < NX){
            if(X) X[b*NX + j] += OUT[i];
        } else {
            if(Y) Y[b*NY + j - NX] += OUT[i];
        }
    }
}

extern "C" void deinter_gpu(int NX, float *X, int NY, float *Y, int B, float *OUT)
{
    deinter_kernel<<<cuda_gridsize((NX+NY)*B), BLOCK>>>(NX, X, NY, Y, B, OUT);
    check_error(cudaPeekAtLastError());
}

__global__ void inter_kernel(int NX, float *X, int NY, float *Y, int B, float *OUT)
{
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if(i < (NX+NY)*B){
        int b = i / (NX+NY);
        int j = i % (NX+NY);
        if (j < NX){
            OUT[i] = X[b*NX + j];
        } else {
            OUT[i] = Y[b*NY + j - NX];
        }
    }
}

extern "C" void inter_gpu(int NX, float *X, int NY, float *Y, int B, float *OUT)
{
    inter_kernel<<<cuda_gridsize((NX+NY)*B), BLOCK>>>(NX, X, NY, Y, B, OUT);
    check_error(cudaPeekAtLastError());
}

extern "C" void weighted_sum_gpu(float *a, float *b, float *s, int num, float *c)
{
    weighted_sum_kernel<<<cuda_gridsize(num), BLOCK>>>(num, a, b, s, c);
    check_error(cudaPeekAtLastError());
}

__global__ void weighted_delta_kernel(int n, float *a, float *b, float *s, float *da, float *db, float *ds, float *dc)
{
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if(i < n){
        if(da) da[i] += dc[i] * s[i];
        if(db) db[i] += dc[i] * (1-s[i]);
        ds[i] += dc[i] * (a[i] - b[i]);
    }
}

extern "C" void weighted_delta_gpu(float *a, float *b, float *s, float *da, float *db, float *ds, int num, float *dc)
{
    weighted_delta_kernel<<<cuda_gridsize(num), BLOCK>>>(num, a, b, s, da, db, ds, dc);
    check_error(cudaPeekAtLastError());
}

__global__ void mult_add_into_kernel(int n, float *a, float *b, float *c)
{
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if(i < n){
        c[i] += a[i]*b[i];
    }
}

extern "C" void mult_add_into_gpu(int num, float *a, float *b, float *c)
{
    mult_add_into_kernel<<<cuda_gridsize(num), BLOCK>>>(num, a, b, c);
    check_error(cudaPeekAtLastError());
}
__device__ void softmax_device(float *input, int n, float temp, int stride, float *output)
{
    int i;
    float sum = 0;
    float largest = -INFINITY;
    for(i = 0; i < n; ++i){
        float val = input[i*stride];
        largest = (val>largest) ? val : largest;
    }
    for(i = 0; i < n; ++i){
        float e = expf(input[i*stride]/temp - largest/temp);
        sum += e;
        output[i*stride] = e;
    }
    for(i = 0; i < n; ++i){
        output[i*stride] /= sum;
    }
}

__global__ void softmax_tree_kernel(float *input, int spatial, int batch, int stride, float temp, float *output, int groups, int *group_size, int *group_offset)
{
    int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (id >= spatial*batch*groups) return;
    int s = id % spatial;
    id = id / spatial;
    int g = id % groups;
    int b = id / groups;
    int goff = group_offset[g]*spatial;
    int boff = b*stride;
    softmax_device(input + goff + boff + s, group_size[g], temp, spatial, output + goff + boff + s);
}

extern "C" void softmax_tree(float *input, int spatial, int batch, int stride, float temp, float *output, tree hier)
{
    int *tree_groups_size = cuda_make_int_array(hier.group_size, hier.groups);
    int *tree_groups_offset = cuda_make_int_array(hier.group_offset, hier.groups);
    int num = spatial*batch*hier.groups;
    softmax_tree_kernel<<<cuda_gridsize(num), BLOCK>>>(input, spatial, batch, stride, temp, output, hier.groups, tree_groups_size, tree_groups_offset);
    check_error(cudaPeekAtLastError());
    cuda_free((float *)tree_groups_size);
    cuda_free((float *)tree_groups_offset);
}

__global__ void softmax_kernel(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output)
{
    int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (id >= batch*groups) return;
    int b = id / groups;
    int g = id % groups;
    softmax_device(input + b*batch_offset + g*group_offset, n, temp, stride, output + b*batch_offset + g*group_offset);
}

extern "C" void softmax_gpu(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output)
{
    softmax_kernel<<<cuda_gridsize(batch*groups), BLOCK>>>(input, n, batch, batch_offset, groups, group_offset, stride, temp, output);
    check_error(cudaPeekAtLastError());
}
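/* Usage sketch (illustrative, not part of the original file): a plain softmax over the
 * class scores of every image in a batch, with contiguous per-image layout, a single
 * group, unit stride and temperature 1. The field names are assumptions.
 *
 *     softmax_gpu(l.output_gpu, l.n_classes, l.batch, l.n_classes, 1, 0, 1, 1, l.output_gpu);
 */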
__global__ void upsample_kernel(size_t N, float *x, int w, int h, int c, int batch, int stride, int forward, float scale, float *out)
{
    size_t i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if(i >= N) return;
    // Decompose the flat output index, then map back to the nearest input element.
    int out_index = i;
    int out_w = i%(w*stride);
    i = i/(w*stride);
    int out_h = i%(h*stride);
    i = i/(h*stride);
    int out_c = i%c;
    i = i/c;
    int b = i%batch;

    int in_w = out_w / stride;
    int in_h = out_h / stride;
    int in_c = out_c;

    int in_index = b*w*h*c + in_c*w*h + in_h*w + in_w;

    if(forward) out[out_index] += scale * x[in_index];
    else atomicAdd(x+in_index, scale * out[out_index]);
}

extern "C" void upsample_gpu(float *in, int w, int h, int c, int batch, int stride, int forward, float scale, float *out)
{
    size_t size = w*h*c*batch*stride*stride;
    upsample_kernel<<<cuda_gridsize(size), BLOCK>>>(size, in, w, h, c, batch, stride, forward, scale, out);
    check_error(cudaPeekAtLastError());
}
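/* Usage sketch (illustrative, not part of the original file): forward pass of a 2x
 * nearest-neighbour upsample layer. Because the kernel accumulates with +=, the output
 * buffer is zeroed first; field names are assumptions following darknet's style.
 *
 *     fill_gpu(l.outputs*l.batch, 0, l.output_gpu, 1);
 *     upsample_gpu(net.input_gpu, l.w, l.h, l.c, l.batch, l.stride, 1, l.scale, l.output_gpu);
 */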