fprintf(stderr, "res %3d %4d x%4d x%4d -> %4d x%4d x%4d\n", index, w2, h2, c2, w, h, c);
cuda_free(l->output_gpu);
cuda_free(l->delta_gpu);
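These two cuda_free calls sit in resize_shortcut_layer: when the network's spatial size changes, the host-side output and delta buffers are resized and the stale device copies are released and re-created. A minimal sketch of that pattern; the realloc sizing and cuda_make_array calls are assumptions, not visible in this excerpt.

/* Sketch of resize_shortcut_layer; buffer sizing is an assumption. */
void resize_shortcut_layer(layer *l, int w, int h)
{
    l->w = l->out_w = w;
    l->h = l->out_h = h;
    l->outputs = w*h*l->out_c;
    l->inputs = l->outputs;
    l->output = realloc(l->output, l->outputs*l->batch*sizeof(float));
    l->delta  = realloc(l->delta,  l->outputs*l->batch*sizeof(float));
#ifdef GPU
    cuda_free(l->output_gpu);   /* release stale device buffers */
    cuda_free(l->delta_gpu);
    l->output_gpu = cuda_make_array(l->output, l->outputs*l->batch);
    l->delta_gpu  = cuda_make_array(l->delta,  l->outputs*l->batch);
#endif
}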
shortcut_cpu(l.batch, l.w, l.h, l.c, net.layers[l.index].output, l.out_w, l.out_h, l.out_c, l.alpha, l.beta, l.output);
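This shortcut_cpu call is the middle step of forward_shortcut_layer. Judging from the helpers listed at the end of this section (copy_cpu, shortcut_cpu, activate_array), the forward pass plausibly copies the incoming activation into l.output, blends in the output of the layer at l.index, then applies the activation. A sketch under those assumptions; the copy_cpu and activate_array arguments are not shown in this excerpt.

/* Sketch of forward_shortcut_layer: copy, blend, activate. */
void forward_shortcut_layer(const layer l, network net)
{
    /* start from the current input */
    copy_cpu(l.outputs*l.batch, net.input, 1, l.output, 1);
    /* blend: output = alpha*output + beta*(output of layer l.index), resampled if shapes differ */
    shortcut_cpu(l.batch, l.w, l.h, l.c, net.layers[l.index].output,
                 l.out_w, l.out_h, l.out_c, l.alpha, l.beta, l.output);
    activate_array(l.output, l.outputs*l.batch, l.activation);
}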
shortcut_cpu(l.batch, l.out_w, l.out_h, l.out_c, l.delta, l.w, l.h, l.c, 1, l.beta, net.layers[l.index].delta);
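Here the same shortcut_cpu helper runs in reverse inside backward_shortcut_layer: the layer's delta is pushed back into the skipped layer's delta, scaled by beta and accumulated (s1 = 1). Combined with the gradient_array and axpy_cpu helpers listed below, a plausible backward pass looks like the sketch that follows; the gradient_array and axpy_cpu arguments are assumptions.

/* Sketch of backward_shortcut_layer: activation gradient, then route the delta to both inputs. */
void backward_shortcut_layer(const layer l, network net)
{
    /* multiply the incoming delta by the activation derivative */
    gradient_array(l.output, l.outputs*l.batch, l.activation, l.delta);
    /* straight-through path: net.delta += alpha * l.delta */
    axpy_cpu(l.outputs*l.batch, l.alpha, l.delta, 1, net.delta, 1);
    /* skip path: layers[l.index].delta += beta * l.delta, resampled if shapes differ */
    shortcut_cpu(l.batch, l.out_w, l.out_h, l.out_c, l.delta,
                 l.w, l.h, l.c, 1, l.beta, net.layers[l.index].delta);
}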
shortcut_gpu(l.batch, l.w, l.h, l.c, net.layers[l.index].output_gpu, l.out_w, l.out_h, l.out_c, l.alpha, l.beta, l.output_gpu);
void backward_shortcut_layer_gpu(const layer l, network net)
shortcut_gpu(l.batch, l.out_w, l.out_h, l.out_c, l.delta_gpu, l.w, l.h, l.c, 1, l.beta, net.layers[l.index].delta_gpu);
void backward_shortcut_layer(const layer l, network net)
void resize_shortcut_layer(layer *l, int w, int h)
void (*forward_gpu)(struct layer, struct network)
void (*backward_gpu)(struct layer, struct network)
void axpy_gpu(int N, float ALPHA, float *X, int INCX, float *Y, int INCY)
void (*forward)(struct layer, struct network)
void gradient_array(const float *x, const int n, const ACTIVATION a, float *delta)
void shortcut_cpu(int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float s1, float s2, float *out)
void axpy_cpu(int N, float ALPHA, float *X, int INCX, float *Y, int INCY)
void (*backward)(struct layer, struct network)
void copy_gpu(int N, float *X, int INCX, float *Y, int INCY)
void forward_shortcut_layer(const layer l, network net)
void shortcut_gpu(int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float s1, float s2, float *out)
void activate_array(float *x, const int n, const ACTIVATION a)
void copy_cpu(int N, float *X, int INCX, float *Y, int INCY)
void activate_array_gpu(float *x, int n, ACTIVATION a)
layer make_shortcut_layer(int batch, int index, int w, int h, int c, int w2, int h2, int c2)
void gradient_array_gpu(float *x, int n, ACTIVATION a, float *delta)
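The shortcut_cpu/shortcut_gpu signatures above take two shapes (w1, h1, c1 for the tensor being added, w2, h2, c2 for the destination) plus two blend factors. The call sites earlier in this section suggest the blend is out = s1*out + s2*add, with mismatched shapes handled by integer striding or skipping. A minimal sketch under those assumptions; the exact index arithmetic is not confirmed by this excerpt.

/* Sketch of shortcut_cpu: blend `add` (w1 x h1 x c1) into `out` (w2 x h2 x c2). */
void shortcut_cpu(int batch, int w1, int h1, int c1, float *add,
                  int w2, int h2, int c2, float s1, float s2, float *out)
{
    int stride = w1/w2;  if(stride < 1) stride = 1;   /* add is larger: subsample add */
    int sample = w2/w1;  if(sample < 1) sample = 1;   /* out is larger: write every sample-th cell */
    int minw = (w1 < w2) ? w1 : w2;
    int minh = (h1 < h2) ? h1 : h2;
    int minc = (c1 < c2) ? c1 : c2;

    int b, k, j, i;
    for(b = 0; b < batch; ++b){
        for(k = 0; k < minc; ++k){
            for(j = 0; j < minh; ++j){
                for(i = 0; i < minw; ++i){
                    int out_index = i*sample + w2*(j*sample + h2*(k + c2*b));
                    int add_index = i*stride + w1*(j*stride + h1*(k + c1*b));
                    out[out_index] = s1*out[out_index] + s2*add[add_index];
                }
            }
        }
    }
}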