/* Excerpts from make_reorg_layer(): output channel count in the two branches */
l.out_c = c/(stride*stride);    /* reverse branch (depth-to-space): stride^2 fewer channels */
l.out_c = c*(stride*stride);    /* default branch (space-to-depth): stride^2 more channels  */
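The two assignments above come from the reverse and default branches; in both cases the per-image element count is preserved, because the spatial dimensions scale by the opposite factor. A minimal standalone sketch of that shape arithmetic (the helper and struct names below are illustrative, not darknet's):

#include <stdio.h>

/* Illustrative only: reorg output shape for both directions.
 * Default (space-to-depth): w,h shrink by stride, c grows by stride^2.
 * Reverse (depth-to-space): w,h grow by stride, c shrinks by stride^2. */
struct dims { int w, h, c; };

static struct dims reorg_out_dims(int w, int h, int c, int stride, int reverse)
{
    struct dims d;
    if (reverse) { d.w = w*stride; d.h = h*stride; d.c = c/(stride*stride); }
    else         { d.w = w/stride; d.h = h/stride; d.c = c*(stride*stride); }
    return d;
}

int main(void)
{
    /* Example: 26 x 26 x 64 with stride 2 -> 13 x 13 x 256; 26*26*64 == 13*13*256. */
    struct dims d = reorg_out_dims(26, 26, 64, 2, 0);
    printf("%d x %d x %d (%d elements)\n", d.w, d.h, d.c, d.w*d.h*d.c);
    return 0;
}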
fprintf(stderr, "reorg /%2d %4d x%4d x%4d -> %4d x%4d x%4d\n",
        stride, w, h, c, l.out_w, l.out_h, l.out_c);
int output_size = l.outputs * batch;
l.output = calloc(output_size, sizeof(float));   /* host output buffer, zero-initialized */
l.delta  = calloc(output_size, sizeof(float));   /* host gradient buffer */
l.output_gpu = cuda_make_array(l.output, output_size);   /* device mirrors (GPU build) */
l.delta_gpu  = cuda_make_array(l.delta, output_size);
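cuda_make_array pairs each host buffer with a device copy of the same size. A sketch, under the assumption that it behaves like a plain allocate-and-upload helper built on the CUDA runtime (the function name below is made up; only the cuda_make_array call sites above are from the source):

#include <stddef.h>
#include <cuda_runtime.h>

/* Hypothetical stand-in for an upload helper such as cuda_make_array(x, n):
 * allocate n floats on the device and, if a host buffer is given, copy it up. */
static float *device_copy_of(const float *x, size_t n)
{
    float *x_gpu = NULL;
    cudaMalloc((void **)&x_gpu, n * sizeof(float));
    if (x) cudaMemcpy(x_gpu, x, n * sizeof(float), cudaMemcpyHostToDevice);
    return x_gpu;
}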
/* Excerpts from resize_reorg_layer(): recompute output channels, then resize buffers */
l->out_c = c/(stride*stride);    /* reverse branch */
l->out_c = c*(stride*stride);    /* default branch */
l->output = realloc(l->output, output_size * sizeof(float));
l->delta  = realloc(l->delta, output_size * sizeof(float));
cuda_free(l->output_gpu);
cuda_free(l->delta_gpu);
l->output_gpu = cuda_make_array(l->output, output_size);
l->delta_gpu  = cuda_make_array(l->delta, output_size);
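The resize path reuses the existing buffers rather than rebuilding the layer: the host arrays grow or shrink with realloc, while the stale device copies are freed and re-uploaded at the new size. A compact standalone sketch of that pattern (function name and error handling are mine, not darknet's):

#include <stdlib.h>
#include <cuda_runtime.h>

/* Illustrative resize pattern: realloc the host buffer, drop the old device copy,
 * and upload the resized host contents to a fresh device allocation. */
static int resize_mirrored_buffer(float **host, float **dev, size_t new_count)
{
    float *grown = realloc(*host, new_count * sizeof(float));
    if (!grown) return -1;          /* keep the old buffers intact on failure */
    *host = grown;

    cudaFree(*dev);                 /* old allocation no longer matches the new size */
    if (cudaMalloc((void **)dev, new_count * sizeof(float)) != cudaSuccess) return -1;
    cudaMemcpy(*dev, *host, new_count * sizeof(float), cudaMemcpyHostToDevice);
    return 0;
}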
/* The same fallback appears four times in the file, in forward_reorg_layer(),
 * backward_reorg_layer(), and their GPU counterparts: when l.extra is set, the
 * layer skips the reorg shuffle and walks the batch with a plain per-item loop. */
} else if (l.extra) {
    for(i = 0; i < l.batch; ++i){
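The loop bodies are not part of the excerpt; presumably each iteration hands one batch item to the copy routines listed below (copy_cpu on the CPU path, copy_gpu on the GPU path), but that is an assumption read off the signatures. A minimal sketch of a BLAS-style strided copy matching the copy_cpu(N, X, INCX, Y, INCY) signature (renamed to avoid claiming it is darknet's implementation):

/* Sketch of a strided element copy with copy_cpu's signature: copies N floats,
 * reading every INCX-th element of X and writing every INCY-th element of Y. */
static void strided_copy(int N, const float *X, int INCX, float *Y, int INCY)
{
    for (int i = 0; i < N; ++i) Y[i*INCY] = X[i*INCX];
}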
void(* forward_gpu)(struct layer, struct network)
void(* backward_gpu)(struct layer, struct network)
void resize_reorg_layer(layer *l, int w, int h)
void backward_reorg_layer(const layer l, network net)
void(* forward)(struct layer, struct network)
void flatten_gpu(float *x, int spatial, int layers, int batch, int forward, float *out)
void forward_reorg_layer(const layer l, network net)
void flatten(float *x, int size, int layers, int batch, int forward)
void(* backward)(struct layer, struct network)
void copy_gpu(int N, float *X, int INCX, float *Y, int INCY)
void copy_cpu(int N, float *X, int INCX, float *Y, int INCY)
void reorg_cpu(float *x, int w, int h, int c, int batch, int stride, int forward, float *out)
layer make_reorg_layer(int batch, int w, int h, int c, int stride, int reverse, int flatten, int extra)
void reorg_gpu(float *x, int w, int h, int c, int batch, int stride, int forward, float *out)
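For reference, the shape transform that reorg_cpu/reorg_gpu perform is the familiar space-to-depth shuffle: each stride x stride spatial block becomes stride^2 extra channels. The standalone sketch below shows one such mapping end to end; the exact element ordering inside darknet's reorg_cpu may differ, so treat it as an illustration of the idea rather than a drop-in replacement.

#include <stdio.h>

/* Space-to-depth for one image in CHW layout:
 * (c, h, w) -> (c*stride*stride, h/stride, w/stride), element count unchanged.
 * The channel ordering here is one reasonable choice, not necessarily darknet's. */
static void space_to_depth(const float *in, float *out, int w, int h, int c, int stride)
{
    int out_w = w / stride, out_h = h / stride;
    for (int k = 0; k < c; ++k)
        for (int y = 0; y < out_h; ++y)
            for (int x = 0; x < out_w; ++x)
                for (int dy = 0; dy < stride; ++dy)
                    for (int dx = 0; dx < stride; ++dx) {
                        int in_idx  = (k*h + y*stride + dy) * w + x*stride + dx;
                        int oc      = k*stride*stride + dy*stride + dx;
                        int out_idx = (oc*out_h + y) * out_w + x;
                        out[out_idx] = in[in_idx];
                    }
}

int main(void)
{
    float in[1*4*4], out[4*2*2];                 /* c=1, h=4, w=4, stride=2 */
    for (int i = 0; i < 16; ++i) in[i] = (float)i;
    space_to_depth(in, out, 4, 4, 1, 2);
    for (int i = 0; i < 16; ++i) printf("%g ", out[i]);   /* same 16 values, reordered */
    printf("\n");
    return 0;
}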