darknet v3
reorg_layer.c
#include "reorg_layer.h"
#include "cuda.h"
#include "blas.h"

#include <stdio.h>
#include <stdlib.h>   /* calloc, realloc */
#include <string.h>   /* memcpy */


layer make_reorg_layer(int batch, int w, int h, int c, int stride, int reverse, int flatten, int extra)
{
    layer l = {0};
    l.type = REORG;
    l.batch = batch;
    l.stride = stride;
    l.extra = extra;
    l.h = h;
    l.w = w;
    l.c = c;
    l.flatten = flatten;
    if(reverse){
        /* reverse reorg: upsample spatially, shrink channels by stride^2 */
        l.out_w = w*stride;
        l.out_h = h*stride;
        l.out_c = c/(stride*stride);
    }else{
        /* forward reorg: downsample spatially, grow channels by stride^2 */
        l.out_w = w/stride;
        l.out_h = h/stride;
        l.out_c = c*(stride*stride);
    }
    l.reverse = reverse;

    l.outputs = l.out_h * l.out_w * l.out_c;
    l.inputs = h*w*c;
    if(l.extra){
        /* "extra" mode passes the input through and reserves extra trailing outputs */
        l.out_w = l.out_h = l.out_c = 0;
        l.outputs = l.inputs + l.extra;
    }

    if(extra){
        fprintf(stderr, "reorg %4d -> %4d\n", l.inputs, l.outputs);
    } else {
        fprintf(stderr, "reorg /%2d %4d x%4d x%4d -> %4d x%4d x%4d\n", stride, w, h, c, l.out_w, l.out_h, l.out_c);
    }
    int output_size = l.outputs * batch;
    l.output = calloc(output_size, sizeof(float));
    l.delta = calloc(output_size, sizeof(float));

    l.forward = forward_reorg_layer;
    l.backward = backward_reorg_layer;
#ifdef GPU
    l.forward_gpu = forward_reorg_layer_gpu;
    l.backward_gpu = backward_reorg_layer_gpu;

    l.output_gpu = cuda_make_array(l.output, output_size);
    l.delta_gpu = cuda_make_array(l.delta, output_size);
#endif
    return l;
}

void resize_reorg_layer(layer *l, int w, int h)
{
    int stride = l->stride;
    int c = l->c;

    l->h = h;
    l->w = w;

    if(l->reverse){
        l->out_w = w*stride;
        l->out_h = h*stride;
        l->out_c = c/(stride*stride);
    }else{
        l->out_w = w/stride;
        l->out_h = h/stride;
        l->out_c = c*(stride*stride);
    }

    l->outputs = l->out_h * l->out_w * l->out_c;
    l->inputs = l->outputs;
    int output_size = l->outputs * l->batch;

    l->output = realloc(l->output, output_size * sizeof(float));
    l->delta = realloc(l->delta, output_size * sizeof(float));

#ifdef GPU
    cuda_free(l->output_gpu);
    cuda_free(l->delta_gpu);
    l->output_gpu = cuda_make_array(l->output, output_size);
    l->delta_gpu = cuda_make_array(l->delta, output_size);
#endif
}

void forward_reorg_layer(const layer l, network net)
{
    /* CPU forward pass: flatten, pass-through ("extra"), or reorg, depending on the layer flags. */
    int i;
    if(l.flatten){
        memcpy(l.output, net.input, l.outputs*l.batch*sizeof(float));
        if(l.reverse){
            flatten(l.output, l.w*l.h, l.c, l.batch, 0);
        }else{
            flatten(l.output, l.w*l.h, l.c, l.batch, 1);
        }
    } else if (l.extra) {
        for(i = 0; i < l.batch; ++i){
            copy_cpu(l.inputs, net.input + i*l.inputs, 1, l.output + i*l.outputs, 1);
        }
    } else if (l.reverse){
        reorg_cpu(net.input, l.w, l.h, l.c, l.batch, l.stride, 1, l.output);
    } else {
        reorg_cpu(net.input, l.w, l.h, l.c, l.batch, l.stride, 0, l.output);
    }
}

void backward_reorg_layer(const layer l, network net)
{
    /* CPU backward pass: route the layer's delta back to the network delta with the inverse operation. */
    int i;
    if(l.flatten){
        memcpy(net.delta, l.delta, l.outputs*l.batch*sizeof(float));
        if(l.reverse){
            flatten(net.delta, l.w*l.h, l.c, l.batch, 1);
        }else{
            flatten(net.delta, l.w*l.h, l.c, l.batch, 0);
        }
    } else if(l.reverse){
        reorg_cpu(l.delta, l.w, l.h, l.c, l.batch, l.stride, 0, net.delta);
    } else if (l.extra) {
        for(i = 0; i < l.batch; ++i){
            copy_cpu(l.inputs, l.delta + i*l.outputs, 1, net.delta + i*l.inputs, 1);
        }
    }else{
        reorg_cpu(l.delta, l.w, l.h, l.c, l.batch, l.stride, 1, net.delta);
    }
}

#ifdef GPU
void forward_reorg_layer_gpu(layer l, network net)
{
    int i;
    if(l.flatten){
        if(l.reverse){
            flatten_gpu(net.input_gpu, l.w*l.h, l.c, l.batch, 0, l.output_gpu);
        }else{
            flatten_gpu(net.input_gpu, l.w*l.h, l.c, l.batch, 1, l.output_gpu);
        }
    } else if (l.extra) {
        for(i = 0; i < l.batch; ++i){
            copy_gpu(l.inputs, net.input_gpu + i*l.inputs, 1, l.output_gpu + i*l.outputs, 1);
        }
    } else if (l.reverse) {
        reorg_gpu(net.input_gpu, l.w, l.h, l.c, l.batch, l.stride, 1, l.output_gpu);
    } else {
        reorg_gpu(net.input_gpu, l.w, l.h, l.c, l.batch, l.stride, 0, l.output_gpu);
    }
}

void backward_reorg_layer_gpu(layer l, network net)
{
    if(l.flatten){
        if(l.reverse){
            flatten_gpu(l.delta_gpu, l.w*l.h, l.c, l.batch, 1, net.delta_gpu);
        }else{
            flatten_gpu(l.delta_gpu, l.w*l.h, l.c, l.batch, 0, net.delta_gpu);
        }
    } else if (l.extra) {
        int i;
        for(i = 0; i < l.batch; ++i){
            copy_gpu(l.inputs, l.delta_gpu + i*l.outputs, 1, net.delta_gpu + i*l.inputs, 1);
        }
    } else if(l.reverse){
        reorg_gpu(l.delta_gpu, l.w, l.h, l.c, l.batch, l.stride, 0, net.delta_gpu);
    } else {
        reorg_gpu(l.delta_gpu, l.w, l.h, l.c, l.batch, l.stride, 1, net.delta_gpu);
    }
}
#endif
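
A minimal usage sketch (not part of reorg_layer.c) showing how this layer is typically wired up: it builds a stride-2 forward reorg layer that rearranges a 26 x 26 x 64 input into 13 x 13 x 256 and runs the CPU forward pass. It assumes a CPU-only build (no GPU define) and a zero-initialized network struct with only the input field filled in, which is an illustration-only shortcut rather than how darknet constructs networks.

/* Hypothetical standalone driver, assuming the darknet headers and library are available. */
#include <stdlib.h>
#include "darknet.h"
#include "reorg_layer.h"

int main(void)
{
    /* Stride-2 forward reorg: 26x26x64 -> 13x13x256 (same total element count). */
    layer l = make_reorg_layer(/*batch=*/1, /*w=*/26, /*h=*/26, /*c=*/64,
                               /*stride=*/2, /*reverse=*/0, /*flatten=*/0, /*extra=*/0);

    /* Minimal network stub: only the field this layer reads on the CPU path. */
    network net = {0};
    net.input = calloc(l.inputs * l.batch, sizeof(float));

    forward_reorg_layer(l, net);   /* l.output now holds the rearranged tensor */

    free(net.input);
    free(l.output);
    free(l.delta);
    return 0;
}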