9 void reorg_cpu(
float *x,
int w,
int h,
int c,
int batch,
int stride,
int forward,
float *out)
12 int out_c = c/(stride*stride);
14 for(b = 0; b < batch; ++b){
15 for(k = 0; k < c; ++k){
16 for(j = 0; j < h; ++j){
17 for(i = 0; i < w; ++i){
18 int in_index = i + w*(j + h*(k + c*b));
20 int offset = k / out_c;
21 int w2 = i*stride + offset % stride;
22 int h2 = j*stride + offset / stride;
23 int out_index = w2 + w*stride*(h2 + h*stride*(c2 + out_c*b));
24 if(forward) out[out_index] = x[in_index];
25 else out[in_index] = x[out_index];
/* Transposes each batch image of x between channel-major layout
 * (c*size + i) and interleaved layout (i*layers + c), in place.
 * forward != 0: channel-major -> interleaved; forward == 0: the inverse. */
void flatten(float *x, int size, int layers, int batch, int forward)
{
    int i, c, b;
    float *swap = calloc(size*layers*batch, sizeof(float));
    if(!swap) return;   /* allocation failed: leave x untouched */
    for(b = 0; b < batch; ++b){
        for(c = 0; c < layers; ++c){
            for(i = 0; i < size; ++i){
                int i1 = b*layers*size + c*size + i;   /* channel-major index */
                int i2 = b*layers*size + i*layers + c; /* interleaved index  */
                if (forward) swap[i2] = x[i1];
                else swap[i1] = x[i2];
            }
        }
    }
    memcpy(x, swap, size*layers*batch*sizeof(float));
    free(swap);   /* scratch buffer would leak without this */
}
/* c[i] = s[i]*a[i] + (1-s[i])*b[i], elementwise over n entries.
 * b may be NULL, in which case its contribution is 0. */
void weighted_sum_cpu(float *a, float *b, float *s, int n, float *c)
{
    int i;
    for(i = 0; i < n; ++i){
        c[i] = s[i]*a[i] + (1-s[i])*(b ? b[i] : 0);
    }
}
/* Backward pass of weighted_sum_cpu: accumulates gradients of
 * c = s*a + (1-s)*b into da, db, ds given upstream gradient dc.
 * da and db may be NULL (gradient not needed); ds is always written. */
void weighted_delta_cpu(float *a, float *b, float *s, float *da, float *db, float *ds, int n, float *dc)
{
    int i;
    for(i = 0; i < n; ++i){
        if(da) da[i] += dc[i] * s[i];
        if(db) db[i] += dc[i] * (1-s[i]);
        ds[i] += dc[i] * (a[i] - b[i]);
    }
}
/* Residual/shortcut addition: out = s1*out + s2*add, matching two feature
 * maps of possibly different spatial sizes.  When the source (w1,h1) is
 * larger, it is subsampled by `stride`; when the destination (w2,h2) is
 * larger, it is upsampled by `sample`.  Aspect ratios must match. */
void shortcut_cpu(int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float s1, float s2, float *out)
{
    int stride = w1/w2;   /* > 1 when source is the larger map */
    int sample = w2/w1;   /* > 1 when destination is the larger map */
    assert(stride == h1/h2);
    assert(sample == h2/h1);
    if(stride < 1) stride = 1;
    if(sample < 1) sample = 1;
    int minw = (w1 < w2) ? w1 : w2;
    int minh = (h1 < h2) ? h1 : h2;
    int minc = (c1 < c2) ? c1 : c2;

    int i, j, k, b;
    for(b = 0; b < batch; ++b){
        for(k = 0; k < minc; ++k){
            for(j = 0; j < minh; ++j){
                for(i = 0; i < minw; ++i){
                    int out_index = i*sample + w2*(j*sample + h2*(k + c2*b));
                    int add_index = i*stride + w1*(j*stride + h1*(k + c1*b));
                    out[out_index] = s1*out[out_index] + s2*add[add_index];
                }
            }
        }
    }
}
/* Per-filter mean over batch and spatial positions (batch-norm statistics).
 * x layout: [batch][filters][spatial].  mean must hold `filters` floats. */
void mean_cpu(float *x, int batch, int filters, int spatial, float *mean)
{
    float scale = 1./(batch * spatial);
    int i, j, k;
    for(i = 0; i < filters; ++i){
        mean[i] = 0;
        for(j = 0; j < batch; ++j){
            for(k = 0; k < spatial; ++k){
                int index = j*filters*spatial + i*spatial + k;
                mean[i] += x[index];
            }
        }
        mean[i] *= scale;
    }
}
110 void variance_cpu(
float *x,
float *mean,
int batch,
int filters,
int spatial,
float *variance)
112 float scale = 1./(batch * spatial - 1);
114 for(i = 0; i < filters; ++i){
116 for(j = 0; j < batch; ++j){
117 for(k = 0; k < spatial; ++k){
118 int index = j*filters*spatial + i*spatial + k;
119 variance[i] += pow((x[index] - mean[i]), 2);
122 variance[i] *= scale;
129 for(b = 0; b < batch; ++b){
130 for(i = 0; i < spatial; ++i){
132 for(f = 0; f < filters; ++f){
133 int index = b*filters*spatial + f*spatial + i;
134 sum += powf(x[index], 2);
137 for(f = 0; f < filters; ++f){
138 int index = b*filters*spatial + f*spatial + i;
140 dx[index] = (1 - x[index]) / sum;
147 void normalize_cpu(
float *x,
float *mean,
float *variance,
int batch,
int filters,
int spatial)
150 for(b = 0; b < batch; ++b){
151 for(f = 0; f < filters; ++f){
152 for(i = 0; i < spatial; ++i){
153 int index = b*filters*spatial + f*spatial + i;
154 x[index] = (x[index] - mean[f])/(sqrt(variance[f]) + .000001f);
163 for(i = 0; i < N; ++i) X[i*INCX] = ALPHA;
/* Elementwise in-place multiply: Y[i*INCY] *= X[i*INCX]. */
void mul_cpu(int N, float *X, int INCX, float *Y, int INCY)
{
    int i;
    for(i = 0; i < N; ++i) Y[i*INCY] *= X[i*INCX];
}
172 void pow_cpu(
int N,
float ALPHA,
float *X,
int INCX,
float *Y,
int INCY)
175 for(i = 0; i < N; ++i) Y[i*INCY] = pow(X[i*INCX], ALPHA);
/* BLAS-style saxpy: Y[i*INCY] += ALPHA * X[i*INCX]. */
void axpy_cpu(int N, float ALPHA, float *X, int INCX, float *Y, int INCY)
{
    int i;
    for(i = 0; i < N; ++i) Y[i*INCY] += ALPHA*X[i*INCX];
}
/* BLAS-style sscal: X[i*INCX] *= ALPHA. */
void scal_cpu(int N, float ALPHA, float *X, int INCX)
{
    int i;
    for(i = 0; i < N; ++i) X[i*INCX] *= ALPHA;
}
/* Fills every strided element of X with ALPHA. */
void fill_cpu(int N, float ALPHA, float *X, int INCX)
{
    int i;
    for(i = 0; i < N; ++i) X[i*INCX] = ALPHA;
}
/* De-interleaves OUT into X and Y: for each batch item, the first NX
 * entries of OUT are accumulated into X and the next NY into Y.
 * X or Y may be NULL; the OUT cursor still advances past that part. */
void deinter_cpu(int NX, float *X, int NY, float *Y, int B, float *OUT)
{
    int i, j;
    int index = 0;
    for(j = 0; j < B; ++j) {
        for(i = 0; i < NX; ++i){
            if(X) X[j*NX + i] += OUT[index];
            ++index;
        }
        for(i = 0; i < NY; ++i){
            if(Y) Y[j*NY + i] += OUT[index];
            ++index;
        }
    }
}
/* Interleaves X and Y into OUT: for each batch item, writes NX entries
 * from X followed by NY entries from Y.  Inverse layout of deinter_cpu. */
void inter_cpu(int NX, float *X, int NY, float *Y, int B, float *OUT)
{
    int i, j;
    int index = 0;
    for(j = 0; j < B; ++j) {
        for(i = 0; i < NX; ++i){
            OUT[index++] = X[j*NX + i];
        }
        for(i = 0; i < NY; ++i){
            OUT[index++] = Y[j*NY + i];
        }
    }
}
/* BLAS-style scopy: Y[i*INCY] = X[i*INCX]. */
void copy_cpu(int N, float *X, int INCX, float *Y, int INCY)
{
    int i;
    for(i = 0; i < N; ++i) Y[i*INCY] = X[i*INCX];
}
235 for(i = 0; i < N; ++i) Z[i] += X[i]*Y[i];
/* Smooth-L1 (Huber-like) loss: quadratic for |diff| < 1, linear beyond.
 * Writes per-element loss into error and a gradient term into delta.
 * NOTE(review): the delta sign convention differs between the two branches
 * (diff vs -sign(diff)); preserved exactly as in the source — verify callers. */
void smooth_l1_cpu(int n, float *pred, float *truth, float *delta, float *error)
{
    int i;
    for(i = 0; i < n; ++i){
        float diff = truth[i] - pred[i];
        float abs_val = fabs(diff);
        if(abs_val < 1) {
            error[i] = diff * diff;
            delta[i] = diff;
        }
        else {
            error[i] = 2*abs_val - 1;
            delta[i] = (diff < 0) ? 1 : -1;
        }
    }
}
/* L1 loss: error[i] = |truth[i] - pred[i]|, delta[i] = sign(diff). */
void l1_cpu(int n, float *pred, float *truth, float *delta, float *error)
{
    int i;
    for(i = 0; i < n; ++i){
        float diff = truth[i] - pred[i];
        error[i] = fabs(diff);
        delta[i] = diff > 0 ? 1 : -1;
    }
}
268 for(i = 0; i < n; ++i){
271 error[i] = (
t) ? -log(p) : 0;
279 for(i = 0; i < n; ++i){
282 error[i] = -t*log(p) - (1-
t)*log(1-p);
/* L2 loss: error[i] = (truth[i] - pred[i])^2, delta[i] = truth[i] - pred[i]. */
void l2_cpu(int n, float *pred, float *truth, float *delta, float *error)
{
    int i;
    for(i = 0; i < n; ++i){
        float diff = truth[i] - pred[i];
        error[i] = diff * diff;
        delta[i] = diff;
    }
}
/* BLAS-style sdot: returns sum of X[i*INCX] * Y[i*INCY]. */
float dot_cpu(int N, float *X, int INCX, float *Y, int INCY)
{
    int i;
    float dot = 0;
    for(i = 0; i < N; ++i) dot += X[i*INCX] * Y[i*INCY];
    return dot;
}
305 void softmax(
float *input,
int n,
float temp,
int stride,
float *output)
309 float largest = -FLT_MAX;
310 for(i = 0; i < n; ++i){
311 if(input[i*stride] > largest) largest = input[i*stride];
313 for(i = 0; i < n; ++i){
314 float e = exp(input[i*stride]/temp - largest/temp);
316 output[i*stride] = e;
318 for(i = 0; i < n; ++i){
319 output[i*stride] /= sum;
/* Applies softmax() independently to every (batch, group) slice of input.
 * Each slice starts at b*batch_offset + g*group_offset and contains n
 * elements spaced `stride` apart; results land at the same offsets in
 * output. */
void softmax_cpu(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output)
{
    int g, b;
    for(b = 0; b < batch; ++b){
        for(g = 0; g < groups; ++g){
            softmax(input + b*batch_offset + g*group_offset, n, temp, stride, output + b*batch_offset + g*group_offset);
        }
    }
}
/* Nearest-neighbor upsample by `stride` with a scale factor.
 * forward != 0: out[(w*stride, h*stride)] = scale * in[(w,h)];
 * forward == 0: gradients accumulate back into `in` (+=). */
void upsample_cpu(float *in, int w, int h, int c, int batch, int stride, int forward, float scale, float *out)
{
    int i, j, k, b;
    for(b = 0; b < batch; ++b){
        for(k = 0; k < c; ++k){
            for(j = 0; j < h*stride; ++j){
                for(i = 0; i < w*stride; ++i){
                    int in_index = b*w*h*c + k*w*h + (j/stride)*w + i/stride;
                    int out_index = b*w*h*c*stride*stride + k*w*h*stride*stride + j*w*stride + i;
                    if(forward) out[out_index] = scale*in[in_index];
                    else in[in_index] += scale*out[out_index];
                }
            }
        }
    }
}
/* Forward declarations for the CPU BLAS/helper routines defined in this file.
 * (As extracted these lacked terminating semicolons and were not valid C.) */
void pow_cpu(int N, float ALPHA, float *X, int INCX, float *Y, int INCY);
void l2normalize_cpu(float *x, float *dx, int batch, int filters, int spatial);
void axpy_cpu(int N, float ALPHA, float *X, int INCX, float *Y, int INCY);
void l2_cpu(int n, float *pred, float *truth, float *delta, float *error);
void normalize_cpu(float *x, float *mean, float *variance, int batch, int filters, int spatial);
void variance_cpu(float *x, float *mean, int batch, int filters, int spatial, float *variance);
void scal_cpu(int N, float ALPHA, float *X, int INCX);
void smooth_l1_cpu(int n, float *pred, float *truth, float *delta, float *error);
void deinter_cpu(int NX, float *X, int NY, float *Y, int B, float *OUT);
void mul_cpu(int N, float *X, int INCX, float *Y, int INCY);
float dot_cpu(int N, float *X, int INCX, float *Y, int INCY);
void softmax(float *input, int n, float temp, int stride, float *output);
void softmax_x_ent_cpu(int n, float *pred, float *truth, float *delta, float *error);
void mean_cpu(float *x, int batch, int filters, int spatial, float *mean);
void logistic_x_ent_cpu(int n, float *pred, float *truth, float *delta, float *error);
void softmax_cpu(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output);
void shortcut_cpu(int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float s1, float s2, float *out);
void flatten(float *x, int size, int layers, int batch, int forward);
void inter_cpu(int NX, float *X, int NY, float *Y, int B, float *OUT);
void l1_cpu(int n, float *pred, float *truth, float *delta, float *error);
void const_cpu(int N, float ALPHA, float *X, int INCX);
void upsample_cpu(float *in, int w, int h, int c, int batch, int stride, int forward, float scale, float *out);
void weighted_delta_cpu(float *a, float *b, float *s, float *da, float *db, float *ds, int n, float *dc);
void reorg_cpu(float *x, int w, int h, int c, int batch, int stride, int forward, float *out);
void copy_cpu(int N, float *X, int INCX, float *Y, int INCY);
void mult_add_into_cpu(int N, float *X, float *Y, float *Z);
void fill_cpu(int N, float ALPHA, float *X, int INCX);
void error(const char *s);
void weighted_sum_cpu(float *a, float *b, float *s, int n, float *c);