1 // Copyright 2019 Google LLC
2 //
3 // This source code is licensed under the BSD-style license found in the
4 // LICENSE file in the root directory of this source tree.
5
6 #include <assert.h>
7 #include <math.h>
8 #include <stddef.h>
9 #include <stdint.h>
10 #include <stdlib.h>
11
12 #include <xnnpack.h>
13 #include <xnnpack/allocator.h>
14 #include <xnnpack/log.h>
15 #include <xnnpack/operator.h>
16 #include <xnnpack/params-init.h>
17 #include <xnnpack/params.h>
18
19
// Creates an F32 Global Average Pooling operator descriptor for the NCW
// (channels-first) layout.
//
// Validates all parameters, allocates a zero-initialized operator struct, and
// pre-computes the output clamping parameters. The per-width scale factor is
// left as NaN here and filled in at setup time, when the spatial width is
// known. On any failure the partially built operator is destroyed and an
// error status is returned; on success the operator is stored in
// *global_average_pooling_op_out.
enum xnn_status xnn_create_global_average_pooling_ncw_f32(
    size_t channels,
    float output_min,
    float output_max,
    uint32_t flags,
    xnn_operator_t* global_average_pooling_op_out)
{
  xnn_operator_t op = NULL;
  enum xnn_status status = xnn_status_uninitialized;

  if (!xnn_params.initialized) {
    xnn_log_error("failed to create Global Average Pooling operator: XNNPACK is not initialized");
    goto error;
  }

  // Parameter validation. Each failing check selects its status right before
  // jumping to the shared cleanup label.
  if (channels == 0) {
    xnn_log_error(
      "failed to create Global Average Pooling operator with %zu channels: number of channels must be non-zero",
      channels);
    status = xnn_status_invalid_parameter;
    goto error;
  }

  if (isnan(output_min)) {
    xnn_log_error(
      "failed to create Global Average Pooling operator with NaN output lower bound: lower bound must be non-NaN");
    status = xnn_status_invalid_parameter;
    goto error;
  }

  if (isnan(output_max)) {
    xnn_log_error(
      "failed to create Global Average Pooling operator with NaN output upper bound: upper bound must be non-NaN");
    status = xnn_status_invalid_parameter;
    goto error;
  }

  if (output_min >= output_max) {
    xnn_log_error(
      "failed to create Global Average Pooling operator with [%.7g, %.7g] output range: "
      "lower bound must be below upper bound",
      output_min, output_max);
    status = xnn_status_invalid_parameter;
    goto error;
  }

  // The NCW micro-kernel is only available on some architectures/configs.
  if (xnn_params.f32.spchw_gavgpool.ukernel == NULL) {
    xnn_log_error(
      "failed to create Global Average Pooling operator: "
      "only selected configurations parameters are supported");
    status = xnn_status_unsupported_parameter;
    goto error;
  }

  op = xnn_allocate_zero_simd_memory(sizeof(struct xnn_operator));
  if (op == NULL) {
    xnn_log_error("failed to allocate %zu bytes for Global Average Pooling operator descriptor", sizeof(struct xnn_operator));
    status = xnn_status_out_of_memory;
    goto error;
  }

  op->channels = channels;
  // Scale (first argument) is a NaN placeholder: xnn_setup_* recomputes it as
  // 1/width once the spatial extent is known.
  op->f32_gavgpool_params = xnn_init_f32_gavgpool_params(nanf(""), output_min, output_max, 0);

  op->type = xnn_operator_type_global_average_pooling_ncw_f32;
  op->ukernel.type = xnn_ukernel_type_global_average_pooling;

  op->state = xnn_run_state_invalid;

  *global_average_pooling_op_out = op;
  return xnn_status_success;

error:
  // xnn_delete_operator tolerates NULL, so this covers pre-allocation failures too.
  xnn_delete_operator(op);
  return status;
}
95
// Configures a previously created F32 NCW Global Average Pooling operator for
// a concrete batch size, spatial width, and input/output buffers.
//
// Fills in the micro-kernel context (strides in bytes for the NCW layout),
// updates the averaging scale to 1/width, and arranges a 2D parallelization
// over (batch, channels). Returns xnn_status_success and marks the operator
// ready; a zero batch size is a successful no-op (run state "skip").
enum xnn_status xnn_setup_global_average_pooling_ncw_f32(
    xnn_operator_t global_average_pooling_op,
    size_t batch_size,
    size_t width,
    const float* input,
    float* output,
    pthreadpool_t threadpool)
{
  xnn_operator_t op = global_average_pooling_op;

  if (op->type != xnn_operator_type_global_average_pooling_ncw_f32) {
    xnn_log_error("failed to setup Global Average Pooling (F32, NCW) operator: operator type mismatch");
    return xnn_status_invalid_parameter;
  }
  // Invalidate first so a failed setup leaves the operator unrunnable.
  op->state = xnn_run_state_invalid;

  if (!xnn_params.initialized) {
    xnn_log_error("failed to setup Global Average Pooling operator: XNNPACK is not initialized");
    return xnn_status_uninitialized;
  }

  if (width == 0) {
    xnn_log_error("failed to setup Global Average Pooling operator with width %zu: width must be non-zero", width);
    return xnn_status_invalid_parameter;
  }

  if (batch_size == 0) {
    // Nothing to compute; mark the operator so run is a no-op.
    op->state = xnn_run_state_skip;
    return xnn_status_success;
  }

  // Averaging scale becomes 1/width now that the spatial extent is known.
  xnn_update_f32_gavgpool_params(&op->f32_gavgpool_params,
    1.0f / (float) width, width);

  // In NCW layout each channel is a contiguous row of `width` floats; batches
  // are `channels` such rows. The output holds one float per channel.
  const size_t channel_bytes = width * sizeof(float);
  op->context.global_average_pooling_ncw = (struct global_average_pooling_ncw_context) {
    .input_elements = channel_bytes,
    .input = input,
    .input_channel_stride = channel_bytes,
    .input_batch_stride = op->channels * channel_bytes,
    .output = output,
    .output_channel_stride = sizeof(float),
    .output_batch_stride = op->channels * sizeof(float),
    .ukernel = xnn_params.f32.spchw_gavgpool.ukernel,
    .params.f32 = op->f32_gavgpool_params,
  };

  op->compute.type = xnn_parallelization_type_2d_tile_1d;
  op->compute.task_2d_tile_1d =
    (pthreadpool_task_2d_tile_1d_t) xnn_compute_global_average_pooling_ncw;
  op->compute.range[0] = batch_size;
  op->compute.range[1] = op->channels;
  // NOTE(review): the tile spans all channels, so channels are not split
  // across threads (xnn_params.f32.spchw_gavgpool.channel_tile is unused
  // here) — confirm this serialization is intentional.
  op->compute.tile[0] = op->channels;

  op->state = xnn_run_state_ready;

  return xnn_status_success;
}
151