/*
 * Copyright (c) 2016-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/NEON/functions/NEHOGMultiDetection.h"

#include "arm_compute/core/Error.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "arm_compute/runtime/Tensor.h"
#include "src/core/NEON/kernels/NEDerivativeKernel.h"
#include "src/core/NEON/kernels/NEFillBorderKernel.h"
#include "src/core/NEON/kernels/NEHOGDescriptorKernel.h"

namespace arm_compute
{
NEHOGMultiDetection::~NEHOGMultiDetection() = default;

NEHOGMultiDetection::NEHOGMultiDetection(std::shared_ptr<IMemoryManager> memory_manager) // NOLINT
    : _memory_group(std::move(memory_manager)),
      _gradient_kernel(),
      _orient_bin_kernel(),
      _block_norm_kernel(),
      _hog_detect_kernel(),
      _non_maxima_kernel(),
      _hog_space(),
      _hog_norm_space(),
      _detection_windows(),
      _mag(),
      _phase(),
      _non_maxima_suppression(false),
      _num_orient_bin_kernel(0),
      _num_block_norm_kernel(0),
      _num_hog_detect_kernel(0)
{
}

void NEHOGMultiDetection::configure(ITensor *input, const IMultiHOG *multi_hog, IDetectionWindowArray *detection_windows, const ISize2DArray *detection_window_strides, BorderMode border_mode,
                                    uint8_t constant_border_value, float threshold, bool non_maxima_suppression, float min_distance)
{
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);
    ARM_COMPUTE_ERROR_ON_INVALID_MULTI_HOG(multi_hog);
    ARM_COMPUTE_ERROR_ON(nullptr == detection_windows);
    ARM_COMPUTE_ERROR_ON(detection_window_strides->num_values() != multi_hog->num_models());

    const size_t       width      = input->info()->dimension(Window::DimX);
    const size_t       height     = input->info()->dimension(Window::DimY);
    const TensorShape &shape_img  = input->info()->tensor_shape();
    const size_t       num_models = multi_hog->num_models();
    PhaseType          phase_type = multi_hog->model(0)->info()->phase_type();

    size_t prev_num_bins     = multi_hog->model(0)->info()->num_bins();
    Size2D prev_cell_size    = multi_hog->model(0)->info()->cell_size();
    Size2D prev_block_size   = multi_hog->model(0)->info()->block_size();
    Size2D prev_block_stride = multi_hog->model(0)->info()->block_stride();

    /* Check if the NEHOGOrientationBinningKernel and NEHOGBlockNormalizationKernel kernels can be skipped for a specific HOG data-object
     *
     * 1) NEHOGOrientationBinningKernel is skipped if the cell size and the number of bins do not change.
     *    Since "multi_hog" is sorted, it is enough to compare the HOG descriptors at level "i" and level "i - 1"
     * 2) NEHOGBlockNormalizationKernel is additionally skipped only if the block size and the block stride do not change either.
     *    Since "multi_hog" is sorted, it is enough to compare the HOG descriptors at level "i" and level "i - 1"
     *
     * @note Since the orientation binning and block normalization kernels can be skipped, we need to keep track of the input to process for each kernel
     *       with "input_orient_bin", "input_hog_detect" and "input_block_norm"
     */
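    /* Illustrative example (hypothetical model configuration, traced from the loop below): with three models where
     * model 1 reuses the cell size, number of bins, block size and block stride of model 0, and model 2 uses a
     * different cell size, the bookkeeping vectors end up as:
     *   input_orient_bin = { 0, 2 }          - one orientation binning kernel per distinct (cell size, num bins) pair
     *   input_block_norm = { (0,0), (2,1) }  - pairs of (multi_hog index, index into input_orient_bin)
     *   input_hog_detect = { 0, 0, 1 }       - index into input_block_norm for each model
     */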
    std::vector<size_t>                    input_orient_bin;
    std::vector<size_t>                    input_hog_detect;
    std::vector<std::pair<size_t, size_t>> input_block_norm;

    input_orient_bin.push_back(0);
    input_hog_detect.push_back(0);
    input_block_norm.emplace_back(0, 0);

    for(size_t i = 1; i < num_models; ++i)
    {
        size_t cur_num_bins     = multi_hog->model(i)->info()->num_bins();
        Size2D cur_cell_size    = multi_hog->model(i)->info()->cell_size();
        Size2D cur_block_size   = multi_hog->model(i)->info()->block_size();
        Size2D cur_block_stride = multi_hog->model(i)->info()->block_stride();

        if((cur_num_bins != prev_num_bins) || (cur_cell_size.width != prev_cell_size.width) || (cur_cell_size.height != prev_cell_size.height))
        {
            prev_num_bins     = cur_num_bins;
            prev_cell_size    = cur_cell_size;
            prev_block_size   = cur_block_size;
            prev_block_stride = cur_block_stride;

            // Compute orientation binning and block normalization kernels. Update input to process
            input_orient_bin.push_back(i);
            input_block_norm.emplace_back(i, input_orient_bin.size() - 1);
        }
        else if((cur_block_size.width != prev_block_size.width) || (cur_block_size.height != prev_block_size.height) || (cur_block_stride.width != prev_block_stride.width)
                || (cur_block_stride.height != prev_block_stride.height))
        {
            prev_block_size   = cur_block_size;
            prev_block_stride = cur_block_stride;

            // Compute block normalization kernel. Update input to process
            input_block_norm.emplace_back(i, input_orient_bin.size() - 1);
        }

        // Update input to process for hog detector kernel
        input_hog_detect.push_back(input_block_norm.size() - 1);
    }

    _detection_windows      = detection_windows;
    _non_maxima_suppression = non_maxima_suppression;
    _num_orient_bin_kernel  = input_orient_bin.size(); // Number of NEHOGOrientationBinningKernel kernels to compute
    _num_block_norm_kernel  = input_block_norm.size(); // Number of NEHOGBlockNormalizationKernel kernels to compute
    _num_hog_detect_kernel  = input_hog_detect.size(); // Number of NEHOGDetector functions to compute

    _orient_bin_kernel.clear();
    _block_norm_kernel.clear();
    _hog_detect_kernel.clear();
    _hog_space.clear();
    _hog_norm_space.clear();

    _orient_bin_kernel.resize(_num_orient_bin_kernel);
    _block_norm_kernel.resize(_num_block_norm_kernel);
    _hog_detect_kernel.resize(_num_hog_detect_kernel);
    _hog_space.resize(_num_orient_bin_kernel);
    _hog_norm_space.resize(_num_block_norm_kernel);
    _non_maxima_kernel = CPPDetectionWindowNonMaximaSuppressionKernel();

    // Allocate tensors for magnitude and phase
    TensorInfo info_mag(shape_img, Format::S16);
    _mag.allocator()->init(info_mag);

    TensorInfo info_phase(shape_img, Format::U8);
    _phase.allocator()->init(info_phase);

    // Manage intermediate buffers
    _memory_group.manage(&_mag);
    _memory_group.manage(&_phase);

    // Initialise gradient kernel
    _gradient_kernel.configure(input, &_mag, &_phase, phase_type, border_mode, constant_border_value);

    // Configure NETensor for the HOG space and orientation binning kernel
    for(size_t i = 0; i < _num_orient_bin_kernel; ++i)
    {
        const size_t idx_multi_hog = input_orient_bin[i];

        // Get the corresponding cell size and number of bins
        const Size2D &cell     = multi_hog->model(idx_multi_hog)->info()->cell_size();
        const size_t  num_bins = multi_hog->model(idx_multi_hog)->info()->num_bins();

        // Calculate number of cells along the x and y directions for the hog_space
        const size_t num_cells_x = width / cell.width;
        const size_t num_cells_y = height / cell.height;

        // TensorShape of hog space
        TensorShape shape_hog_space = input->info()->tensor_shape();
        shape_hog_space.set(Window::DimX, num_cells_x);
        shape_hog_space.set(Window::DimY, num_cells_y);

        // Allocate HOG space
        TensorInfo info_space(shape_hog_space, num_bins, DataType::F32);
        _hog_space[i].allocator()->init(info_space);

        // Manage intermediate buffers
        _memory_group.manage(&_hog_space[i]);

        // Initialise orientation binning kernel
        _orient_bin_kernel[i].configure(&_mag, &_phase, &_hog_space[i], multi_hog->model(idx_multi_hog)->info());
    }

    // Allocate intermediate tensors
    _mag.allocator()->allocate();
    _phase.allocator()->allocate();

    // Configure NETensor for the normalized HOG space and block normalization kernel
    for(size_t i = 0; i < _num_block_norm_kernel; ++i)
    {
        const size_t idx_multi_hog  = input_block_norm[i].first;
        const size_t idx_orient_bin = input_block_norm[i].second;

        // Allocate normalized HOG space
        TensorInfo tensor_info(*(multi_hog->model(idx_multi_hog)->info()), width, height);
        _hog_norm_space[i].allocator()->init(tensor_info);

        // Manage intermediate buffers
        _memory_group.manage(&_hog_norm_space[i]);

        // Initialize block normalization kernel
        _block_norm_kernel[i].configure(&_hog_space[idx_orient_bin], &_hog_norm_space[i], multi_hog->model(idx_multi_hog)->info());
    }

    // Allocate intermediate tensors
    for(size_t i = 0; i < _num_orient_bin_kernel; ++i)
    {
        _hog_space[i].allocator()->allocate();
    }

    // Configure HOG detector kernel
    for(size_t i = 0; i < _num_hog_detect_kernel; ++i)
    {
        const size_t idx_block_norm = input_hog_detect[i];

        _hog_detect_kernel[i].configure(&_hog_norm_space[idx_block_norm], multi_hog->model(i), detection_windows, detection_window_strides->at(i), threshold, i);
    }

    // Configure non maxima suppression kernel
    _non_maxima_kernel.configure(_detection_windows, min_distance);

    // Allocate intermediate tensors
    for(size_t i = 0; i < _num_block_norm_kernel; ++i)
    {
        _hog_norm_space[i].allocator()->allocate();
    }
}

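/* Typical usage (illustrative sketch only; the input image, the MultiHOG container, the detection window
 * array and the per-model stride array are assumed to be created and initialised by the caller, and the
 * parameter values shown are placeholders):
 *
 *   NEHOGMultiDetection hog_multi_detection;
 *   hog_multi_detection.configure(&input, &multi_hog, &detection_windows, &detection_window_strides,
 *                                 BorderMode::REPLICATE, 0, 0.f, true, 1.f);
 *   hog_multi_detection.run(); // per frame: clears the detection windows and runs all configured kernels
 */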
void NEHOGMultiDetection::run()
{
    ARM_COMPUTE_ERROR_ON_MSG(_detection_windows == nullptr, "Unconfigured function");

    MemoryGroupResourceScope scope_mg(_memory_group);

    // Reset detection window
    _detection_windows->clear();

    // Run gradient
    _gradient_kernel.run();

    // Run orientation binning kernel
    for(auto &kernel : _orient_bin_kernel)
    {
        NEScheduler::get().schedule(&kernel, Window::DimY);
    }

    // Run block normalization kernel
    for(auto &kernel : _block_norm_kernel)
    {
        NEScheduler::get().schedule(&kernel, Window::DimY);
    }

    // Run HOG detector kernel
    for(auto &kernel : _hog_detect_kernel)
    {
        kernel.run();
    }

    // Run non-maxima suppression kernel if enabled
    if(_non_maxima_suppression)
    {
        NEScheduler::get().schedule(&_non_maxima_kernel, Window::DimY);
    }
}
} // namespace arm_compute