/*
 * Copyright (c) 2016-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/core/NEON/kernels/NEGaussianPyramidKernel.h"

#include "arm_compute/core/Coordinates.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/Window.h"
#include "src/core/NEON/INEKernel.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"

#include <arm_neon.h>
#include <cstddef>
#include <cstdint>
#include <tuple>

using namespace arm_compute;

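// Typical usage (a minimal sketch, not part of this file; src_u8, tmp_s16 and
// dst_u8 are placeholder tensors, and the border filling that the library's
// Gaussian pyramid function performs between the passes is omitted):
//
//   NEGaussianPyramidHorKernel  hor;
//   NEGaussianPyramidVertKernel vert;
//   hor.configure(&src_u8, &tmp_s16);  // U8 -> S16, half width
//   vert.configure(&tmp_s16, &dst_u8); // S16 -> U8, half height
//   NEScheduler::get().schedule(&hor, Window::DimY);
//   NEScheduler::get().schedule(&vert, Window::DimY);
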
NEGaussianPyramidHorKernel::NEGaussianPyramidHorKernel()
    : _l2_load_offset(0)
{
}

BorderSize NEGaussianPyramidHorKernel::border_size() const
{
    return BorderSize{ 0, 2 };
}

void NEGaussianPyramidHorKernel::configure(const ITensor *input, ITensor *output)
{
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::S16);
    ARM_COMPUTE_ERROR_ON(input->info()->dimension(1) != output->info()->dimension(1));

    for(size_t i = 2; i < Coordinates::num_max_dimensions; ++i)
    {
        ARM_COMPUTE_ERROR_ON(input->info()->dimension(i) != output->info()->dimension(i));
    }

    _input  = input;
    _output = output;

    // Configure kernel window
    constexpr unsigned int num_elems_processed_per_iteration = 16;
    constexpr unsigned int num_elems_read_per_iteration      = 32;
    constexpr unsigned int num_elems_written_per_iteration   = 8;
    const float            scale_x                           = static_cast<float>(output->info()->dimension(0)) / input->info()->dimension(0);

    Window                 win = calculate_max_window_horizontal(*input->info(), Steps(num_elems_processed_per_iteration));
    AccessWindowHorizontal output_access(output->info(), 0, num_elems_written_per_iteration, scale_x);

    // Sub-sampling selects odd pixels (1, 3, 5, ...) for images with even
    // width and even pixels (0, 2, 4, ...) for images with odd width. (Whether
    // a pixel is even or odd is determined based on the tensor shape, not the
    // valid region!)
    // Thus the offset from which the first pixel (L2) for the convolution is
    // loaded depends on the anchor and shape of the valid region.
    // In the case of an even shape (= even image width) we need to load L2
    // from -2 if the anchor is odd and from -1 if the anchor is even. That
    // makes sure that L2 is always loaded from an odd pixel.
    // On the other hand, for an odd shape (= odd image width) we need to load
    // L2 from -1 if the anchor is odd and from -2 if the anchor is even to
    // achieve the opposite effect.
    // The condition can be simplified to checking whether anchor + shape is
    // odd (-2) or even (-1), since adding an odd and an even number is the
    // only way to obtain an odd result.
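    // Worked example (illustrative values): for an image of even width 640
    // with anchor 0, anchor + shape = 640 is even, so the offset becomes -1
    // and the first convolution centre lands on pixel 1, i.e. an odd pixel,
    // as required for even widths.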
    _l2_load_offset = -border_size().left;

    if((_input->info()->valid_region().anchor[0] + _input->info()->valid_region().shape[0]) % 2 == 0)
    {
        _l2_load_offset += 1;
    }

    // Replace input access with static window
    update_window_and_padding(win,
                              AccessWindowHorizontal(input->info(), _l2_load_offset, num_elems_read_per_iteration),
                              output_access);

    output->info()->set_valid_region(ValidRegion(Coordinates(), output->info()->tensor_shape()));

    INEKernel::configure(win);
}

void NEGaussianPyramidHorKernel::run(const Window &window, const ThreadInfo &info)
{
    ARM_COMPUTE_UNUSED(info);
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
    ARM_COMPUTE_ERROR_ON(window.x().step() % 2);

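    // Coefficients of the 5-tap binomial filter 1-4-6-4-1 approximating the
    // Gaussian; the outer weights of 1 are applied by the plain add below.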
    static const int16x8_t six  = vdupq_n_s16(6);
    static const int16x8_t four = vdupq_n_s16(4);

    Window win_in(window);
    win_in.shift(Window::DimX, _l2_load_offset);

    Iterator in(_input, win_in);

    // The output is half the width of the input
    Window win_out(window);
    win_out.scale(Window::DimX, 0.5f);

    Iterator out(_output, win_out);

    execute_window_loop(window, [&](const Coordinates &)
    {
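        // vld2q_u8 de-interleaves 32 bytes into even- and odd-indexed pixels,
        // which directly yields the taps of a convolution evaluated at every
        // second pixel.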
        const uint8x16x2_t data_2q   = vld2q_u8(in.ptr());
        const uint8x16_t &data_even = data_2q.val[0];
        const uint8x16_t &data_odd  = data_2q.val[1];

        const int16x8_t data_l2 = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(data_even)));
        const int16x8_t data_l1 = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(data_odd)));
        const int16x8_t data_m  = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(vextq_u8(data_even, data_even, 1))));
        const int16x8_t data_r1 = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(vextq_u8(data_odd, data_odd, 1))));
        const int16x8_t data_r2 = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(vextq_u8(data_even, data_even, 2))));

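        // Weighted sum l2 + 4*l1 + 6*m + 4*r1 + r2, kept un-normalised in
        // S16; the division by the total kernel weight happens in the
        // vertical pass.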
        int16x8_t out_val = vaddq_s16(data_l2, data_r2);
        out_val           = vmlaq_s16(out_val, data_l1, four);
        out_val           = vmlaq_s16(out_val, data_m, six);
        out_val           = vmlaq_s16(out_val, data_r1, four);

        vst1q_s16(reinterpret_cast<int16_t *>(out.ptr()), out_val);
    },
    in, out);
}

NEGaussianPyramidVertKernel::NEGaussianPyramidVertKernel()
    : _t2_load_offset(0)
{
}

BorderSize NEGaussianPyramidVertKernel::border_size() const
{
    return BorderSize{ 2, 0 };
}

void NEGaussianPyramidVertKernel::configure(const ITensor *input, ITensor *output)
{
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::S16);
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8);
    ARM_COMPUTE_ERROR_ON(input->info()->dimension(0) != output->info()->dimension(0));

    for(size_t i = 2; i < Coordinates::num_max_dimensions; ++i)
    {
        ARM_COMPUTE_ERROR_ON(input->info()->dimension(i) != output->info()->dimension(i));
    }

    _input  = input;
    _output = output;

    // Configure kernel window
    constexpr unsigned int num_elems_processed_per_iteration = 16;
    constexpr unsigned int num_rows_processed_per_iteration  = 2;

    constexpr unsigned int num_elems_written_per_iteration = 16;
    constexpr unsigned int num_rows_written_per_iteration  = 1;

    constexpr unsigned int num_elems_read_per_iteration = 16;
    constexpr unsigned int num_rows_read_per_iteration  = 5;

    const float scale_y = static_cast<float>(output->info()->dimension(1)) / input->info()->dimension(1);

    Window                win = calculate_max_window(*input->info(), Steps(num_elems_processed_per_iteration, num_rows_processed_per_iteration));
    AccessWindowRectangle output_access(output->info(), 0, 0, num_elems_written_per_iteration, num_rows_written_per_iteration, 1.f, scale_y);

    // Determine whether we need to load even or odd rows. See above for a
    // detailed explanation.
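    // As in the horizontal kernel, but applied to rows: with an even height
    // and, say, anchor 0, anchor + shape is even, so the offset becomes -1
    // and the first convolution centre lands on row 1 (an odd row).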
    _t2_load_offset = -border_size().top;

    if((_input->info()->valid_region().anchor[1] + _input->info()->valid_region().shape[1]) % 2 == 0)
    {
        _t2_load_offset += 1;
    }

    update_window_and_padding(win,
                              AccessWindowRectangle(input->info(), 0, _t2_load_offset, num_elems_read_per_iteration, num_rows_read_per_iteration),
                              output_access);

    output->info()->set_valid_region(ValidRegion(Coordinates(), output->info()->tensor_shape()));

    INEKernel::configure(win);
}

void NEGaussianPyramidVertKernel::run(const Window &window, const ThreadInfo &info)
{
    ARM_COMPUTE_UNUSED(info);
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
    ARM_COMPUTE_ERROR_ON(window.x().step() != 16);
    ARM_COMPUTE_ERROR_ON(window.y().step() % 2);
    ARM_COMPUTE_ERROR_ON(_input->buffer() == nullptr);

    static const uint16x8_t six  = vdupq_n_u16(6);
    static const uint16x8_t four = vdupq_n_u16(4);

    Window win_in(window);
    // Load two batches of 8 values instead of 16 values at once
    win_in.set_dimension_step(Window::DimX, 8);
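    // (vmlaq_u16 operates on 8 lanes of U16, so the 16-pixel x-step is
    // processed as two 8-lane halves; see the "Low data" and "High data"
    // sections below.)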
    win_in.shift(Window::DimY, _t2_load_offset);

    Iterator in(_input, win_in);

    // The output is half the height of the input
    Window win_out(window);
    win_out.scale(Window::DimY, 0.5f);

    Iterator out(_output, win_out);

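    // Base pointers for the five rows (t2, t1, m, b1, b2) of the column
    // convolution; in.offset() supplies the per-iteration displacement of the
    // shifted input window.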
    const uint8_t *input_top2_ptr = _input->buffer() + _input->info()->offset_element_in_bytes(Coordinates(0, 0));
    const uint8_t *input_top_ptr  = _input->buffer() + _input->info()->offset_element_in_bytes(Coordinates(0, 1));
    const uint8_t *input_mid_ptr  = _input->buffer() + _input->info()->offset_element_in_bytes(Coordinates(0, 2));
    const uint8_t *input_low_ptr  = _input->buffer() + _input->info()->offset_element_in_bytes(Coordinates(0, 3));
    const uint8_t *input_low2_ptr = _input->buffer() + _input->info()->offset_element_in_bytes(Coordinates(0, 4));

    execute_window_loop(window, [&](const Coordinates &)
    {
        // Low data
        const uint16x8_t data_low_t2 = vreinterpretq_u16_s16(vld1q_s16(reinterpret_cast<const int16_t *>(input_top2_ptr + in.offset())));
        const uint16x8_t data_low_t1 = vreinterpretq_u16_s16(vld1q_s16(reinterpret_cast<const int16_t *>(input_top_ptr + in.offset())));
        const uint16x8_t data_low_m  = vreinterpretq_u16_s16(vld1q_s16(reinterpret_cast<const int16_t *>(input_mid_ptr + in.offset())));
        const uint16x8_t data_low_b1 = vreinterpretq_u16_s16(vld1q_s16(reinterpret_cast<const int16_t *>(input_low_ptr + in.offset())));
        const uint16x8_t data_low_b2 = vreinterpretq_u16_s16(vld1q_s16(reinterpret_cast<const int16_t *>(input_low2_ptr + in.offset())));

        uint16x8_t out_low = vaddq_u16(data_low_t2, data_low_b2);
        out_low            = vmlaq_u16(out_low, data_low_t1, four);
        out_low            = vmlaq_u16(out_low, data_low_m, six);
        out_low            = vmlaq_u16(out_low, data_low_b1, four);

        in.increment(Window::DimX);

        // High data
        const uint16x8_t data_high_t2 = vreinterpretq_u16_s16(vld1q_s16(reinterpret_cast<const int16_t *>(input_top2_ptr + in.offset())));
        const uint16x8_t data_high_t1 = vreinterpretq_u16_s16(vld1q_s16(reinterpret_cast<const int16_t *>(input_top_ptr + in.offset())));
        const uint16x8_t data_high_m  = vreinterpretq_u16_s16(vld1q_s16(reinterpret_cast<const int16_t *>(input_mid_ptr + in.offset())));
        const uint16x8_t data_high_b1 = vreinterpretq_u16_s16(vld1q_s16(reinterpret_cast<const int16_t *>(input_low_ptr + in.offset())));
        const uint16x8_t data_high_b2 = vreinterpretq_u16_s16(vld1q_s16(reinterpret_cast<const int16_t *>(input_low2_ptr + in.offset())));

        uint16x8_t out_high = vaddq_u16(data_high_t2, data_high_b2);
        out_high            = vmlaq_u16(out_high, data_high_t1, four);
        out_high            = vmlaq_u16(out_high, data_high_m, six);
        out_high            = vmlaq_u16(out_high, data_high_b1, four);

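        // Shifting right by 8 divides by 256, the total weight of the
        // separable 5x5 Gaussian (16 horizontal x 16 vertical), then narrows
        // to U8 with saturation.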
        vst1q_u8(out.ptr(), vcombine_u8(vqshrn_n_u16(out_low, 8), vqshrn_n_u16(out_high, 8)));
    },
    in, out);
}