/*
 * Copyright (c) 2016-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/core/NEON/kernels/NEMeanStdDevKernel.h"

#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/IAccessWindow.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Validate.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"

#include <arm_neon.h>
#include <cmath>
#include <tuple>
#include <utility>

using namespace arm_compute;

namespace arm_compute
{
class Coordinates;
} // namespace arm_compute

namespace
{
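/** Compute the sum (and optionally the sum of squares) of the U8 pixels covered by @p window.
 *
 * Each iteration loads 16 pixels and reduces them with widening additions
 * (u8 -> u16 -> u32), pairwise-accumulating the result into 64-bit lanes.
 *
 * @tparam calc_sum_squared If true, also accumulate the sum of squared pixel values (required for stddev).
 */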
template <bool calc_sum_squared>
std::pair<uint64x1_t, uint64x1_t> accumulate(const Window &window, Iterator &iterator)
{
    uint64x1_t sum         = vdup_n_u64(0);
    uint64x1_t sum_squared = vdup_n_u64(0);

    // Calculate sum
    execute_window_loop(window, [&](const Coordinates &)
    {
        const uint8x16_t in_data = vld1q_u8(iterator.ptr());

        // Sum of the low and high elements of data
        const uint16x8_t tmp0 = vaddl_u8(vget_low_u8(in_data), vget_high_u8(in_data));
        const uint32x4_t tmp1 = vaddl_u16(vget_low_u16(tmp0), vget_high_u16(tmp0));
        const uint32x2_t tmp2 = vadd_u32(vget_low_u32(tmp1), vget_high_u32(tmp1));

        // Update sum
        sum = vpadal_u32(sum, tmp2);

        if(calc_sum_squared)
        {
            const uint16x8_t square_data_low  = vmull_u8(vget_low_u8(in_data), vget_low_u8(in_data));
            const uint16x8_t square_data_high = vmull_u8(vget_high_u8(in_data), vget_high_u8(in_data));

            // Sum of the low and high elements of data
            const uint32x4_t tmp0_low  = vaddl_u16(vget_low_u16(square_data_low), vget_high_u16(square_data_low));
            const uint32x4_t tmp0_high = vaddl_u16(vget_low_u16(square_data_high), vget_high_u16(square_data_high));
            const uint32x4_t tmp1      = vaddq_u32(tmp0_low, tmp0_high);
            const uint32x2_t tmp2      = vadd_u32(vget_low_u32(tmp1), vget_high_u32(tmp1));

            // Update sum of squares
            sum_squared = vpadal_u32(sum_squared, tmp2);
        }
    },
    iterator);

    return std::make_pair(sum, sum_squared);
}
} // namespace

NEMeanStdDevKernel::NEMeanStdDevKernel()
    : _input(nullptr), _mean(nullptr), _stddev(nullptr), _global_sum(nullptr), _global_sum_squared(nullptr), _mtx(), _border_size(0)
{
}

BorderSize NEMeanStdDevKernel::border_size() const
{
    return _border_size;
}

void NEMeanStdDevKernel::configure(const IImage *input, float *mean, uint64_t *global_sum, float *stddev, uint64_t *global_sum_squared)
{
    ARM_COMPUTE_ERROR_ON_TENSOR_NOT_2D(input);
    ARM_COMPUTE_ERROR_ON(nullptr == mean);
    ARM_COMPUTE_ERROR_ON(nullptr == global_sum);
    ARM_COMPUTE_ERROR_ON(stddev && nullptr == global_sum_squared);
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);

    _input              = input;
    _mean               = mean;
    _stddev             = stddev;
    _global_sum         = global_sum;
    _global_sum_squared = global_sum_squared;

    constexpr unsigned int num_elems_processed_per_iteration = 16;

    // The kernel reads 16 pixels per iteration, so a border covers the elements
    // accessed beyond the end of the row, up to the next multiple of 16
    _border_size = BorderSize(ceil_to_multiple(input->info()->dimension(0), num_elems_processed_per_iteration) - input->info()->dimension(0));

    // Configure kernel window
    Window win = calculate_max_window(*input->info(), Steps(num_elems_processed_per_iteration));

    update_window_and_padding(win, AccessWindowHorizontal(input->info(), 0, num_elems_processed_per_iteration));

    INEKernel::configure(win);
}

// Each thread accumulates a local sum (and sum of squares) over its sub-window,
// then merges it into the global accumulators under the mutex.
void NEMeanStdDevKernel::run(const Window &window, const ThreadInfo &info)
{
    ARM_COMPUTE_UNUSED(info);
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
    Iterator input(_input, window);

    uint64x1_t local_sum         = vdup_n_u64(0);
    uint64x1_t local_sum_squared = vdup_n_u64(0);

    if(_stddev != nullptr)
    {
        std::tie(local_sum, local_sum_squared) = accumulate<true>(window, input);
    }
    else
    {
        std::tie(local_sum, local_sum_squared) = accumulate<false>(window, input);
    }

    const float num_pixels = _input->info()->dimension(0) * _input->info()->dimension(1);

    // Merge sum and calculate mean and stddev
    arm_compute::unique_lock<arm_compute::Mutex> lock(_mtx);

    *_global_sum += vget_lane_u64(local_sum, 0);

    const float mean = *_global_sum / num_pixels;
    *_mean           = mean;

    if(_stddev != nullptr)
    {
        const uint64_t tmp_sum_squared = vget_lane_u64(local_sum_squared, 0);
        *_global_sum_squared += tmp_sum_squared;
        // Population standard deviation: sqrt(E[X^2] - E[X]^2)
        *_stddev = std::sqrt((*_global_sum_squared / num_pixels) - (mean * mean));
    }

    lock.unlock();
}