1 /*
2 * Copyright (c) 2017-2020 Arm Limited.
3 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
#include "BatchNormalizationLayer.h"

#include "ActivationLayer.h"

#include "tests/validation/Helpers.h"

#include <cmath>
29
30 namespace arm_compute
31 {
32 namespace test
33 {
34 namespace validation
35 {
36 namespace reference
37 {
38 // Batch Normalization Layer for floating point type
39 template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type *>
batch_normalization_layer(const SimpleTensor<T> & src,const SimpleTensor<T> & mean,const SimpleTensor<T> & var,const SimpleTensor<T> & beta,const SimpleTensor<T> & gamma,float epsilon,ActivationLayerInfo act_info)40 SimpleTensor<T> batch_normalization_layer(const SimpleTensor<T> &src, const SimpleTensor<T> &mean, const SimpleTensor<T> &var, const SimpleTensor<T> &beta, const SimpleTensor<T> &gamma, float epsilon,
41 ActivationLayerInfo act_info)
42 {
43 SimpleTensor<T> result(src.shape(), src.data_type());
44
45 const auto cols = static_cast<int>(src.shape()[0]);
46 const auto rows = static_cast<int>(src.shape()[1]);
47 const auto depth = static_cast<int>(src.shape()[2]);
48 const int upper_dims = src.shape().total_size() / (cols * rows * depth);
49 #if defined(_OPENMP)
50 #pragma omp parallel for schedule(dynamic, 1) collapse(4)
51 #endif /* _OPENMP */
52 for(int r = 0; r < upper_dims; ++r)
53 {
54 for(int i = 0; i < depth; ++i)
55 {
56 for(int k = 0; k < rows; ++k)
57 {
58 for(int l = 0; l < cols; ++l)
59 {
60 const int pos = l + k * cols + i * rows * cols + r * cols * rows * depth;
61 const float denominator = sqrt(var[i] + epsilon);
62 const float numerator = src[pos] - mean[i];
63 const float x_bar = numerator / denominator;
64 result[pos] = beta[i] + x_bar * gamma[i];
65 }
66 }
67 }
68 }
69
70 if(act_info.enabled())
71 {
72 result = activation_layer(result, act_info);
73 }
74
75 return result;
76 }
77 template SimpleTensor<float> batch_normalization_layer(const SimpleTensor<float> &src, const SimpleTensor<float> &mean, const SimpleTensor<float> &var, const SimpleTensor<float> &beta,
78 const SimpleTensor<float> &gamma, float epsilon, ActivationLayerInfo act_info);
79 template SimpleTensor<half> batch_normalization_layer(const SimpleTensor<half> &src, const SimpleTensor<half> &mean, const SimpleTensor<half> &var,
80 const SimpleTensor<half> &beta,
81 const SimpleTensor<half> &gamma, float epsilon, ActivationLayerInfo act_info);
82 } // namespace reference
83 } // namespace validation
84 } // namespace test
85 } // namespace arm_compute
86