• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (c) 2017-2018 Arm Limited.
3  *
4  * SPDX-License-Identifier: MIT
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to
8  * deal in the Software without restriction, including without limitation the
9  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10  * sell copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in all
14  * copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22  * SOFTWARE.
23  */
24 #ifndef ARM_COMPUTE_TEST_BATCH_NORMALIZATION_LAYER_FIXTURE
25 #define ARM_COMPUTE_TEST_BATCH_NORMALIZATION_LAYER_FIXTURE
26 
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "tests/AssetsLibrary.h"
#include "tests/Globals.h"
#include "tests/IAccessor.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Fixture.h"
#include "tests/validation/Helpers.h"
#include "tests/validation/reference/BatchNormalizationLayer.h"

#include <random>
36 
37 namespace arm_compute
38 {
39 namespace test
40 {
41 namespace validation
42 {
43 template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
44 class BatchNormalizationLayerValidationFixture : public framework::Fixture
45 {
46 public:
47     template <typename...>
setup(TensorShape shape0,TensorShape shape1,float epsilon,bool use_beta,bool use_gamma,ActivationLayerInfo act_info,DataType dt,DataLayout data_layout)48     void setup(TensorShape shape0, TensorShape shape1, float epsilon, bool use_beta, bool use_gamma, ActivationLayerInfo act_info, DataType dt, DataLayout data_layout)
49     {
50         _data_type = dt;
51         _use_beta  = use_beta;
52         _use_gamma = use_gamma;
53 
54         _target    = compute_target(shape0, shape1, epsilon, act_info, dt, data_layout);
55         _reference = compute_reference(shape0, shape1, epsilon, act_info, dt);
56     }
57 
58 protected:
59     template <typename U>
fill(U && src_tensor,U && mean_tensor,U && var_tensor,U && beta_tensor,U && gamma_tensor)60     void fill(U &&src_tensor, U &&mean_tensor, U &&var_tensor, U &&beta_tensor, U &&gamma_tensor)
61     {
62         const float                      min_bound = -1.f;
63         const float                      max_bound = 1.f;
64         std::uniform_real_distribution<> distribution(min_bound, max_bound);
65         std::uniform_real_distribution<> distribution_var(0, max_bound);
66         library->fill(src_tensor, distribution, 0);
67         library->fill(mean_tensor, distribution, 1);
68         library->fill(var_tensor, distribution_var, 0);
69         if(_use_beta)
70         {
71             library->fill(beta_tensor, distribution, 3);
72         }
73         else
74         {
75             // Fill with default value 0.f
76             library->fill_tensor_value(beta_tensor, 0.f);
77         }
78         if(_use_gamma)
79         {
80             library->fill(gamma_tensor, distribution, 4);
81         }
82         else
83         {
84             // Fill with default value 1.f
85             library->fill_tensor_value(gamma_tensor, 1.f);
86         }
87     }
88 
compute_target(TensorShape shape0,const TensorShape & shape1,float epsilon,ActivationLayerInfo act_info,DataType dt,DataLayout data_layout)89     TensorType compute_target(TensorShape shape0, const TensorShape &shape1, float epsilon, ActivationLayerInfo act_info, DataType dt, DataLayout data_layout)
90     {
91         if(data_layout == DataLayout::NHWC)
92         {
93             permute(shape0, PermutationVector(2U, 0U, 1U));
94         }
95 
96         // Create tensors
97         TensorType src   = create_tensor<TensorType>(shape0, dt, 1, QuantizationInfo(), data_layout);
98         TensorType dst   = create_tensor<TensorType>(shape0, dt, 1, QuantizationInfo(), data_layout);
99         TensorType mean  = create_tensor<TensorType>(shape1, dt, 1);
100         TensorType var   = create_tensor<TensorType>(shape1, dt, 1);
101         TensorType beta  = create_tensor<TensorType>(shape1, dt, 1);
102         TensorType gamma = create_tensor<TensorType>(shape1, dt, 1);
103 
104         // Create and configure function
105         FunctionType norm;
106         TensorType *beta_ptr  = _use_beta ? &beta : nullptr;
107         TensorType *gamma_ptr = _use_gamma ? &gamma : nullptr;
108         norm.configure(&src, &dst, &mean, &var, beta_ptr, gamma_ptr, epsilon, act_info);
109 
110         ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
111         ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
112         ARM_COMPUTE_EXPECT(mean.info()->is_resizable(), framework::LogLevel::ERRORS);
113         ARM_COMPUTE_EXPECT(var.info()->is_resizable(), framework::LogLevel::ERRORS);
114         ARM_COMPUTE_EXPECT(beta.info()->is_resizable(), framework::LogLevel::ERRORS);
115         ARM_COMPUTE_EXPECT(gamma.info()->is_resizable(), framework::LogLevel::ERRORS);
116 
117         // Allocate tensors
118         src.allocator()->allocate();
119         dst.allocator()->allocate();
120         mean.allocator()->allocate();
121         var.allocator()->allocate();
122         beta.allocator()->allocate();
123         gamma.allocator()->allocate();
124 
125         ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
126         ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
127         ARM_COMPUTE_EXPECT(!mean.info()->is_resizable(), framework::LogLevel::ERRORS);
128         ARM_COMPUTE_EXPECT(!var.info()->is_resizable(), framework::LogLevel::ERRORS);
129         ARM_COMPUTE_EXPECT(!beta.info()->is_resizable(), framework::LogLevel::ERRORS);
130         ARM_COMPUTE_EXPECT(!gamma.info()->is_resizable(), framework::LogLevel::ERRORS);
131 
132         // Fill tensors
133         fill(AccessorType(src), AccessorType(mean), AccessorType(var), AccessorType(beta), AccessorType(gamma));
134 
135         // Compute function
136         norm.run();
137 
138         return dst;
139     }
140 
compute_reference(const TensorShape & shape0,const TensorShape & shape1,float epsilon,ActivationLayerInfo act_info,DataType dt)141     SimpleTensor<T> compute_reference(const TensorShape &shape0, const TensorShape &shape1, float epsilon, ActivationLayerInfo act_info, DataType dt)
142     {
143         // Create reference
144         SimpleTensor<T> ref_src{ shape0, dt, 1 };
145         SimpleTensor<T> ref_mean{ shape1, dt, 1 };
146         SimpleTensor<T> ref_var{ shape1, dt, 1 };
147         SimpleTensor<T> ref_beta{ shape1, dt, 1 };
148         SimpleTensor<T> ref_gamma{ shape1, dt, 1 };
149 
150         // Fill reference
151         fill(ref_src, ref_mean, ref_var, ref_beta, ref_gamma);
152 
153         return reference::batch_normalization_layer(ref_src, ref_mean, ref_var, ref_beta, ref_gamma, epsilon, act_info);
154     }
155 
156     TensorType      _target{};
157     SimpleTensor<T> _reference{};
158     DataType        _data_type{};
159     bool            _use_beta{};
160     bool            _use_gamma{};
161 };
162 } // namespace validation
163 } // namespace test
164 } // namespace arm_compute
165 #endif /* ARM_COMPUTE_TEST_BATCH_NORMALIZATION_LAYER_FIXTURE */
166