/*
 * Copyright (c) 2018 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_TEST_BATCH_NORMALIZATION_LAYER_FUSION_FIXTURE
#define ARM_COMPUTE_TEST_BATCH_NORMALIZATION_LAYER_FUSION_FIXTURE

#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "tests/AssetsLibrary.h"
#include "tests/Globals.h"
#include "tests/IAccessor.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Fixture.h"
#include "tests/validation/Helpers.h"
#include "tests/validation/reference/BatchNormalizationLayer.h"
#include "tests/validation/reference/ConvolutionLayer.h"

#include <random> // std::uniform_real_distribution used in fill()

namespace arm_compute
{
namespace test
{
namespace validation
{
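/** Fixture for validating the fusion of batch normalization into a preceding convolution layer.
 *
 * The target path fuses the batch normalization parameters (mean, variance, beta, gamma) into the
 * convolution weights and bias and then runs a single convolution. The reference path runs the
 * convolution followed by a separate batch normalization. The calling test case compares the two results.
 */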
template <typename TensorType, typename AccessorType, typename ConvolutionFunctionType, typename FusionFunctionType, typename T>
class BatchNormalizationLayerFusionValidationFixture : public framework::Fixture
{
public:
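    /** Configure and run both the target (fused) and reference (unfused) paths.
     *
     * @param[in] src_shape   Shape of the convolution input tensor.
     * @param[in] w_shape     Shape of the convolution weights.
     * @param[in] b_shape     Shape of the convolution bias and of the batch normalization parameter tensors.
     * @param[in] dst_shape   Shape of the output tensor.
     * @param[in] info        Padding and stride information of the convolution.
     * @param[in] dilation    Convolution dilation (unused by this fixture).
     * @param[in] use_conv_b  True if the convolution uses a bias tensor.
     * @param[in] use_beta    True if the batch normalization uses a beta parameter.
     * @param[in] use_gamma   True if the batch normalization uses a gamma parameter.
     * @param[in] epsilon     Epsilon used by the batch normalization.
     * @param[in] dt          Data type of all tensors.
     * @param[in] data_layout Data layout of the input, weights and output tensors.
     */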
    template <typename...>
    void setup(TensorShape src_shape, TensorShape w_shape, TensorShape b_shape, TensorShape dst_shape, PadStrideInfo info, Size2D dilation,
               bool use_conv_b, bool use_beta, bool use_gamma, float epsilon, DataType dt, DataLayout data_layout)
    {
        ARM_COMPUTE_UNUSED(dilation);

        _data_type   = dt;
        _data_layout = data_layout;
        _use_conv_b  = use_conv_b;
        _use_beta    = use_beta;
        _use_gamma   = use_gamma;

        _target    = compute_target(src_shape, w_shape, b_shape, dst_shape, info, epsilon);
        _reference = compute_reference(src_shape, w_shape, b_shape, dst_shape, info, epsilon);
    }

protected:
    template <typename U>
    void fill(U &&src, U &&w_tensor, U &&b_tensor, U &&mean_tensor, U &&var_tensor, U &&beta_tensor, U &&gamma_tensor)
    {
        // Values in [-1, 1) for most tensors; the variance must be non-negative, so it is drawn from [0, 1)
        std::uniform_real_distribution<> distribution(-1.f, 1.f);
        std::uniform_real_distribution<> distribution_gz(0, 1.f);

        library->fill(src, distribution, 0);
        library->fill(w_tensor, distribution, 1);
        library->fill(mean_tensor, distribution, 2);
        library->fill(var_tensor, distribution_gz, 3);
        // Unused parameters are filled with their identity values (0 for bias/beta, 1 for gamma)
        _use_conv_b ? library->fill(b_tensor, distribution, 4) : library->fill_tensor_value(b_tensor, 0.f);
        _use_beta ? library->fill(beta_tensor, distribution, 5) : library->fill_tensor_value(beta_tensor, 0.f);
        _use_gamma ? library->fill(gamma_tensor, distribution, 6) : library->fill_tensor_value(gamma_tensor, 1.f);
    }

    TensorType compute_target(TensorShape src_shape, TensorShape w_shape, TensorShape b_shape, TensorShape dst_shape, PadStrideInfo info, float epsilon)
    {
        if(_data_layout == DataLayout::NHWC)
        {
            permute(src_shape, PermutationVector(2U, 0U, 1U));
            permute(w_shape, PermutationVector(2U, 0U, 1U));
            permute(dst_shape, PermutationVector(2U, 0U, 1U));
        }

        // Create tensors
        TensorType src      = create_tensor<TensorType>(src_shape, _data_type, 1, QuantizationInfo(), _data_layout);
        TensorType conv_w   = create_tensor<TensorType>(w_shape, _data_type, 1, QuantizationInfo(), _data_layout);
        TensorType conv_b   = create_tensor<TensorType>(b_shape, _data_type, 1, QuantizationInfo(), _data_layout);
        TensorType bn_mean  = create_tensor<TensorType>(b_shape, _data_type, 1, QuantizationInfo(), _data_layout);
        TensorType bn_var   = create_tensor<TensorType>(b_shape, _data_type, 1, QuantizationInfo(), _data_layout);
        TensorType bn_beta  = create_tensor<TensorType>(b_shape, _data_type, 1, QuantizationInfo(), _data_layout);
        TensorType bn_gamma = create_tensor<TensorType>(b_shape, _data_type, 1, QuantizationInfo(), _data_layout);
        TensorType fused_w  = create_tensor<TensorType>(w_shape, _data_type, 1, QuantizationInfo(), _data_layout);
        TensorType fused_b  = create_tensor<TensorType>(b_shape, _data_type, 1, QuantizationInfo(), _data_layout);
        TensorType dst      = create_tensor<TensorType>(dst_shape, _data_type, 1, QuantizationInfo(), _data_layout);

        // Create and configure functions; optional parameters are passed as nullptr when unused
        FusionFunctionType      fuse_fn;
        ConvolutionFunctionType conv_fn;
        TensorType             *conv_b_ptr = _use_conv_b ? &conv_b : nullptr;
        TensorType             *beta_ptr   = _use_beta ? &bn_beta : nullptr;
        TensorType             *gamma_ptr  = _use_gamma ? &bn_gamma : nullptr;
        fuse_fn.configure(&conv_w, &bn_mean, &bn_var, &fused_w, &fused_b, conv_b_ptr, beta_ptr, gamma_ptr, epsilon);
        conv_fn.configure(&src, &fused_w, &fused_b, &dst, info);

        ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(conv_w.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(conv_b.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(bn_mean.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(bn_var.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(bn_beta.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(bn_gamma.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(fused_w.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(fused_b.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Allocate tensors
        src.allocator()->allocate();
        conv_w.allocator()->allocate();
        conv_b.allocator()->allocate();
        bn_mean.allocator()->allocate();
        bn_var.allocator()->allocate();
        bn_beta.allocator()->allocate();
        bn_gamma.allocator()->allocate();
        fused_w.allocator()->allocate();
        fused_b.allocator()->allocate();
        dst.allocator()->allocate();

        ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!conv_w.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!conv_b.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!bn_mean.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!bn_var.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!bn_beta.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!bn_gamma.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!fused_w.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!fused_b.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Fill tensors
        fill(AccessorType(src),
             AccessorType(conv_w), AccessorType(conv_b),
             AccessorType(bn_mean), AccessorType(bn_var), AccessorType(bn_beta), AccessorType(bn_gamma));

        // Compute functions: fuse the batch normalization into the convolution parameters, then run the convolution
        fuse_fn.run();
        conv_fn.run();

        return dst;
    }

    SimpleTensor<T> compute_reference(TensorShape src_shape, TensorShape w_shape, TensorShape b_shape, TensorShape dst_shape, PadStrideInfo info, float epsilon)
    {
        // Create reference
        SimpleTensor<T> src{ src_shape, _data_type, 1 };
        SimpleTensor<T> conv_w{ w_shape, _data_type, 1 };
        SimpleTensor<T> conv_b{ b_shape, _data_type, 1 };
        SimpleTensor<T> bn_var{ b_shape, _data_type, 1 };
        SimpleTensor<T> bn_mean{ b_shape, _data_type, 1 };
        SimpleTensor<T> bn_beta{ b_shape, _data_type, 1 };
        SimpleTensor<T> bn_gamma{ b_shape, _data_type, 1 };

        // Fill reference
        fill(src, conv_w, conv_b, bn_mean, bn_var, bn_beta, bn_gamma);

        // Calculate Conv + BN as two separate reference steps (no fusion)
        auto conv_res = reference::convolution_layer(src, conv_w, conv_b, dst_shape, info);
        return reference::batch_normalization_layer(conv_res, bn_mean, bn_var, bn_beta, bn_gamma, epsilon, ActivationLayerInfo());
    }

    TensorType      _target{};
    SimpleTensor<T> _reference{};
    DataType        _data_type{};
    DataLayout      _data_layout{};
    bool            _use_conv_b{};
    bool            _use_beta{};
    bool            _use_gamma{};
};
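
// A minimal usage sketch (the concrete tensor, accessor, convolution and fusion function types below
// are illustrative assumptions; they depend on the backend under test):
//
//   template <typename T>
//   using NEBatchNormalizationLayerFusionFixture =
//       BatchNormalizationLayerFusionValidationFixture<Tensor, Accessor, NEConvolutionLayer, NEFuseBatchNormalization, T>;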
} // namespace validation
} // namespace test
} // namespace arm_compute
#endif /* ARM_COMPUTE_TEST_BATCH_NORMALIZATION_LAYER_FUSION_FIXTURE */