/*
 * Copyright (c) 2017-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
23 */ 24 #include "arm_compute/core/Helpers.h" 25 #include "arm_compute/core/TensorShape.h" 26 #include "arm_compute/core/Types.h" 27 #include "arm_compute/core/utils/misc/ShapeCalculator.h" 28 #include "tests/AssetsLibrary.h" 29 #include "tests/Globals.h" 30 #include "tests/IAccessor.h" 31 #include "tests/framework/Asserts.h" 32 #include "tests/framework/Fixture.h" 33 #include "tests/validation/Helpers.h" 34 #include "tests/validation/fixtures/ConvolutionLayerFixture.h" 35 #include "tests/validation/reference/ConvolutionLayer.h" 36 #include "tests/validation/reference/Permute.h" 37 38 #include <random> 39 40 namespace arm_compute 41 { 42 namespace test 43 { 44 namespace validation 45 { 46 using namespace arm_compute::misc::shape_calculator; 47 48 template <typename TensorType, typename AccessorType, typename FunctionType, typename T> 49 class DirectConvolutionValidationGenericFixture : public framework::Fixture 50 { 51 public: 52 using TBias = typename std::conditional < std::is_same<T, uint8_t>::value || std::is_same<T, int8_t>::value, int32_t, T >::type; 53 54 template <typename...> setup(TensorShape input_shape,int stride_x,int stride_y,int pad_x,int pad_y,unsigned int kernel_size,unsigned int num_kernels,DataType data_type,QuantizationInfo quantization_info,ActivationLayerInfo act_info,DataLayout data_layout)55 void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels, 56 DataType data_type, QuantizationInfo quantization_info, ActivationLayerInfo act_info, DataLayout data_layout) 57 { 58 _quantization_info = quantization_info; 59 _data_type = data_type; 60 61 TensorShape weights_shape(kernel_size, kernel_size, input_shape.z(), num_kernels); 62 const TensorShape bias_shape(num_kernels); 63 const PadStrideInfo info(stride_x, stride_y, pad_x, pad_y, DimensionRoundingType::FLOOR); 64 const DataType bias_data_type = is_data_type_quantized_asymmetric(data_type) ? 
DataType::S32 : data_type; 65 66 TensorInfo input_info = TensorInfo(input_shape, 1, data_type); 67 TensorInfo weights_info = TensorInfo(weights_shape, 1, data_type); 68 69 const TensorShape output_shape = compute_deep_convolution_shape(input_info, weights_info, info); 70 71 _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, quantization_info, act_info, data_layout); 72 _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, quantization_info, act_info); 73 } 74 75 template <typename...> setup(TensorShape input_shape,TensorShape weights_shape,TensorShape bias_shape,TensorShape output_shape,PadStrideInfo info,Size2D dilation,DataType data_type,QuantizationInfo quantization_info,ActivationLayerInfo act_info,DataLayout data_layout)76 void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, 77 DataType data_type, QuantizationInfo quantization_info, ActivationLayerInfo act_info, DataLayout data_layout) 78 { 79 ARM_COMPUTE_ERROR_ON(data_layout == DataLayout::UNKNOWN); 80 ARM_COMPUTE_UNUSED(dilation); 81 82 _quantization_info = quantization_info; 83 _data_type = data_type; 84 85 const DataType bias_data_type = is_data_type_quantized_asymmetric(data_type) ? 
DataType::S32 : data_type; 86 87 _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, quantization_info, act_info, data_layout); 88 _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, bias_data_type, quantization_info, act_info); 89 } 90 91 protected: 92 template <typename U> fill(U && tensor,int i)93 void fill(U &&tensor, int i) 94 { 95 switch(tensor.data_type()) 96 { 97 case DataType::QASYMM8: 98 { 99 std::uniform_int_distribution<uint8_t> distribution(0, 50); 100 library->fill(tensor, distribution, i); 101 break; 102 } 103 case DataType::QASYMM8_SIGNED: 104 { 105 // Use small input range to avoid all the test results being saturated at the end. 106 std::uniform_int_distribution<int8_t> distribution(-25, 25); 107 library->fill(tensor, distribution, i); 108 break; 109 } 110 case DataType::F16: 111 case DataType::F32: 112 { 113 std::uniform_real_distribution<> distribution(-1.f, 1.f); 114 library->fill(tensor, distribution, i); 115 break; 116 } 117 case DataType::S32: 118 { 119 std::uniform_int_distribution<int32_t> distribution(-5, 5); 120 library->fill(tensor, distribution, i); 121 break; 122 } 123 default: 124 library->fill_tensor_uniform(tensor, i); 125 } 126 } 127 compute_target(TensorShape input_shape,TensorShape weights_shape,const TensorShape & bias_shape,TensorShape output_shape,const PadStrideInfo & info,DataType data_type,DataType bias_data_type,QuantizationInfo quantization_info,ActivationLayerInfo act_info,const DataLayout & data_layout)128 TensorType compute_target(TensorShape input_shape, TensorShape weights_shape, const TensorShape &bias_shape, TensorShape output_shape, const PadStrideInfo &info, 129 DataType data_type, DataType bias_data_type, QuantizationInfo quantization_info, ActivationLayerInfo act_info, const DataLayout &data_layout) 130 { 131 if(data_layout == DataLayout::NHWC) 132 { 133 permute(input_shape, PermutationVector(2U, 0U, 
1U)); 134 permute(weights_shape, PermutationVector(2U, 0U, 1U)); 135 permute(output_shape, PermutationVector(2U, 0U, 1U)); 136 } 137 138 // Create tensors 139 TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, quantization_info, data_layout); 140 TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1, quantization_info, data_layout); 141 TensorType bias = create_tensor<TensorType>(bias_shape, bias_data_type, 1, quantization_info); 142 TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, quantization_info, data_layout); 143 144 // Create and configure function 145 FunctionType conv; 146 conv.configure(&src, &weights, &bias, &dst, info, act_info); 147 148 ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS); 149 ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS); 150 ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS); 151 ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS); 152 153 // Allocate tensors 154 src.allocator()->allocate(); 155 weights.allocator()->allocate(); 156 bias.allocator()->allocate(); 157 dst.allocator()->allocate(); 158 159 ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS); 160 ARM_COMPUTE_EXPECT(!weights.info()->is_resizable(), framework::LogLevel::ERRORS); 161 ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS); 162 ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS); 163 164 // Fill tensors 165 fill(AccessorType(src), 0); 166 fill(AccessorType(weights), 1); 167 fill(AccessorType(bias), 2); 168 169 // Compute NEConvolutionLayer function 170 conv.run(); 171 172 return dst; 173 } 174 compute_reference(const TensorShape & input_shape,const TensorShape & weights_shape,const TensorShape & bias_shape,const TensorShape & output_shape,const PadStrideInfo & info,DataType data_type,DataType bias_data_type,QuantizationInfo 
quantization_info,ActivationLayerInfo act_info)175 SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info, 176 DataType data_type, DataType bias_data_type, QuantizationInfo quantization_info, ActivationLayerInfo act_info) 177 { 178 // Create reference 179 SimpleTensor<T> src{ input_shape, data_type, 1, quantization_info }; 180 SimpleTensor<T> weights{ weights_shape, data_type, 1, quantization_info }; 181 SimpleTensor<TBias> bias{ bias_shape, bias_data_type, 1, quantization_info }; 182 183 // Fill reference 184 fill(src, 0); 185 fill(weights, 1); 186 fill(bias, 2); 187 188 SimpleTensor<T> dst = reference::convolution_layer<T>(src, weights, bias, output_shape, info); 189 return (act_info.enabled()) ? reference::activation_layer<T>(dst, act_info) : dst; 190 } 191 TensorType _target{}; 192 SimpleTensor<T> _reference{}; 193 QuantizationInfo _quantization_info{}; 194 DataType _data_type{}; 195 }; 196 197 template <typename TensorType, typename AccessorType, typename FunctionType, typename T> 198 class DirectConvolutionValidationFixture : public DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T> 199 { 200 public: 201 template <typename...> setup(TensorShape input_shape,int stride_x,int stride_y,int pad_x,int pad_y,unsigned int kernel_size,unsigned int num_kernels,DataType data_type,ActivationLayerInfo act_info,DataLayout data_layout)202 void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels, DataType data_type, ActivationLayerInfo act_info, 203 DataLayout data_layout) 204 { 205 DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type, QuantizationInfo(), 206 act_info, data_layout); 207 } 208 }; 209 210 
template <typename TensorType, typename AccessorType, typename FunctionType, typename T> 211 class DirectConvolutionValidationQuantizedFixture : public DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T> 212 { 213 public: 214 template <typename...> setup(TensorShape input_shape,int stride_x,int stride_y,int pad_x,int pad_y,unsigned int kernel_size,unsigned int num_kernels,DataType data_type,QuantizationInfo quantization_info,ActivationLayerInfo act_info,DataLayout data_layout)215 void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels, DataType data_type, QuantizationInfo quantization_info, 216 ActivationLayerInfo act_info, DataLayout data_layout) 217 { 218 DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type, quantization_info, 219 act_info, data_layout); 220 } 221 }; 222 223 template <typename TensorType, typename AccessorType, typename FunctionType, typename T> 224 class DirectConvolutionValidationWithTensorShapesQuantizedFixture : public DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T> 225 { 226 public: 227 template <typename...> setup(TensorShape input_shape,TensorShape weights_shape,TensorShape bias_shape,TensorShape output_shape,PadStrideInfo info,Size2D dilation,DataType data_type,QuantizationInfo quantization_info,ActivationLayerInfo act_info,DataLayout data_layout)228 void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, 229 DataType data_type, QuantizationInfo quantization_info, ActivationLayerInfo act_info, DataLayout data_layout) 230 { 231 DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, 
dilation, data_type, quantization_info, 232 act_info, data_layout); 233 } 234 }; 235 236 template <typename TensorType, typename AccessorType, typename FunctionType, typename T> 237 class DirectConvolutionValidationWithTensorShapesFixture : public DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T> 238 { 239 public: 240 template <typename...> setup(TensorShape input_shape,TensorShape weights_shape,TensorShape bias_shape,TensorShape output_shape,PadStrideInfo info,Size2D dilation,DataType data_type,ActivationLayerInfo act_info)241 void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, 242 DataType data_type, ActivationLayerInfo act_info) 243 { 244 DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, data_type, QuantizationInfo(), 245 act_info, DataLayout::NCHW); 246 } 247 }; 248 249 } // namespace validation 250 } // namespace test 251 } // namespace arm_compute 252