/*
 * Copyright (c) 2017-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "tests/AssetsLibrary.h"
#include "tests/Globals.h"
#include "tests/IAccessor.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Fixture.h"
#include "tests/validation/Helpers.h"
#include "tests/validation/reference/DeconvolutionLayer.h"

#include <random>

namespace arm_compute
{
namespace test
{
namespace validation
{
using namespace arm_compute::misc::shape_calculator;

template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class DeconvolutionLayerFixtureBase : public framework::Fixture
{
public:
    using TBias = typename std::conditional < std::is_same<typename std::decay<T>::type, uint8_t>::value || std::is_same<typename std::decay<T>::type, int8_t>::value, int32_t, T >::type;

public:
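    /** Runs the function under test and the reference implementation on identically
     * filled inputs; the results are stored in _target and _reference for validation. */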
    template <typename...>
    void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info,
               DataType data_type, DataLayout data_layout, QuantizationInfo input_quantization_info, QuantizationInfo output_quantization_info, bool add_bias)
    {
        _data_type                = data_type;
        _bias_data_type           = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
        _data_layout              = data_layout;
        _input_quantization_info  = input_quantization_info;
        _output_quantization_info = output_quantization_info;

        _target    = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, add_bias);
        _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, add_bias);
    }

protected:
    template <typename U>
    void fill(U &&tensor, int i)
    {
        switch(tensor.data_type())
        {
            case DataType::QASYMM8:
            {
                std::pair<int, int>                    bounds = get_quantized_bounds(tensor.quantization_info(), -1.0f, 1.0f);
                std::uniform_int_distribution<uint8_t> distribution(bounds.first, bounds.second);
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::QASYMM8_SIGNED:
            {
                std::pair<int, int>                   bounds = get_quantized_qasymm8_signed_bounds(tensor.quantization_info(), -1.0f, 1.0f);
                std::uniform_int_distribution<int8_t> distribution(bounds.first, bounds.second);
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::S32:
            {
                std::uniform_int_distribution<int32_t> distribution(-100, 100);
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::F16:
            case DataType::F32:
            {
                std::uniform_real_distribution<> distribution(-1.0f, 1.0f);
                library->fill(tensor, distribution, i);
                break;
            }
            default:
                library->fill_tensor_uniform(tensor, i);
        }
    }
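
    /* For quantized data types, fill() draws values only from the integer range that
     * maps back into the float interval [-1, 1] under the tensor's quantization.
     * Illustrative example (parameters assumed, not taken from any test): with
     * UniformQuantizationInfo{ scale = 1.f / 255.f, offset = 10 },
     *   quantize(-1.0f) = round(-255.f) + 10 = -245 -> clamped to   0
     *   quantize( 1.0f) = round( 255.f) + 10 =  265 -> clamped to 255
     * so the returned bounds would be the pair (0, 255). */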

    template <typename U>
    void fill_zeros(U &&tensor)
    {
        switch(tensor.data_type())
        {
            case DataType::S32:
            {
                const int32_t value = static_cast<int32_t>(tensor.quantization_info().uniform().offset);
                library->fill_tensor_value(tensor, value);
                break;
            }
            case DataType::F16:
                library->fill_tensor_value(tensor, static_cast<half>(0.0f));
                break;
            case DataType::F32:
                library->fill_tensor_value(tensor, static_cast<float>(0.0f));
                break;
            default:
                ARM_COMPUTE_ERROR("Not supported");
        }
    }

    TensorType compute_target(TensorShape input_shape, TensorShape weights_shape, const TensorShape bias_shape, TensorShape output_shape,
                              const PadStrideInfo &info, bool add_bias)
    {
        if(_data_layout == DataLayout::NHWC)
        {
            permute(input_shape, PermutationVector(2U, 0U, 1U));
            permute(weights_shape, PermutationVector(2U, 0U, 1U));
            permute(output_shape, PermutationVector(2U, 0U, 1U));
        }

        // Create tensors
        TensorType src     = create_tensor<TensorType>(input_shape, _data_type, 1, _input_quantization_info, _data_layout);
        TensorType weights = create_tensor<TensorType>(weights_shape, _data_type, 1, _input_quantization_info, _data_layout);
        TensorType bias    = create_tensor<TensorType>(bias_shape, _bias_data_type, 1, _input_quantization_info, _data_layout);
        TensorType dst     = create_tensor<TensorType>(output_shape, _data_type, 1, _output_quantization_info, _data_layout);

        // Create and configure function
        FunctionType conv;
        conv.configure(&src, &weights, add_bias ? &bias : nullptr, &dst, info);

        ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
        if(add_bias)
        {
            ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
        }
        ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Allocate tensors
        src.allocator()->allocate();
        weights.allocator()->allocate();
        if(add_bias)
        {
            bias.allocator()->allocate();
        }
        dst.allocator()->allocate();

        ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!weights.info()->is_resizable(), framework::LogLevel::ERRORS);
        if(add_bias)
        {
            ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS);
        }
        ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Fill tensors
        fill(AccessorType(src), 0);
        fill(AccessorType(weights), 1);
        if(add_bias)
        {
            fill(AccessorType(bias), 2);
        }

        // Compute DeconvolutionLayer function
        conv.run();

        return dst;
    }

    SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape,
                                      const PadStrideInfo &info, bool add_bias)
    {
        // Create reference
        SimpleTensor<T>     src{ input_shape, _data_type, 1, _input_quantization_info };
        SimpleTensor<T>     weights{ weights_shape, _data_type, 1, _input_quantization_info };
        SimpleTensor<TBias> bias{ bias_shape, _bias_data_type, 1, _input_quantization_info };

        // Fill reference
        fill(src, 0);
        fill(weights, 1);

        if(add_bias)
        {
            fill(bias, 2);
        }
        else
        {
            fill_zeros(bias);
        }

        return reference::deconvolution_layer<T>(src, weights, bias, output_shape, info, _output_quantization_info);
    }

    TensorType       _target{};
    SimpleTensor<T>  _reference{};
    DataType         _data_type{};
    DataType         _bias_data_type{};
    DataLayout       _data_layout{};
    QuantizationInfo _input_quantization_info{};
    QuantizationInfo _output_quantization_info{};
};
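
/* The concrete fixtures below derive the output shape before deferring to the base
 * class. deconvolution_output_dimensions() follows the usual transposed-convolution
 * relation (sketched here for the x axis; the y axis is analogous):
 *
 *   out_x = stride_x * (in_x - 1) + kernel_x - (pad_left + pad_right)
 *
 * Worked example with assumed numbers: a 4x4 input, 3x3 kernel, stride 2 and one
 * pixel of padding on every border gives out = 2 * (4 - 1) + 3 - 2 = 7, i.e. a
 * 7x7 output plane. */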

template <typename TensorType, typename AccessorType, typename FunctionType, typename T, unsigned int kernel_size_x, unsigned int kernel_size_y>
class DeconvolutionValidationFixture : public DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T>
{
public:
    template <typename...>
    void setup(TensorShape input_shape, unsigned int sx, unsigned int sy, unsigned int padx, unsigned int pady,
               unsigned int num_kernels, DataType data_type, DataLayout data_layout, bool add_bias)
    {
        ARM_COMPUTE_ERROR_ON_MSG(kernel_size_x != kernel_size_y, "Only square kernels supported");
        const TensorShape   weights_shape(kernel_size_x, kernel_size_y, input_shape.z(), num_kernels);
        const TensorShape   bias_shape(num_kernels);
        const PadStrideInfo info(sx, sy, padx, pady, DimensionRoundingType::CEIL);
        auto                out_dim = deconvolution_output_dimensions(input_shape.x(), input_shape.y(), kernel_size_x, kernel_size_y, info);
        TensorInfo          input_info(input_shape, 1, data_type);
        TensorInfo          weights_info(weights_shape, 1, data_type);
        TensorShape         output_shape = compute_deconvolution_output_shape(out_dim, input_info, weights_info);
        DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, data_type, data_layout, QuantizationInfo(),
                                                                                        QuantizationInfo(), add_bias);
    }
};

template <typename TensorType, typename AccessorType, typename FunctionType, typename T, unsigned int kernel_size_x, unsigned int kernel_size_y>
class DeconvolutionValidationAsymmFixture : public DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T>
{
public:
    template <typename...>
    void setup(TensorShape input_shape, unsigned int sx, unsigned int sy, unsigned int pad_left, unsigned int pad_right, unsigned int pad_top,
               unsigned int pad_bottom, unsigned int num_kernels, DataType data_type, DataLayout data_layout, bool add_bias)
    {
        ARM_COMPUTE_ERROR_ON_MSG(kernel_size_x != kernel_size_y, "Only square kernels supported");
        const TensorShape   weights_shape(kernel_size_x, kernel_size_y, input_shape.z(), num_kernels);
        const TensorShape   bias_shape(num_kernels);
        const PadStrideInfo info(sx, sy, pad_left, pad_right, pad_top, pad_bottom, DimensionRoundingType::CEIL);
        auto                out_dim = deconvolution_output_dimensions(input_shape.x(), input_shape.y(), kernel_size_x, kernel_size_y, info);
        TensorInfo          input_info(input_shape, 1, data_type);
        TensorInfo          weights_info(weights_shape, 1, data_type);
        TensorShape         output_shape = compute_deconvolution_output_shape(out_dim, input_info, weights_info);
        DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, data_type, data_layout, QuantizationInfo(),
                                                                                        QuantizationInfo(), add_bias);
    }
};

template <typename TensorType, typename AccessorType, typename FunctionType, typename T, unsigned int kernel_size_x, unsigned int kernel_size_y>
class DeconvolutionValidationQuantizedFixture : public DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T>
{
public:
    template <typename...>
    void setup(TensorShape input_shape, unsigned int sx, unsigned int sy, unsigned int padx, unsigned int pady,
               unsigned int num_kernels, DataType data_type, DataLayout data_layout, QuantizationInfo input_quantization_info, QuantizationInfo output_quantization_info, bool add_bias)
    {
        ARM_COMPUTE_ERROR_ON_MSG(kernel_size_x != kernel_size_y, "Only square kernels supported");
        const TensorShape   weights_shape(kernel_size_x, kernel_size_y, input_shape.z(), num_kernels);
        const TensorShape   bias_shape(num_kernels);
        const PadStrideInfo info(sx, sy, padx, pady, DimensionRoundingType::CEIL);
        auto                out_dim = deconvolution_output_dimensions(input_shape.x(), input_shape.y(), kernel_size_x, kernel_size_y, info);
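        // Shape inference only: these TensorInfo objects mirror the tensors that
        // compute_target() creates. Input and weights share input_quantization_info,
        // while the output uses output_quantization_info, so the requantization step
        // of a quantized deconvolution is exercised as well.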
        TensorInfo  input_info(input_shape, 1, data_type, input_quantization_info);
        TensorInfo  weights_info(weights_shape, 1, data_type, input_quantization_info);
        TensorShape output_shape = compute_deconvolution_output_shape(out_dim, input_info, weights_info);
        DeconvolutionLayerFixtureBase<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, data_type, data_layout, input_quantization_info,
                                                                                        output_quantization_info, add_bias);
    }
};

} // namespace validation
} // namespace test
} // namespace arm_compute
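
/* Usage sketch (illustrative only; the concrete names below are assumed from the
 * library's test conventions and are not defined in this file):
 *
 *   template <typename T>
 *   using NEDeconvolutionLayerFixture3x3 =
 *       DeconvolutionValidationFixture<Tensor, Accessor, NEDeconvolutionLayer, T, 3U, 3U>;
 *
 * A backend test suite would then drive such an alias through FIXTURE_DATA_TEST_CASE
 * with the data type, layout, padding/stride and add_bias combinations to cover. */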