• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
/*
 * Copyright (c) 2017-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
24 #ifndef ARM_COMPUTE_TEST_DEQUANTIZATION_LAYER_FIXTURE
25 #define ARM_COMPUTE_TEST_DEQUANTIZATION_LAYER_FIXTURE
26 
27 #include "arm_compute/core/TensorShape.h"
28 #include "arm_compute/core/Types.h"
29 #include "arm_compute/runtime/Tensor.h"
30 #include "tests/AssetsLibrary.h"
31 #include "tests/Globals.h"
32 #include "tests/IAccessor.h"
33 #include "tests/framework/Asserts.h"
34 #include "tests/framework/Fixture.h"
35 #include "tests/validation/Helpers.h"
36 #include "tests/validation/reference/DequantizationLayer.h"
37 
38 #include <random>
39 
40 namespace arm_compute
41 {
42 namespace test
43 {
44 namespace validation
45 {
46 template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
47 class DequantizationValidationFixture : public framework::Fixture
48 {
49 public:
50     template <typename...>
setup(TensorShape shape,DataType src_data_type,DataType dst_datatype,DataLayout data_layout)51     void setup(TensorShape shape, DataType src_data_type, DataType dst_datatype, DataLayout data_layout)
52     {
53         _quantization_info = generate_quantization_info(src_data_type, shape.z());
54         _target            = compute_target(shape, src_data_type, dst_datatype, data_layout);
55         _reference         = compute_reference(shape, src_data_type);
56     }
57 
58 protected:
59     template <typename U>
fill(U && tensor)60     void fill(U &&tensor)
61     {
62         library->fill_tensor_uniform(tensor, 0);
63     }
64 
compute_target(TensorShape shape,DataType src_data_type,DataType dst_datatype,DataLayout data_layout)65     TensorType compute_target(TensorShape shape, DataType src_data_type, DataType dst_datatype, DataLayout data_layout)
66     {
67         if(data_layout == DataLayout::NHWC)
68         {
69             permute(shape, PermutationVector(2U, 0U, 1U));
70         }
71 
72         // Create tensors
73         TensorType src = create_tensor<TensorType>(shape, src_data_type, 1, _quantization_info, data_layout);
74         TensorType dst = create_tensor<TensorType>(shape, dst_datatype, 1, QuantizationInfo(), data_layout);
75 
76         // Create and configure function
77         FunctionType dequantization_layer;
78         dequantization_layer.configure(&src, &dst);
79 
80         ARM_COMPUTE_ASSERT(src.info()->is_resizable());
81         ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
82 
83         // Allocate tensors
84         src.allocator()->allocate();
85         dst.allocator()->allocate();
86 
87         ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
88         ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
89 
90         // Fill tensors
91         fill(AccessorType(src));
92 
93         // Compute function
94         dequantization_layer.run();
95 
96         return dst;
97     }
98 
compute_reference(const TensorShape & shape,DataType src_data_type)99     SimpleTensor<T> compute_reference(const TensorShape &shape, DataType src_data_type)
100     {
101         switch(src_data_type)
102         {
103             case DataType::QASYMM8:
104             {
105                 SimpleTensor<uint8_t> src{ shape, src_data_type, 1, _quantization_info };
106                 fill(src);
107                 return reference::dequantization_layer<T>(src);
108             }
109             case DataType::QASYMM8_SIGNED:
110             case DataType::QSYMM8_PER_CHANNEL:
111             case DataType::QSYMM8:
112             {
113                 SimpleTensor<int8_t> src{ shape, src_data_type, 1, _quantization_info };
114                 fill(src);
115                 return reference::dequantization_layer<T>(src);
116             }
117             case DataType::QSYMM16:
118             {
119                 SimpleTensor<int16_t> src{ shape, src_data_type, 1, _quantization_info };
120                 fill(src);
121                 return reference::dequantization_layer<T>(src);
122             }
123             default:
124                 ARM_COMPUTE_ERROR("Unsupported data type");
125         }
126     }
127 
128 protected:
generate_quantization_info(DataType data_type,int32_t num_channels)129     QuantizationInfo generate_quantization_info(DataType data_type, int32_t num_channels)
130     {
131         std::mt19937                    gen(library.get()->seed());
132         std::uniform_int_distribution<> distribution_scale_q8(1, 255);
133         std::uniform_int_distribution<> distribution_offset_q8(1, 127);
134         std::uniform_int_distribution<> distribution_scale_q16(1, 32768);
135 
136         switch(data_type)
137         {
138             case DataType::QSYMM16:
139                 return QuantizationInfo(1.f / distribution_scale_q16(gen));
140             case DataType::QSYMM8:
141                 return QuantizationInfo(1.f / distribution_scale_q8(gen));
142             case DataType::QSYMM8_PER_CHANNEL:
143             {
144                 std::vector<float> scale(num_channels);
145                 for(int32_t i = 0; i < num_channels; ++i)
146                 {
147                     scale[i] = 1.f / distribution_offset_q8(gen);
148                 }
149                 return QuantizationInfo(scale);
150             }
151             case DataType::QASYMM8:
152                 return QuantizationInfo(1.f / distribution_scale_q8(gen), distribution_offset_q8(gen));
153             case DataType::QASYMM8_SIGNED:
154                 return QuantizationInfo(1.f / distribution_scale_q8(gen), -distribution_offset_q8(gen));
155             default:
156                 ARM_COMPUTE_ERROR("Unsupported data type");
157         }
158     }
159 
160 protected:
161     TensorType       _target{};
162     SimpleTensor<T>  _reference{};
163     QuantizationInfo _quantization_info{};
164 };
165 } // namespace validation
166 } // namespace test
167 } // namespace arm_compute
168 #endif /* ARM_COMPUTE_TEST_DEQUANTIZATION_LAYER_FIXTURE */
169