//
// Copyright © 2019 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include <aclCommon/ArmComputeTensorUtils.hpp>

#include <boost/test/unit_test.hpp>

using namespace armnn::armcomputetensorutils;

BOOST_AUTO_TEST_SUITE(ArmComputeTensorUtils)

BOOST_AUTO_TEST_CASE(BuildArmComputeTensorInfoTest)
{
    const armnn::TensorShape tensorShape = { 1, 2, 3, 4 };
    const armnn::DataType dataType = armnn::DataType::QAsymmU8;

    const std::vector<float> quantScales = { 1.5f, 2.5f, 3.5f, 4.5f };
    const float quantScale = quantScales[0];
    const int32_t quantOffset = 128;

    // Tensor info with per-tensor quantization
    const armnn::TensorInfo tensorInfo0(tensorShape, dataType, quantScale, quantOffset);
    const arm_compute::TensorInfo aclTensorInfo0 = BuildArmComputeTensorInfo(tensorInfo0);

    const arm_compute::TensorShape& aclTensorShape = aclTensorInfo0.tensor_shape();
    BOOST_CHECK(aclTensorShape.num_dimensions() == tensorShape.GetNumDimensions());
    for(unsigned int i = 0u; i < tensorShape.GetNumDimensions(); ++i)
    {
        // NOTE: arm_compute tensor dimensions are stored in the opposite order
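        // e.g. for the armnn shape { 1, 2, 3, 4 } used above, aclTensorShape reads (4, 3, 2, 1)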
        BOOST_CHECK(aclTensorShape[i] == tensorShape[tensorShape.GetNumDimensions() - i - 1]);
    }

    BOOST_CHECK(aclTensorInfo0.data_type() == arm_compute::DataType::QASYMM8);
    BOOST_CHECK(aclTensorInfo0.quantization_info().scale()[0] == quantScale);

    // Tensor info with per-axis quantization
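    // (the trailing 0 below is presumably the quantization dimension, i.e. the axis the scales apply along)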
    const armnn::TensorInfo tensorInfo1(tensorShape, dataType, quantScales, 0);
    const arm_compute::TensorInfo aclTensorInfo1 = BuildArmComputeTensorInfo(tensorInfo1);

    BOOST_CHECK(aclTensorInfo1.quantization_info().scale() == quantScales);
}

BOOST_AUTO_TEST_SUITE_END()