/*
 * Copyright (c) 2019-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/Allocator.h"
#include "arm_compute/runtime/MemoryManagerOnDemand.h"
#include "arm_compute/runtime/NEON/functions/NEConvolutionLayer.h"
#include "arm_compute/runtime/NEON/functions/NENormalizationLayer.h"
#include "arm_compute/runtime/OffsetLifetimeManager.h"
#include "arm_compute/runtime/PoolManager.h"
#include "tests/AssetsLibrary.h"
#include "tests/NEON/Accessor.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
#include "tests/validation/Validation.h"
#include "tests/validation/fixtures/UNIT/DynamicTensorFixture.h"

namespace arm_compute
{
namespace test
{
namespace validation
{
namespace
{
constexpr AbsoluteTolerance<float> absolute_tolerance_float(0.0001f); /**< Absolute tolerance for comparing the reference output against the implementation output for DataType::F32 */
RelativeTolerance<float>           tolerance_f32(0.1f);               /**< Relative tolerance for comparing the reference output against the implementation output for DataType::F32 */
constexpr float                    tolerance_num = 0.07f;             /**< Allowed fraction of mismatching elements when validating against the reference */
} // namespace
#ifndef DOXYGEN_SKIP_THIS
using NENormLayerWrapper = SimpleFunctionWrapper<MemoryManagerOnDemand, NENormalizationLayer, ITensor>;
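// Specialization that configures the wrapped NENormalizationLayer with a fixed cross-map normalization (NormType::CROSS_MAP, size 3).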
template <>
void NENormLayerWrapper::configure(arm_compute::ITensor *src, arm_compute::ITensor *dst)
{
    _func.configure(src, dst, NormalizationLayerInfo(NormType::CROSS_MAP, 3));
}
#endif // DOXYGEN_SKIP_THIS
TEST_SUITE(NEON)
TEST_SUITE(UNIT)
TEST_SUITE(DynamicTensor)

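// Memory management service assembled from an Allocator, an OffsetLifetimeManager and a PoolManager, driving MemoryManagerOnDemand for the fixtures below.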
using OffsetMemoryManagementService      = MemoryManagementService<Allocator, OffsetLifetimeManager, PoolManager, MemoryManagerOnDemand>;
using NEDynamicTensorType3SingleFunction = DynamicTensorType3SingleFunction<Tensor, Accessor, OffsetMemoryManagementService, NENormLayerWrapper>;

/** Tests the memory manager with dynamic input and output tensors.
 *
 *  Create and manage the tensors needed to run a simple function. After the function is executed,
 *  resize the input and output to request more memory and go through the manage/allocate process again.
 *  The memory manager should be able to update its inner structures and allocate the requested memory.
 */
FIXTURE_DATA_TEST_CASE(DynamicTensorType3Single, NEDynamicTensorType3SingleFunction, framework::DatasetMode::ALL,
                       framework::dataset::zip(framework::dataset::make("Level0Shape", { TensorShape(12U, 11U, 3U), TensorShape(256U, 8U, 12U) }),
                                               framework::dataset::make("Level1Shape", { TensorShape(67U, 31U, 15U), TensorShape(11U, 2U, 3U) })))
{
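    // If the level-1 shapes need more memory than the level-0 run, the internal and cross-function blobs tracked by the
    // memory manager are expected to grow; otherwise their sizes should remain unchanged.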
    if(input_l0.total_size() < input_l1.total_size())
    {
        ARM_COMPUTE_EXPECT(internal_l0.size < internal_l1.size, framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(cross_l0.size < cross_l1.size, framework::LogLevel::ERRORS);
    }
    else
    {
        ARM_COMPUTE_EXPECT(internal_l0.size == internal_l1.size, framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(cross_l0.size == cross_l1.size, framework::LogLevel::ERRORS);
    }
}

using NEDynamicTensorType3ComplexFunction = DynamicTensorType3ComplexFunction<Tensor, Accessor, OffsetMemoryManagementService, NEConvolutionLayer>;
/** Tests the memory manager with dynamic input and output tensors.
 *
 *  Create and manage the tensors needed to run a complex function. After the function is executed,
 *  resize the input and output to request more memory and go through the manage/allocate process again.
 *  The memory manager should be able to update its inner structures and allocate the requested memory.
 */
FIXTURE_DATA_TEST_CASE(DynamicTensorType3Complex, NEDynamicTensorType3ComplexFunction, framework::DatasetMode::ALL,
                       framework::dataset::zip(framework::dataset::zip(framework::dataset::zip(framework::dataset::zip(
                                                                                                   framework::dataset::make("InputShape", { std::vector<TensorShape>{ TensorShape(12U, 12U, 6U), TensorShape(128U, 128U, 6U) } }),
                                                                                                   framework::dataset::make("WeightsShape", { TensorShape(3U, 3U, 6U, 3U) })),
                                                                                               framework::dataset::make("BiasShape", { TensorShape(3U) })),
                                                                       framework::dataset::make("OutputShape", { std::vector<TensorShape>{ TensorShape(12U, 12U, 3U), TensorShape(128U, 128U, 3U) } })),
                                               framework::dataset::make("PadStrideInfo", { PadStrideInfo(1U, 1U, 1U, 1U) })))
{
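    // Each iteration reruns the convolution with the next input/output size and validates the target output
    // against the reference within the tolerances defined above.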
    for(unsigned int i = 0; i < num_iterations; ++i)
    {
        run_iteration(i);
        validate(Accessor(dst_target), dst_ref, tolerance_f32, tolerance_num, absolute_tolerance_float);
    }
}

using NEDynamicTensorType2PipelineFunction = DynamicTensorType2PipelineFunction<Tensor, Accessor, OffsetMemoryManagementService, NEConvolutionLayer>;
/** Tests the memory manager with dynamic input and output tensors.
 *
 *  Create and manage the tensors needed to run a pipeline. After the pipeline is executed, resize the input and rerun.
 */
FIXTURE_DATA_TEST_CASE(DynamicTensorType2Pipeline, NEDynamicTensorType2PipelineFunction, framework::DatasetMode::ALL,
                       framework::dataset::make("InputShape", { std::vector<TensorShape>{ TensorShape(12U, 12U, 6U), TensorShape(128U, 128U, 6U) } }))
{
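    // Intentionally empty: the work described above is performed in the fixture's setup, so no additional checks are needed here.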
}
TEST_SUITE_END() // DynamicTensor
TEST_SUITE_END() // UNIT
TEST_SUITE_END() // NEON
} // namespace validation
} // namespace test
} // namespace arm_compute