/*
 * Copyright (c) 2019-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/BlobLifetimeManager.h"
#include "arm_compute/runtime/CL/CLBufferAllocator.h"
#include "arm_compute/runtime/CL/functions/CLConvolutionLayer.h"
#include "arm_compute/runtime/CL/functions/CLL2NormalizeLayer.h"
#include "arm_compute/runtime/MemoryGroup.h"
#include "arm_compute/runtime/MemoryManagerOnDemand.h"
#include "arm_compute/runtime/PoolManager.h"
#include "src/core/CL/kernels/CLFillBorderKernel.h"
#include "src/core/CL/kernels/CLL2NormalizeLayerKernel.h"
#include "src/core/CL/kernels/CLReductionOperationKernel.h"
#include "tests/AssetsLibrary.h"
#include "tests/CL/CLAccessor.h"
#include "tests/Globals.h"
#include "tests/Utils.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
#include "tests/validation/Validation.h"
#include "tests/validation/fixtures/UNIT/DynamicTensorFixture.h"

namespace arm_compute
{
namespace test
{
namespace validation
{
namespace
{
constexpr AbsoluteTolerance<float> absolute_tolerance_float(0.0001f); /**< Absolute tolerance for comparing the reference output against the implementation output for DataType::F32 */
RelativeTolerance<float>           tolerance_f32(0.1f);               /**< Relative tolerance for comparing the reference output against the implementation output for DataType::F32 */
constexpr float                    tolerance_num = 0.07f;             /**< Maximum allowed ratio of mismatching elements */
} // namespace

#ifndef DOXYGEN_SKIP_THIS
using CLL2NormLayerWrapper = SimpleFunctionWrapper<MemoryManagerOnDemand, CLL2NormalizeLayer, ICLTensor>;
template <>
void CLL2NormLayerWrapper::configure(ICLTensor *src, ICLTensor *dst)
{
    _func.configure(src, dst, 0, 0.0001f);
}
#endif // DOXYGEN_SKIP_THIS
TEST_SUITE(CL)
TEST_SUITE(UNIT)
TEST_SUITE(DynamicTensor)

using BlobMemoryManagementService        = MemoryManagementService<CLBufferAllocator, BlobLifetimeManager, PoolManager, MemoryManagerOnDemand>;
using CLDynamicTensorType3SingleFunction = DynamicTensorType3SingleFunction<CLTensor, CLAccessor, BlobMemoryManagementService, CLL2NormLayerWrapper>;

/** Tests the memory manager with dynamic input and output tensors.
 *
 * Create and manage the tensors needed to run a simple function. After the function is executed,
 * change the input and output sizes, requesting more memory, and go through the manage/allocate process again.
 * The memory manager should be able to update its internal structures and allocate the requested memory.
 * (An illustrative sketch of this flow follows the test case below.)
 */
FIXTURE_DATA_TEST_CASE(DynamicTensorType3Single, CLDynamicTensorType3SingleFunction, framework::DatasetMode::ALL,
                       framework::dataset::zip(framework::dataset::make("Level0Shape", { TensorShape(12U, 11U, 3U), TensorShape(256U, 8U, 12U) }),
                                               framework::dataset::make("Level1Shape", { TensorShape(67U, 31U, 15U), TensorShape(11U, 2U, 3U) })))
{
    ARM_COMPUTE_EXPECT(internal_l0.size() == internal_l1.size(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(cross_l0.size() == cross_l1.size(), framework::LogLevel::ERRORS);

    const unsigned int internal_size = internal_l0.size();
    const unsigned int cross_size    = cross_l0.size();
    if(input_l0.total_size() < input_l1.total_size())
    {
        for(unsigned int i = 0; i < internal_size; ++i)
        {
            ARM_COMPUTE_EXPECT(internal_l0[i].size < internal_l1[i].size, framework::LogLevel::ERRORS);
        }
        for(unsigned int i = 0; i < cross_size; ++i)
        {
            ARM_COMPUTE_EXPECT(cross_l0[i].size < cross_l1[i].size, framework::LogLevel::ERRORS);
        }
    }
    else
    {
        for(unsigned int i = 0; i < internal_size; ++i)
        {
            ARM_COMPUTE_EXPECT(internal_l0[i].size == internal_l1[i].size, framework::LogLevel::ERRORS);
        }
        for(unsigned int i = 0; i < cross_size; ++i)
        {
            ARM_COMPUTE_EXPECT(cross_l0[i].size == cross_l1[i].size, framework::LogLevel::ERRORS);
        }
    }
}
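
/* Illustrative only: a minimal sketch, under the assumption of the public IMemoryManager API,
 * of the manage/populate flow that the fixture above drives once per level shape. The helper
 * name example_blob_memory_manager_flow, the fixed shapes and the single pool are hypothetical
 * and are not exercised by the test suite.
 */
#ifndef DOXYGEN_SKIP_THIS
inline void example_blob_memory_manager_flow()
{
    // Build the service: a blob lifetime manager and a pool manager behind an on-demand memory manager.
    auto lifetime_mgr = std::make_shared<BlobLifetimeManager>();
    auto pool_mgr     = std::make_shared<PoolManager>();
    auto mm           = std::make_shared<MemoryManagerOnDemand>(lifetime_mgr, pool_mgr);

    // Configure the function with the level-0 shape; its internal tensors register with the manager.
    CLTensor src{}, dst{};
    src.allocator()->init(TensorInfo(TensorShape(12U, 11U, 3U), 1, DataType::F32));
    dst.allocator()->init(TensorInfo(TensorShape(12U, 11U, 3U), 1, DataType::F32));
    CLL2NormalizeLayer l2_norm(mm);
    l2_norm.configure(&src, &dst, 0, 0.0001f);

    // Allocate the user tensors and back the manager's blobs with device memory.
    src.allocator()->allocate();
    dst.allocator()->allocate();
    CLBufferAllocator backing_allocator{};
    mm->populate(backing_allocator, 1 /* num_pools */);

    l2_norm.run();

    // Release the pools so the function can be re-configured with the larger level-1 shape.
    mm->clear();
}
#endif // DOXYGEN_SKIP_THIS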

using CLDynamicTensorType3ComplexFunction = DynamicTensorType3ComplexFunction<CLTensor, CLAccessor, BlobMemoryManagementService, CLConvolutionLayer>;
/** Tests the memory manager with dynamic input and output tensors.
 *
 * Create and manage the tensors needed to run a complex function. After the function is executed,
 * change the input and output sizes, requesting more memory, and go through the manage/allocate process again.
 * The memory manager should be able to update its internal structures and allocate the requested memory.
 */
FIXTURE_DATA_TEST_CASE(DynamicTensorType3Complex, CLDynamicTensorType3ComplexFunction, framework::DatasetMode::ALL,
                       framework::dataset::zip(framework::dataset::zip(framework::dataset::zip(framework::dataset::zip(
                       framework::dataset::make("InputShape", { std::vector<TensorShape>{ TensorShape(12U, 12U, 16U), TensorShape(64U, 64U, 16U) } }),
                       framework::dataset::make("WeightsShape", { TensorShape(3U, 3U, 16U, 5U) })),
                       framework::dataset::make("BiasShape", { TensorShape(5U) })),
                       framework::dataset::make("OutputShape", { std::vector<TensorShape>{ TensorShape(12U, 12U, 5U), TensorShape(64U, 64U, 5U) } })),
                       framework::dataset::make("PadStrideInfo", { PadStrideInfo(1U, 1U, 1U, 1U) })))
{
    for(unsigned int i = 0; i < num_iterations; ++i)
    {
        run_iteration(i);
        validate(CLAccessor(dst_target), dst_ref, tolerance_f32, tolerance_num, absolute_tolerance_float);
    }
}
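
/* Illustrative only: a rough sketch of how the three tolerance values declared at the top of this
 * file are expected to combine in the validate() call in the test case above. An element is accepted
 * if it lies within the absolute tolerance or within the relative tolerance, and the case passes if
 * the fraction of rejected elements does not exceed tolerance_num. This mirrors the intent of the
 * framework's validation, not its exact implementation; the helper name example_tolerance_check is
 * hypothetical and is not used by the test suite.
 */
#ifndef DOXYGEN_SKIP_THIS
inline bool example_tolerance_check(const float *target, const float *reference, unsigned int count)
{
    unsigned int mismatches = 0;
    for(unsigned int i = 0; i < count; ++i)
    {
        const float diff    = (target[i] > reference[i]) ? target[i] - reference[i] : reference[i] - target[i];
        const float ref_mag = (reference[i] < 0.f) ? -reference[i] : reference[i];
        const bool  ok_abs  = diff <= 0.0001f;        // absolute_tolerance_float
        const bool  ok_rel  = diff <= 0.1f * ref_mag; // tolerance_f32
        if(!ok_abs && !ok_rel)
        {
            ++mismatches;
        }
    }
    // Pass if the ratio of mismatching elements stays within tolerance_num.
    return (count == 0) || ((static_cast<float>(mismatches) / static_cast<float>(count)) <= tolerance_num);
}
#endif // DOXYGEN_SKIP_THIS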

using CLDynamicTensorType2PipelineFunction = DynamicTensorType2PipelineFunction<CLTensor, CLAccessor, BlobMemoryManagementService, CLConvolutionLayer>;
/** Tests the memory manager with dynamic input and output tensors.
 *
 * Create and manage the tensors needed to run a pipeline. After the pipeline is executed, resize the input and rerun.
 * (An illustrative sketch of the resize-and-rerun step follows the test case below.)
 */
FIXTURE_DATA_TEST_CASE(DynamicTensorType2Pipeline, CLDynamicTensorType2PipelineFunction, framework::DatasetMode::ALL,
                       framework::dataset::make("InputShape", { std::vector<TensorShape>{ TensorShape(12U, 12U, 6U), TensorShape(128U, 128U, 6U) } }))
{
    // The fixture builds, runs, resizes and reruns the pipeline during setup; no extra checks are needed here.
}
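
/* Illustrative only: a minimal sketch of the resize-and-rerun step described above, using the simpler
 * L2-normalization function as a stand-in for the convolution pipeline. The helper name
 * example_resize_and_rerun and the fixed shape are hypothetical and are not used by the test suite.
 */
#ifndef DOXYGEN_SKIP_THIS
inline void example_resize_and_rerun(CLTensor &src, CLTensor &dst, CLL2NormalizeLayer &func)
{
    // Drop the old backing memory and describe the larger shapes.
    src.allocator()->free();
    dst.allocator()->free();
    src.allocator()->init(TensorInfo(TensorShape(128U, 128U, 6U), 1, DataType::F32));
    dst.allocator()->init(TensorInfo(TensorShape(128U, 128U, 6U), 1, DataType::F32));

    // Re-configure so the function (and its managed internal tensors) picks up the new sizes.
    func.configure(&src, &dst, 0, 0.0001f);

    // Re-allocate and run again.
    src.allocator()->allocate();
    dst.allocator()->allocate();
    func.run();
}
#endif // DOXYGEN_SKIP_THIS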

TEST_SUITE_END() // DynamicTensor
TEST_SUITE_END() // UNIT
TEST_SUITE_END() // CL
} // namespace validation
} // namespace test
} // namespace arm_compute