/*
 * Copyright (c) 2019-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/BlobLifetimeManager.h"
#include "arm_compute/runtime/CL/CLBufferAllocator.h"
#include "arm_compute/runtime/CL/functions/CLConvolutionLayer.h"
#include "arm_compute/runtime/CL/functions/CLL2NormalizeLayer.h"
#include "arm_compute/runtime/MemoryGroup.h"
#include "arm_compute/runtime/MemoryManagerOnDemand.h"
#include "arm_compute/runtime/PoolManager.h"
#include "src/core/CL/kernels/CLFillBorderKernel.h"
#include "src/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.h"
#include "src/core/CL/kernels/CLIm2ColKernel.h"
#include "src/core/CL/kernels/CLL2NormalizeLayerKernel.h"
#include "src/core/CL/kernels/CLReductionOperationKernel.h"
#include "src/core/CL/kernels/CLWeightsReshapeKernel.h"
#include "tests/AssetsLibrary.h"
#include "tests/CL/CLAccessor.h"
#include "tests/Globals.h"
#include "tests/Utils.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
#include "tests/validation/Validation.h"
#include "tests/validation/fixtures/UNIT/DynamicTensorFixture.h"

namespace arm_compute
{
namespace test
{
namespace validation
{
namespace
{
constexpr AbsoluteTolerance<float> absolute_tolerance_float(0.0001f); /**< Absolute tolerance for comparing the reference output against the implementation output for DataType::F32 */
RelativeTolerance<float>           tolerance_f32(0.1f);               /**< Relative tolerance for comparing the reference output against the implementation output for DataType::F32 */
constexpr float                    tolerance_num = 0.07f;             /**< Maximum allowed fraction of mismatching elements */
} // namespace

#ifndef DOXYGEN_SKIP_THIS
using CLL2NormLayerWrapper = SimpleFunctionWrapper<MemoryManagerOnDemand, CLL2NormalizeLayer, ICLTensor>;
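// Configure the wrapped CLL2NormalizeLayer to normalize along axis 0 with a small epsilon (1e-4).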
template <>
void CLL2NormLayerWrapper::configure(ICLTensor *src, ICLTensor *dst)
{
    _func.configure(src, dst, 0, 0.0001f);
}
#endif // DOXYGEN_SKIP_THIS
TEST_SUITE(CL)
TEST_SUITE(UNIT)
TEST_SUITE(DynamicTensor)

using BlobMemoryManagementService        = MemoryManagementService<CLBufferAllocator, BlobLifetimeManager, PoolManager, MemoryManagerOnDemand>;
using CLDynamicTensorType3SingleFunction = DynamicTensorType3SingleFunction<CLTensor, CLAccessor, BlobMemoryManagementService, CLL2NormLayerWrapper>;
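
/* For reference, a minimal sketch of how such a blob-based memory management service is
 * typically wired up (assumed usage, not part of the test; the fixture's
 * MemoryManagementService encapsulates roughly these steps, with src/dst being already
 * initialized CLTensors):
 *
 *   auto lifetime_mgr = std::make_shared<BlobLifetimeManager>();
 *   auto pool_mgr     = std::make_shared<PoolManager>();
 *   auto mm           = std::make_shared<MemoryManagerOnDemand>(lifetime_mgr, pool_mgr);
 *
 *   CLL2NormalizeLayer l2_norm(mm);            // the function shares the memory manager
 *   l2_norm.configure(&src, &dst, 0, 0.0001f);
 *
 *   CLBufferAllocator allocator{};
 *   mm->populate(allocator, 1);                // back the pools with OpenCL buffers
 *   l2_norm.run();
 *   mm->clear();                               // release pools before reconfiguring with larger shapes
 */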

/** Tests the memory manager with dynamic input and output tensors.
 *
 * Create and manage the tensors needed to run a simple function. After the function is executed,
 * change the input and output size, requesting more memory, and go through the manage/allocate process again.
 * The memory manager should be able to update its internal structures and allocate the requested memory
 * (a sketch of this manage/allocate flow follows the test case below).
 */
FIXTURE_DATA_TEST_CASE(DynamicTensorType3Single, CLDynamicTensorType3SingleFunction, framework::DatasetMode::ALL,
                       framework::dataset::zip(framework::dataset::make("Level0Shape", { TensorShape(12U, 11U, 3U), TensorShape(256U, 8U, 12U) }),
                                               framework::dataset::make("Level1Shape", { TensorShape(67U, 31U, 15U), TensorShape(11U, 2U, 3U) })))
{
    ARM_COMPUTE_EXPECT(internal_l0.size() == internal_l1.size(), framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(cross_l0.size() == cross_l1.size(), framework::LogLevel::ERRORS);

    const unsigned int internal_size = internal_l0.size();
    const unsigned int cross_size    = cross_l0.size();
    if(input_l0.total_size() < input_l1.total_size())
    {
        for(unsigned int i = 0; i < internal_size; ++i)
        {
            ARM_COMPUTE_EXPECT(internal_l0[i].size < internal_l1[i].size, framework::LogLevel::ERRORS);
        }
        for(unsigned int i = 0; i < cross_size; ++i)
        {
            ARM_COMPUTE_EXPECT(cross_l0[i].size < cross_l1[i].size, framework::LogLevel::ERRORS);
        }
    }
    else
    {
        for(unsigned int i = 0; i < internal_size; ++i)
        {
            ARM_COMPUTE_EXPECT(internal_l0[i].size == internal_l1[i].size, framework::LogLevel::ERRORS);
        }
        for(unsigned int i = 0; i < cross_size; ++i)
        {
            ARM_COMPUTE_EXPECT(cross_l0[i].size == cross_l1[i].size, framework::LogLevel::ERRORS);
        }
    }
}
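
/* Sketch of the manage/allocate flow exercised above (assumed usage; the fixture drives
 * these steps internally). Intermediate tensors are registered with a MemoryGroup before
 * allocation so the memory manager can plan blob reuse, and backing memory is only
 * acquired for the duration of a run:
 *
 *   MemoryGroup group(mm);
 *   group.manage(&intermediate);               // declare lifetime before allocation
 *   intermediate.allocator()->allocate();
 *
 *   mm->populate(allocator, 1);                // finalize: create the backing pools
 *   {
 *       MemoryGroupResourceScope scope(group); // acquire memory for this run
 *       func.run();
 *   }                                          // memory is handed back to the pool here
 */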

using CLDynamicTensorType3ComplexFunction = DynamicTensorType3ComplexFunction<CLTensor, CLAccessor, BlobMemoryManagementService, CLConvolutionLayer>;
/** Tests the memory manager with dynamic input and output tensors.
 *
 * Create and manage the tensors needed to run a complex function. After the function is executed,
 * change the input and output size, requesting more memory, and go through the manage/allocate process again.
 * The memory manager should be able to update its internal structures and allocate the requested memory.
 */
FIXTURE_DATA_TEST_CASE(DynamicTensorType3Complex, CLDynamicTensorType3ComplexFunction, framework::DatasetMode::ALL,
                       framework::dataset::zip(framework::dataset::zip(framework::dataset::zip(framework::dataset::zip(
                                                   framework::dataset::make("InputShape", { std::vector<TensorShape>{ TensorShape(12U, 12U, 16U), TensorShape(64U, 64U, 16U) } }),
                                                   framework::dataset::make("WeightsShape", { TensorShape(3U, 3U, 16U, 5U) })),
                                                   framework::dataset::make("BiasShape", { TensorShape(5U) })),
                                                   framework::dataset::make("OutputShape", { std::vector<TensorShape>{ TensorShape(12U, 12U, 5U), TensorShape(64U, 64U, 5U) } })),
                                                   framework::dataset::make("PadStrideInfo", { PadStrideInfo(1U, 1U, 1U, 1U) })))
{
    for(unsigned int i = 0; i < num_iterations; ++i)
    {
        run_iteration(i);
        validate(CLAccessor(dst_target), dst_ref, tolerance_f32, tolerance_num, absolute_tolerance_float);
    }
}

using CLDynamicTensorType2PipelineFunction = DynamicTensorType2PipelineFunction<CLTensor, CLAccessor, BlobMemoryManagementService, CLConvolutionLayer>;
/** Tests the memory manager with dynamic input and output tensors.
 *
 * Create and manage the tensors needed to run a pipeline. After the pipeline is executed, resize the input and rerun.
 */
FIXTURE_DATA_TEST_CASE(DynamicTensorType2Pipeline, CLDynamicTensorType2PipelineFunction, framework::DatasetMode::ALL,
                       framework::dataset::make("InputShape", { std::vector<TensorShape>{ TensorShape(12U, 12U, 6U), TensorShape(128U, 128U, 6U) } }))
{
}

TEST_SUITE_END() // DynamicTensor
TEST_SUITE_END() // UNIT
TEST_SUITE_END() // CL
} // namespace validation
} // namespace test
} // namespace arm_compute