/*
 * Copyright (c) 2023 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_GPU_CL_POOL2DFIXTURE
#define TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_GPU_CL_POOL2DFIXTURE

#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"

#include "arm_compute/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.h"
#include "arm_compute/dynamic_fusion/sketch/attributes/Pool2dAttributes.h"
#include "arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h"
#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuPool2d.h"
#include "src/dynamic_fusion/utils/Utils.h"

#include "tests/CL/CLAccessor.h"
#include "tests/framework/Fixture.h"
#include "tests/validation/reference/PoolingLayer.h"

using namespace arm_compute::experimental::dynamic_fusion;

namespace arm_compute
{
namespace test
{
namespace validation
{
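/** Generic validation fixture for the dynamic fusion GpuPool2d operator.
 *
 * The target output is computed on the GPU by recording a GpuPool2d operator in a
 * GpuWorkloadSketch and executing it through ClWorkloadRuntime; the reference output
 * is computed on the host with reference::pooling_layer. The results are stored in
 * _target and _reference for comparison by the test case.
 */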
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class DynamicFusionGpuPool2dValidationGenericFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(TensorShape input_shape, const Pool2dAttributes &pool_attr, DataType data_type, bool mixed_precision)
    {
        _target    = compute_target(input_shape, pool_attr, data_type, mixed_precision);
        _reference = compute_reference(input_shape, convert_pool_attr_to_pool_info(pool_attr, mixed_precision), data_type);
    }

protected:
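    /** Fill the given tensor with uniformly distributed values in [-1, 1] for
     *  floating-point data types; other data types fall back to the library's
     *  default uniform fill.
     */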
    template <typename U>
    void fill(U &&tensor, int i)
    {
        switch(tensor.data_type())
        {
            case DataType::F16:
            {
                arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ -1.0f, 1.0f };
                library->fill(tensor, distribution, i);
                break;
            }
            case DataType::F32:
            {
                std::uniform_real_distribution<float> distribution(-1.0f, 1.0f);
                library->fill(tensor, distribution, i);
                break;
            }
            default:
                library->fill_tensor_uniform(tensor, i);
        }
    }

    // The given input shape is in NCHW format
    TensorType compute_target(TensorShape input_shape, const Pool2dAttributes &pool_attr, const DataType data_type, bool mixed_precision)
    {
        CLScheduler::get().default_reinit();

        // Permute the shape to NHWC, as the test shapes are given in NCHW while the operator runs in NHWC
        permute(input_shape, PermutationVector(2U, 0U, 1U));

        // Create a new workload sketch
        auto              cl_compile_ctx = CLKernelLibrary::get().get_compile_context();
        auto              gpu_ctx        = GpuWorkloadContext{ &cl_compile_ctx };
        GpuWorkloadSketch sketch{ &gpu_ctx };

        // Create sketch tensors
        auto input_info = sketch.create_tensor_info(TensorInfo(input_shape, 1, data_type, DataLayout::NHWC));
        auto dst_info   = sketch.create_tensor_info();

        // Create Pool2dSettings
        GpuPool2dSettings pool_settings = GpuPool2dSettings().mixed_precision(mixed_precision);

        FunctionType::create_op(sketch, &input_info, &dst_info, pool_attr, pool_settings);

        // Configure runtime
        ClWorkloadRuntime runtime;
        runtime.configure(sketch);
        // (Important) Allocate auxiliary tensor memory if there are any
        for(auto &data : runtime.get_auxiliary_tensors())
        {
            CLTensor     *tensor      = std::get<0>(data);
            TensorInfo    info        = std::get<1>(data);
            AuxMemoryInfo aux_mem_req = std::get<2>(data);
            tensor->allocator()->init(info, aux_mem_req.alignment);
            tensor->allocator()->allocate(); // Use ACL allocated memory
        }
        // Construct user tensors
        TensorType t_input{};
        TensorType t_dst{};

        // Initialize user tensors
        t_input.allocator()->init(input_info);
        t_dst.allocator()->init(dst_info);

        // Allocate and fill user tensors
        t_input.allocator()->allocate();
        t_dst.allocator()->allocate();

        fill(AccessorType(t_input), 0);

        // Run the runtime with the user tensors
        runtime.run({ &t_input, &t_dst });
        return t_dst;
    }

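    /** Compute the reference output on the host. The reference pooling layer operates
     *  on the original NCHW shape, so no permutation is applied here.
     */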
    SimpleTensor<T> compute_reference(TensorShape shape, PoolingLayerInfo pool_info, DataType data_type)
    {
        // Create reference
        SimpleTensor<T> src(shape, data_type, 1, QuantizationInfo());
        // Fill reference
        fill(src, 0);
        return reference::pooling_layer<T>(src, pool_info, QuantizationInfo(), nullptr, DataLayout::NCHW);
    }

    TensorType      _target{};
    SimpleTensor<T> _reference{};
};

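/** Validation fixture that builds the Pool2dAttributes from individual pooling parameters,
 *  with mixed precision disabled.
 */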
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class DynamicFusionGpuPool2dValidationFixture : public DynamicFusionGpuPool2dValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
    template <typename...>
    void setup(TensorShape input_shape, PoolingType pool_type, Size2D pool_size, Padding2D pad, Size2D stride, bool exclude_padding, DataType data_type)
    {
        DynamicFusionGpuPool2dValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape,
                                                                                                         Pool2dAttributes().pool_type(pool_type).pool_size(pool_size).pad(pad).stride(stride).exclude_padding(exclude_padding),
                                                                                                         data_type, false);
    }
};

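/** Validation fixture that builds the Pool2dAttributes from individual pooling parameters
 *  and additionally exposes the mixed precision setting.
 */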
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class DynamicFusionGpuPool2dMixedPrecisionValidationFixture : public DynamicFusionGpuPool2dValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
    template <typename...>
    void setup(TensorShape input_shape, PoolingType pool_type, Size2D pool_size, Padding2D pad, Size2D stride, bool exclude_padding, DataType data_type, bool mixed_precision)
    {
        DynamicFusionGpuPool2dValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape,
                                                                                                         Pool2dAttributes().pool_type(pool_type).pool_size(pool_size).pad(pad).stride(stride).exclude_padding(exclude_padding),
                                                                                                         data_type, mixed_precision);
    }
};

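/** Validation fixture that takes a pre-built Pool2dAttributes object directly,
 *  with mixed precision disabled.
 */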
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class DynamicFusionGpuPool2dSpecialValidationFixture : public DynamicFusionGpuPool2dValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
    template <typename...>
    void setup(TensorShape input_shape, Pool2dAttributes pool_attr, DataType data_type)
    {
        DynamicFusionGpuPool2dValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, pool_attr, data_type, false);
    }
};
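
// Illustrative usage sketch (not part of this header): a test suite would typically alias a
// fixture for a concrete backend and instantiate it through the validation framework macros.
// The alias name and dataset below are hypothetical placeholders.
//
//   template <typename T>
//   using DynamicFusionGpuPool2dFixture = DynamicFusionGpuPool2dValidationFixture<CLTensor, CLAccessor, GpuPool2d, T>;
//
//   FIXTURE_DATA_TEST_CASE(RunSmall, DynamicFusionGpuPool2dFixture<float>, framework::DatasetMode::ALL, PoolingDataset)
//   {
//       validate(CLAccessor(_target), _reference);
//   }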

} // namespace validation
} // namespace test
} // namespace arm_compute

#endif /* TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_GPU_CL_POOL2DFIXTURE */