/*
 * Copyright (c) 2022-2023 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_CASTFIXTURE
#define TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_CASTFIXTURE

#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.h"
#include "arm_compute/dynamic_fusion/sketch/attributes/CastAttributes.h"
#include "arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h"
#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h"

#include "tests/framework/Fixture.h"
#include "tests/validation/reference/DepthConvertLayer.h"

using namespace arm_compute::experimental::dynamic_fusion;

namespace arm_compute
{
namespace test
{
namespace validation
{
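/** Validation fixture for the dynamic fusion Cast operator.
 *
 * The target path records a single Cast operator on a GpuWorkloadSketch, runs it through
 * ClWorkloadRuntime and compares the output against the reference depth_convert implementation.
 *
 * @tparam TensorType   Backend tensor type used for the target computation
 * @tparam AccessorType Accessor used to fill and read the backend tensor
 * @tparam FunctionType Dynamic fusion operator under test; must provide create_op()
 * @tparam T1           Input data type
 * @tparam T2           Output data type
 */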
template <typename TensorType, typename AccessorType, typename FunctionType, typename T1, typename T2>
class DynamicFusionCastValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(TensorShape shape, DataType dt_in, DataType dt_out, ConvertPolicy policy)
    {
        _target    = compute_target(shape, dt_in, dt_out, policy);
        _reference = compute_reference(shape, dt_in, dt_out, policy);
    }

protected:
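    /** Fill a tensor with uniformly distributed random values.
     *
     * When converting to F16, the wider integer inputs (U16/S16/U32/S32) are restricted
     * to a range representable in half precision so the cast cannot overflow to infinity.
     */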
    template <typename U>
    void fill(U &&tensor, int i, DataType dt_in, DataType dt_out)
    {
        // Restricting range to avoid inf values
        if(dt_out == DataType::F16)
        {
            constexpr int signed_min   = -32000;
            constexpr int signed_max   = 32000;
            constexpr int unsigned_min = 0;
            constexpr int unsigned_max = 65000;

            switch(dt_in)
            {
                case DataType::U8:
                case DataType::QASYMM8:
                case DataType::QASYMM8_SIGNED:
                case DataType::S8:
                case DataType::F32:
                {
                    library->fill_tensor_uniform(tensor, i);
                    break;
                }
                case DataType::U16:
                {
                    library->fill_tensor_uniform(tensor, i, static_cast<uint16_t>(unsigned_min), static_cast<uint16_t>(unsigned_max));
                    break;
                }
                case DataType::S16:
                {
                    library->fill_tensor_uniform(tensor, i, static_cast<int16_t>(signed_min), static_cast<int16_t>(signed_max));
                    break;
                }
                case DataType::U32:
                {
                    library->fill_tensor_uniform(tensor, i, static_cast<uint32_t>(unsigned_min), static_cast<uint32_t>(unsigned_max));
                    break;
                }
                case DataType::S32:
                {
                    library->fill_tensor_uniform(tensor, i, static_cast<int32_t>(signed_min), static_cast<int32_t>(signed_max));
                    break;
                }
                default:
                    ARM_COMPUTE_ERROR("NOT SUPPORTED!");
            }
        }
        else
        {
            library->fill_tensor_uniform(tensor, i);
        }
    }

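    /** Build and run the dynamic fusion workload for the Cast operator and return its output.
     *
     * The operator is recorded on a GpuWorkloadSketch, compiled into a ClWorkloadRuntime,
     * and executed on freshly allocated user tensors.
     */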
    // The input shape is given in NCHW format; the layout does not affect the cast
    TensorType compute_target(const TensorShape &shape, const DataType dt_in, const DataType dt_out, const ConvertPolicy policy)
    {
        // Create a new workload sketch
        auto              cl_compile_ctx = CLKernelLibrary::get().get_compile_context();
        auto              gpu_ctx        = GpuWorkloadContext{ &cl_compile_ctx };
        GpuWorkloadSketch sketch{ &gpu_ctx };

        // Create sketch tensors
        TensorInfo src_info = sketch.create_tensor_info(TensorInfo(shape, 1, dt_in, DataLayout::NCHW)); // layout is not important
        TensorInfo dst_info = sketch.create_tensor_info();

        CastAttributes attributes;
        attributes.convert_policy(policy).data_type(dt_out);

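        // Record the Cast operator on the sketch; it returns the intermediate tensor info,
        // which GpuOutput then connects to the destination tensor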
        ITensorInfo *ans_info = FunctionType::create_op(sketch, &src_info, attributes);
        GpuOutput::create_op(sketch, ans_info, &dst_info);

        // Configure runtime
        ClWorkloadRuntime runtime;
        runtime.configure(sketch);

        // (Important) Allocate auxiliary tensor memory if there are any
        for(auto &data : runtime.get_auxiliary_tensors())
        {
            CLTensor     *tensor      = std::get<0>(data);
            TensorInfo    info        = std::get<1>(data);
            AuxMemoryInfo aux_mem_req = std::get<2>(data);
            tensor->allocator()->init(info, aux_mem_req.alignment);
            tensor->allocator()->allocate(); // Use ACL allocated memory
        }

        // Construct user tensors
        TensorType t_src{};
        TensorType t_dst{};

        // Initialize user tensors
        t_src.allocator()->init(src_info);
        t_dst.allocator()->init(dst_info);

        // Allocate and fill user tensors
        t_src.allocator()->allocate();
        t_dst.allocator()->allocate();

        fill(AccessorType(t_src), 0, dt_in, dt_out);

        // Run runtime
        runtime.run({ &t_src, &t_dst });
        return t_dst;
    }

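    /** Compute the expected result with the reference depth_convert implementation. */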
    SimpleTensor<T2> compute_reference(const TensorShape &shape, const DataType dt_in, const DataType dt_out, const ConvertPolicy policy)
    {
        // Create reference
        SimpleTensor<T1> src{ shape, dt_in, 1 };

        // Fill reference
        fill(src, 0, dt_in, dt_out);

        return reference::depth_convert<T1, T2>(src, dt_out, policy, 0);
    }

    TensorType       _target{};
    SimpleTensor<T2> _reference{};
};
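/* Sketch of how this fixture is typically instantiated from a validation test suite.
 * The alias and the concrete types below are illustrative assumptions, not the actual
 * registrations (those live in the corresponding tests/validation CL test files):
 *
 *   using DynamicFusionCLCastToF32Fixture =
 *       DynamicFusionCastValidationFixture<CLTensor, CLAccessor, GpuCast, uint8_t, float>;
 *
 * A test case then calls setup(shape, DataType::U8, DataType::F32, ConvertPolicy::SATURATE)
 * and compares _target against _reference with the framework's validate() helpers.
 */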
} // namespace validation
} // namespace test
} // namespace arm_compute
#endif /* TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_CASTFIXTURE */