/*
 * Copyright (c) 2018-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/CL/functions/CLWinogradConvolutionLayer.h"

#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/KernelDescriptors.h"
#include "src/core/CL/ICLKernel.h"
#include "src/core/helpers/MemoryHelpers.h"
#include "src/gpu/cl/operators/ClWinogradConv2d.h"
#include "support/Cast.h"

namespace arm_compute
{
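// Internal state of the function (pimpl idiom): the user-facing tensors, the backend
// ClWinogradConv2d operator, the tensor pack it runs on and the workspace memory it needs.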
struct CLWinogradConvolutionLayer::Impl
{
    const ICLTensor                          *src{ nullptr };
    const ICLTensor                          *weights{ nullptr };
    const ICLTensor                          *biases{ nullptr };
    ICLTensor                                *dst{ nullptr };
    std::unique_ptr<opencl::ClWinogradConv2d> op{ nullptr };
    ITensorPack                               run_pack{};
    MemoryGroup                               memory_group{};
    WorkspaceData<CLTensor>                   workspace_tensors{};
    bool                                      is_prepared{ false };
};

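// The optional memory manager backs the memory group that owns the operator's workspace tensors.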
CLWinogradConvolutionLayer::CLWinogradConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager)
    : _impl(std::make_unique<Impl>())
{
    _impl->memory_group = MemoryGroup(memory_manager);
}

CLWinogradConvolutionLayer::~CLWinogradConvolutionLayer() = default;

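// Forward to the overload below, using the default compile context from the kernel library.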
void CLWinogradConvolutionLayer::configure(ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output, const PadStrideInfo &conv_info, const ActivationLayerInfo &act_info,
                                           bool enable_fast_math)
{
    configure(CLKernelLibrary::get().get_compile_context(), input, weights, biases, output, conv_info, act_info, enable_fast_math);
}

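// Configure the backend ClWinogradConv2d operator, record the tensors it will run on and
// allocate any workspace it requests through the memory group.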
void CLWinogradConvolutionLayer::configure(const CLCompileContext &compile_context, ICLTensor *input, const ICLTensor *weights, const ICLTensor *biases, ICLTensor *output,
                                           const PadStrideInfo &conv_info,
                                           const ActivationLayerInfo &act_info, bool enable_fast_math)
{
    _impl->src     = input;
    _impl->weights = weights;
    _impl->biases  = biases;
    _impl->dst     = output;

    _impl->op = std::make_unique<opencl::ClWinogradConv2d>();
    _impl->op->configure(compile_context, input->info(), weights->info(), (biases != nullptr ? biases->info() : nullptr), output->info(), conv_info, act_info, enable_fast_math);

    _impl->run_pack =
    {
        { TensorType::ACL_SRC_0, _impl->src },
        { TensorType::ACL_SRC_1, _impl->weights },
        { TensorType::ACL_SRC_2, _impl->biases },
        { TensorType::ACL_DST, _impl->dst }
    };
    _impl->workspace_tensors = manage_workspace<CLTensor>(_impl->op->workspace(), _impl->memory_group, _impl->run_pack, _impl->run_pack);
}

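// Static validation defers entirely to the backend operator.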
Status CLWinogradConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
                                            const ActivationLayerInfo &act_info, bool enable_fast_math)
{
    return opencl::ClWinogradConv2d::validate(input, weights, biases, output, conv_info, act_info, enable_fast_math);
}

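// Acquire the workspace memory for the duration of the call, run the one-off preparation if
// it has not happened yet, then execute the backend operator on the recorded tensor pack.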
void CLWinogradConvolutionLayer::run()
{
    MemoryGroupResourceScope scope_mg(_impl->memory_group);
    prepare();
    _impl->op->run(_impl->run_pack);
}

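// One-shot preparation: let the backend operator do its preparation work (e.g. the weights
// transform), then release the preparation-only workspace tensors and drop the original
// weights from the run pack since they are not needed for subsequent runs.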
void CLWinogradConvolutionLayer::prepare()
{
    if(!_impl->is_prepared)
    {
        _impl->op->prepare(_impl->run_pack);

        // Release preparation tensors
        release_prepare_tensors(_impl->workspace_tensors, _impl->run_pack);
        _impl->run_pack.remove_tensor(TensorType::ACL_SRC_1);
        _impl->is_prepared = true;
    }
}
} // namespace arm_compute