/*
 * Copyright (c) 2020-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/NEON/functions/NEGEMMConv2d.h"

#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/runtime/Tensor.h"
#include "src/core/helpers/MemoryHelpers.h"
#include "src/cpu/operators/CpuGemmDirectConv2d.h"

#include <algorithm> // std::find_if
#include <memory>    // std::make_unique

namespace arm_compute
{
using OperatorType = cpu::CpuGemmDirectConv2d;
using namespace arm_compute::experimental;

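// Internal state (pimpl idiom): owns the backend CpuGemmDirectConv2d operator,
// the tensor packs passed to its run()/prepare() stages, and the auxiliary
// workspace tensors the operator requests.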
struct NEGEMMConv2d::Impl
{
    const ITensor                   *weights{ nullptr };
    std::unique_ptr<OperatorType>    op{ nullptr };
    ITensorPack                      run_pack{};
    ITensorPack                      prep_pack{};
    WorkspaceData<Tensor>            workspace{};
    MemoryGroup                      memory_group{};
    bool                             is_prepared{ false };
    experimental::MemoryRequirements aux_mem_req{};
};

NEGEMMConv2d::NEGEMMConv2d(const std::shared_ptr<IMemoryManager> &memory_manager)
    : _impl(std::make_unique<Impl>())
{
    _impl->memory_group = MemoryGroup(memory_manager);
}

NEGEMMConv2d::~NEGEMMConv2d() = default;

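// Configures the function: checks the tensor pointers, configures the backend
// operator on the tensors' metadata, and splits the tensors into a pack used
// on every run() and a pack consumed once by prepare() (the weights).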
void NEGEMMConv2d::configure(ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const Conv2dInfo &info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);

    _impl->weights     = weights;
    _impl->is_prepared = false;
    _impl->op          = std::make_unique<OperatorType>();

    // The backend operator is configured on tensor metadata only; no data is accessed here.
    _impl->op->configure(input->info(), weights->info(), biases != nullptr ? biases->info() : nullptr, output->info(), info);

    _impl->aux_mem_req = _impl->op->workspace();
    // Tensors needed on every run() vs. tensors only consumed once in prepare().
    _impl->run_pack    = { { TensorType::ACL_SRC_0, input }, { TensorType::ACL_SRC_2, biases }, { TensorType::ACL_DST, output } };
    _impl->prep_pack   = { { TensorType::ACL_SRC_1, weights }, { TensorType::ACL_SRC_2, biases } };
    // Allocate the auxiliary tensors requested by the operator and register them in both packs.
    _impl->workspace   = manage_workspace<Tensor>(_impl->op->workspace(), _impl->memory_group, _impl->run_pack, _impl->prep_pack);
}

Status NEGEMMConv2d::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const Conv2dInfo &info)
{
    return OperatorType::validate(input, weights, biases, output, info);
}

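// Runs the convolution. prepare() is a no-op after the first call; the memory
// group scope acquires the auxiliary workspace memory for the duration of the run.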
void NEGEMMConv2d::run()
{
    prepare();

    MemoryGroupResourceScope scope_mg(_impl->memory_group);
    _impl->op->run(_impl->run_pack);
}

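// One-time preparation: lets the backend operator pre-transform the weights and
// releases any resources that are no longer needed afterwards.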
void NEGEMMConv2d::prepare()
{
    if(!_impl->is_prepared)
    {
        _impl->op->prepare(_impl->prep_pack);

        // Check whether the operator holds a persistent auxiliary tensor, i.e. its
        // own reshaped copy of the weights.
        auto has_reshape = std::find_if(_impl->aux_mem_req.begin(),
                                        _impl->aux_mem_req.end(),
                                        [](const MemoryInfo &m) -> bool { return m.lifetime == MemoryLifetime::Persistent; });

        if(has_reshape != std::end(_impl->aux_mem_req))
        {
            // The operator keeps a reshaped copy, so the original weights can be freed.
            _impl->weights->mark_as_unused();
        }
        else
        {
            // No persistent copy: the original weights are still needed at run time.
            _impl->run_pack.add_const_tensor(ACL_SRC_1, _impl->weights);
        }

        // Release temporary tensors that are only used in the prepare stage
        release_temporaries<Tensor>(_impl->aux_mem_req, _impl->workspace);
        _impl->is_prepared = true;
    }
}
} // namespace arm_compute