/*
 * Copyright (c) 2019-2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "src/cpu/operators/CpuDepthwiseConv2dAssemblyDispatch.h"

#include "arm_compute/core/ITensorInfo.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "src/common/utils/Log.h"
#include "src/core/CPP/Validate.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/utils/AssemblyUtils.h"
#include "src/cpu/kernels/internal/CpuDepthwiseConv2dAssemblyWrapperKernel.h"

namespace arm_compute
{
namespace cpu
{
struct CpuDepthwiseConv2dAssemblyDispatch::LocalImpl
{
    std::unique_ptr<kernels::CpuDepthwiseConv2dAssemblyWrapperKernel> asm_kernel{ nullptr };
    bool                                                              is_prepared{ false };
    bool                                                              are_weights_const{ true };
    experimental::MemoryRequirements                                  mem_req{};
};

#ifndef DOXYGEN_SKIP_THIS
CpuDepthwiseConv2dAssemblyDispatch::CpuDepthwiseConv2dAssemblyDispatch()
    : _pImpl(std::make_unique<LocalImpl>())
{
}
#endif /* DOXYGEN_SKIP_THIS */

CpuDepthwiseConv2dAssemblyDispatch::~CpuDepthwiseConv2dAssemblyDispatch() = default;

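// Set up the assembly wrapper kernel for the given tensors and record the temporary
// buffers it requires. If validation fails, no kernel is created and the dispatch is
// left unconfigured; callers should check is_configured() afterwards.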
void CpuDepthwiseConv2dAssemblyDispatch::configure(const ITensorInfo     *src,
                                                   const ITensorInfo     *weights,
                                                   const ITensorInfo     *bias,
                                                   ITensorInfo           *dst,
                                                   const ConvolutionInfo &info)
{
    ARM_COMPUTE_LOG_PARAMS(src, weights, bias, dst, info);
    const CPUInfo     &ci          = NEScheduler::get().cpu_info();
    const unsigned int num_threads = NEScheduler::get().num_threads();
    _pImpl->is_prepared            = false;
    _pImpl->are_weights_const      = weights->are_values_constant();

    // If we don't support a combination of data types, silently return: it is the caller's responsibility to check if configure() was successful via is_configured()
    if(!CpuDepthwiseConv2dAssemblyDispatch::validate(src, weights, bias, dst, info))
    {
        return;
    }

    auto dwc_wrapper = std::make_unique<kernels::CpuDepthwiseConv2dAssemblyWrapperKernel>();
    ARM_COMPUTE_ERROR_ON(dwc_wrapper == nullptr);
    dwc_wrapper->configure(src, weights, bias, dst, info, ci);

    // Compute memory requirements for assembly kernels
    constexpr size_t alignment = 4096;
    _pImpl->mem_req.push_back({ TensorType::ACL_INT_0, dwc_wrapper->get_working_size(num_threads, src->dimension(0)), alignment });
    _pImpl->mem_req.push_back({ TensorType::ACL_INT_1, dwc_wrapper->get_storage_size(), alignment });
    _pImpl->asm_kernel = std::move(dwc_wrapper);
}


Status CpuDepthwiseConv2dAssemblyDispatch::validate(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *bias, const ITensorInfo *dst, const ConvolutionInfo &info)
{
    return kernels::CpuDepthwiseConv2dAssemblyWrapperKernel::validate(src, weights, bias, dst, info);
}

experimental::MemoryRequirements CpuDepthwiseConv2dAssemblyDispatch::workspace() const
{
    return _pImpl->mem_req;
}

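// An activation can be fused on this path only if it maps to a supported arm_gemm activation type.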
bool CpuDepthwiseConv2dAssemblyDispatch::is_activation_supported(const ActivationLayerInfo &activation)
{
    arm_gemm::Activation act = assembly_utils::map_to_arm_gemm_activation(activation);
    return act.type != arm_gemm::Activation::Type::None;
}

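// Ensure the parameters are packed, then hand the assembly wrapper kernel to the
// scheduler, splitting the work along the Y dimension.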
void CpuDepthwiseConv2dAssemblyDispatch::run(ITensorPack &tensors)
{
    ARM_COMPUTE_ERROR_ON_MSG(tensors.empty(), "No inputs provided");

    prepare(tensors);

    NEScheduler::get().schedule_op(_pImpl->asm_kernel.get(), Window::DimY, _pImpl->asm_kernel->window(), tensors);
}

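// Pack the weights and bias into the ACL_INT_1 storage buffer in the layout the assembly
// kernel expects. Packing is redone on every call when the weights are not constant;
// otherwise it happens only once.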
void CpuDepthwiseConv2dAssemblyDispatch::prepare(ITensorPack &tensors)
{
    const ITensor *weights = tensors.get_const_tensor(TensorType::ACL_SRC_1);

    if((!_pImpl->are_weights_const && weights != nullptr) || !_pImpl->is_prepared)
    {
        // Pack weights and bias
        const ITensor *bias    = tensors.get_const_tensor(TensorType::ACL_SRC_2);
        ITensor       *storage = tensors.get_tensor(TensorType::ACL_INT_1);

        const auto weights_ptr    = weights->buffer() + weights->info()->offset_first_element_in_bytes();
        const auto bias_ptr       = (bias) ? bias->buffer() + bias->info()->offset_first_element_in_bytes() : nullptr;
        auto       parameters_ptr = storage->buffer() + storage->info()->offset_first_element_in_bytes();

        const auto weights_shape   = weights->info()->tensor_shape();
        const auto weights_padding = weights->info()->padding();

        // Leading dimensions of the weights tensor in elements, including any padding
        const size_t ld_weights_col = weights_shape[0] + weights_padding.left + weights_padding.right;
        const size_t ld_weights_row = ld_weights_col * (weights_shape[1] + weights_padding.top + weights_padding.bottom);
        _pImpl->asm_kernel->pack_parameters(parameters_ptr, bias_ptr, weights_ptr, ld_weights_col, ld_weights_row);

        weights->mark_as_unused();
        if(bias != nullptr)
        {
            bias->mark_as_unused();
        }
        _pImpl->is_prepared = true;
    }
}
} // namespace cpu
} // namespace arm_compute