//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ClDepthwiseConvolutionWorkload.hpp"

#include <ResolveType.hpp>
#include "ClWorkloadUtils.hpp"

#include <armnn/Exceptions.hpp>
#include <aclCommon/ArmComputeUtils.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <cl/ClTensorHandle.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/WorkloadUtils.hpp>
#include <backendsCommon/WorkloadData.hpp>

#include <arm_compute/runtime/CL/functions/CLDepthwiseConvolutionLayer.h>

namespace armnn
{

using namespace armcomputetensorutils;

arm_compute::Status ClDepthwiseConvolutionWorkloadValidate(const TensorInfo& input,
                                                           const TensorInfo& output,
                                                           const DepthwiseConvolution2dDescriptor& descriptor,
                                                           const TensorInfo& weights,
                                                           const Optional<TensorInfo>& biases,
                                                           const ActivationDescriptor* activationDescriptor)
{
    const arm_compute::TensorInfo aclInputInfo  = BuildArmComputeTensorInfo(input,  descriptor.m_DataLayout);
    const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);

    // ArmNN's weight format is [ M, I, H, W ]
    const unsigned int aclDepthMultiplier = weights.GetShape()[0];

    // Convert the weight format from ArmNN's [ M, I, H, W ] (does NOT depend on the data layout) to either
    // [ 1, H, W, I * M ] (if NHWC) or [ 1, I * M, H, W ] (if NCHW), as required by the compute library
    TensorInfo weightsPermuted = ConvertWeightTensorInfoFromArmnnToAcl(weights, descriptor.m_DataLayout);

    // Convert the weights into the compute library format
    const arm_compute::TensorInfo aclWeightsInfo = BuildArmComputeTensorInfo(weightsPermuted, descriptor.m_DataLayout);

    arm_compute::TensorInfo aclBiasesInfo;
    arm_compute::TensorInfo *optionalAclBiasesInfo = nullptr;

    if (descriptor.m_BiasEnabled)
    {
        ARMNN_ASSERT(biases.has_value());

        aclBiasesInfo = BuildArmComputeTensorInfo(biases.value(), descriptor.m_DataLayout);
        optionalAclBiasesInfo = &aclBiasesInfo;
    }

    const arm_compute::PadStrideInfo aclPadStrideInfo = BuildArmComputePadStrideInfo(descriptor);
    const arm_compute::Size2D aclDilationInfo = BuildArmComputeSize2D(
            descriptor.m_DilationX,
            descriptor.m_DilationY);

    const arm_compute::ActivationLayerInfo activationInfo = ConvertActivationDescriptorToAclActivationLayerInfo(
            activationDescriptor);

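    // Ask the compute library whether this depthwise convolution configuration is supported on the GPU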
    return arm_compute::CLDepthwiseConvolutionLayer::validate(&aclInputInfo,
                                                              &aclWeightsInfo,
                                                              optionalAclBiasesInfo,
                                                              &aclOutputInfo,
                                                              aclPadStrideInfo,
                                                              aclDepthMultiplier,
                                                              activationInfo,
                                                              aclDilationInfo);

}

ClDepthwiseConvolutionWorkload::ClDepthwiseConvolutionWorkload(
    const DepthwiseConvolution2dQueueDescriptor& descriptor,
    const WorkloadInfo& info)
    : BaseWorkload<DepthwiseConvolution2dQueueDescriptor>(descriptor, info)
{
    // Allocate a buffer for the swizzling of the weight tensor
    std::unique_ptr<unsigned char[]> permuteBuffer(new unsigned char[m_Data.m_Weight->GetTensorInfo().GetNumBytes()]);

    // Convert the weight format from ArmNN's [ M, I, H, W ] (does NOT depend on the data layout) to either
    // [ 1, H, W, I * M ] (if NHWC) or [ 1, I * M, H, W ] (if NCHW), as required by the compute library
    ConstTensor weightPermuted = ConvertWeightTensorFromArmnnToAcl(m_Data.m_Weight,
                                                                   m_Data.m_Parameters.m_DataLayout,
                                                                   permuteBuffer.get());

    // Convert the weights into the compute library format
    m_KernelTensor = std::make_unique<arm_compute::CLTensor>();
    BuildArmComputeTensor(*m_KernelTensor, weightPermuted.GetInfo(), m_Data.m_Parameters.m_DataLayout);

    if (m_Data.m_Parameters.m_BiasEnabled)
    {
        m_BiasTensor = std::make_unique<arm_compute::CLTensor>();
        BuildArmComputeTensor(*m_BiasTensor, m_Data.m_Bias->GetTensorInfo(), m_Data.m_Parameters.m_DataLayout);
    }

    const arm_compute::Size2D aclDilationInfo = BuildArmComputeSize2D(
                m_Data.m_Parameters.m_DilationX,
                m_Data.m_Parameters.m_DilationY);

    std::string name = std::string("ClDepthwiseConvolutionWorkload");
    m_Data.ValidateInputsOutputs(name, 1, 1);

    arm_compute::ICLTensor& input  = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
    arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();

    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
    input.info()->set_data_layout(aclDataLayout);
    output.info()->set_data_layout(aclDataLayout);

    // ArmNN's weight format is [ M, I, H, W ]
    auto& weightInfo = m_Data.m_Weight->GetTensorInfo();

    // Get the depth multiplier
    const unsigned int depthMultiplier = weightInfo.GetShape()[0];

    arm_compute::PadStrideInfo padStrideInfo = BuildArmComputePadStrideInfo(m_Data.m_Parameters);

    const arm_compute::ActivationLayerInfo activationInfo = ConvertAdditionalInfoToAclActivationLayerInfo(descriptor);

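    // Create the ACL depthwise convolution function and configure it with the permuted kernel,
    // the optional bias and the convolution parameters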
    m_DepthwiseConvolutionLayer = std::make_unique<arm_compute::CLDepthwiseConvolutionLayer>();
    static_cast<arm_compute::CLDepthwiseConvolutionLayer*>(m_DepthwiseConvolutionLayer.get())->configure(
        &input,
        m_KernelTensor.get(),
        m_BiasTensor.get(),
        &output,
        padStrideInfo,
        depthMultiplier,
        activationInfo,
        aclDilationInfo);

    ARMNN_ASSERT(m_DepthwiseConvolutionLayer);

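    // Copy the permuted weight data into the CL kernel tensor; a scoped CPU tensor handle
    // wraps the permute buffer for the transfer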
    ScopedCpuTensorHandle weightsPermutedHandle(weightPermuted);
    InitializeArmComputeClTensorData(*m_KernelTensor, &weightsPermutedHandle);

    if (m_BiasTensor)
    {
        InitializeArmComputeClTensorData(*m_BiasTensor, m_Data.m_Bias);
    }

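    // Run the layer's one-off preparation now so that the staging weight/bias tensors
    // can be freed straight away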
    m_DepthwiseConvolutionLayer->prepare();
    FreeUnusedTensors();
}

void ClDepthwiseConvolutionWorkload::FreeUnusedTensors()
{
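    // Release the staging kernel and bias tensors if the configured function no longer references them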
    FreeTensorIfUnused(m_KernelTensor);
    FreeTensorIfUnused(m_BiasTensor);
}

void ClDepthwiseConvolutionWorkload::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT_CL("ClDepthwiseConvolutionWorkload_Execute");
    ARMNN_ASSERT(m_DepthwiseConvolutionLayer);

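    // Run the configured ACL function, reporting any failure against this call site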
    RunClFunction(*m_DepthwiseConvolutionLayer, CHECK_LOCATION());
}

} // namespace armnn