//
// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "Convolution2dLayer.hpp"
#include "LayerCloneBase.hpp"

#include <armnn/TypesUtils.hpp>

#include <armnnUtils/DataLayoutIndexed.hpp>

#include <armnn/backends/TensorHandle.hpp>
#include <armnn/backends/WorkloadFactory.hpp>

#include <string>

using namespace armnnUtils;

namespace armnn
{

Convolution2dLayer::Convolution2dLayer(const Convolution2dDescriptor& param, const char* name)
    : LayerWithParameters(param.GetNumInputs(), 1, LayerType::Convolution2d, param, name)
{
}

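// Reports the filter dimensions and output channel count in addition to the descriptor
// parameters. The filter shape is read from input slot 1, where the weights tensor is
// connected, and interpreted according to m_DataLayout.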
void Convolution2dLayer::SerializeLayerParameters(ParameterStringifyFunction& fn) const
{
    //using DescriptorType = Parameters;
    const std::vector<TensorShape>& inputShapes =
    {
        GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
        GetInputSlot(1).GetConnection()->GetTensorInfo().GetShape()
    };
    const TensorShape filterShape = inputShapes[1];
    DataLayoutIndexed dataLayoutIndex(m_Param.m_DataLayout);
    unsigned int filterWidth = filterShape[dataLayoutIndex.GetWidthIndex()];
    unsigned int filterHeight = filterShape[dataLayoutIndex.GetHeightIndex()];
    unsigned int outChannels = filterShape[0];

    fn("OutputChannels", std::to_string(outChannels));
    fn("FilterWidth", std::to_string(filterWidth));
    fn("FilterHeight", std::to_string(filterHeight));
    LayerWithParameters<Convolution2dDescriptor>::SerializeLayerParameters(fn);
}

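// Builds a Convolution2dQueueDescriptor and asks the backend workload factory to create
// the corresponding workload. PrepInfoAndDesc copies the layer's parameters into the
// descriptor and collects the input/output tensor infos for the workload.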
std::unique_ptr<IWorkload> Convolution2dLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
    ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "Convolution2dLayer_CreateWorkload");
    Convolution2dQueueDescriptor descriptor;
    SetAdditionalInfo(descriptor);

    return factory.CreateWorkload(LayerType::Convolution2d, descriptor, PrepInfoAndDesc(descriptor));
}

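// Creates a copy of this layer, with the same descriptor and name, in the target graph.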
Convolution2dLayer* Convolution2dLayer::Clone(Graph& graph) const
{
    auto layer = CloneBase<Convolution2dLayer>(graph, m_Param, GetName());
    return layer;
}

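// Infers the output tensor shape from the data tensor shape (inputShapes[0]) and the
// weights tensor shape (inputShapes[1]), taking padding, stride and dilation into
// account. Only 4D inputs with a single batch dimension are supported.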
std::vector<TensorShape> Convolution2dLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
    ARMNN_ASSERT(inputShapes.size() == 2);
    const TensorShape& inputShape = inputShapes[0];
    const TensorShape filterShape = inputShapes[1];

    // If we support multiple batch dimensions in the future, then this assert will need to change.
    ARMNN_ASSERT_MSG(inputShape.GetNumDimensions() == 4, "Convolutions will always have 4D input.");

    ARMNN_ASSERT(m_Param.m_StrideX > 0);
    ARMNN_ASSERT(m_Param.m_StrideY > 0);

    DataLayoutIndexed dataLayoutIndex(m_Param.m_DataLayout);

    unsigned int inWidth = inputShape[dataLayoutIndex.GetWidthIndex()];
    unsigned int inHeight = inputShape[dataLayoutIndex.GetHeightIndex()];
    unsigned int inBatchSize = inputShape[0];

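    // Effective (dilated) filter extent: filter + (dilation - 1) * (filter - 1).
    // Each spatial output dimension is then
    // 1 + (inSize + padBefore + padAfter - dilatedFilter) / stride.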
    unsigned int filterWidth = filterShape[dataLayoutIndex.GetWidthIndex()];
    unsigned int dilatedFilterWidth = filterWidth + (m_Param.m_DilationX - 1) * (filterWidth - 1);
    unsigned int readWidth = (inWidth + m_Param.m_PadLeft + m_Param.m_PadRight) - dilatedFilterWidth;
    unsigned int outWidth = 1 + (readWidth / m_Param.m_StrideX);

    unsigned int filterHeight = filterShape[dataLayoutIndex.GetHeightIndex()];
    unsigned int dilatedFilterHeight = filterHeight + (m_Param.m_DilationY - 1) * (filterHeight - 1);
    unsigned int readHeight = (inHeight + m_Param.m_PadTop + m_Param.m_PadBottom) - dilatedFilterHeight;
    unsigned int outHeight = 1 + (readHeight / m_Param.m_StrideY);

    unsigned int outChannels = filterShape[0];
    unsigned int outBatchSize = inBatchSize;

    TensorShape tensorShape = m_Param.m_DataLayout == armnn::DataLayout::NHWC ?
                              TensorShape({ outBatchSize, outHeight, outWidth, outChannels }) :
                              TensorShape({ outBatchSize, outChannels, outHeight, outWidth });

    return std::vector<TensorShape>({ tensorShape });
}

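// Verifies that the connected input shapes are consistent with the layer parameters and
// validates (or infers, depending on the configured shape inference method) the output shape.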
void Convolution2dLayer::ValidateTensorShapesFromInputs()
{
    VerifyLayerConnections(m_Param.GetNumInputs(), CHECK_LOCATION());

    const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();

    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);

    ARMNN_ASSERT_MSG(GetInputSlot(1).GetConnection(),
                     "Convolution2dLayer: Weights should be connected to input slot 1.");

    std::vector<TensorShape> inferredShapes = InferOutputShapes({
        GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
        GetInputSlot(1).GetConnection()->GetTensorInfo().GetShape() });

    ARMNN_ASSERT(inferredShapes.size() == 1);

    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "Convolution2dLayer");
}

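// Returns the constant tensors connected to this layer's input slots (the weights and,
// where connected, the bias), which are provided by ConstantLayers.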
Layer::ImmutableConstantTensors Convolution2dLayer::GetConstantTensorsByRef() const
{
    Layer::ImmutableConstantTensors tensors = GetConnectedConstantAsInputTensors();
    return tensors;
}

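// Passes this layer, its parameters and its name to the given strategy (visitor).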
void Convolution2dLayer::ExecuteStrategy(IStrategy& strategy) const
{
    strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
}

} // namespace armnn