• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include "TransposeConvolution2dLayer.hpp"
7 #include "LayerCloneBase.hpp"
8 
9 #include <armnnUtils/DataLayoutIndexed.hpp>
10 
11 #include <armnn/backends/TensorHandle.hpp>
12 #include <armnn/backends/WorkloadFactory.hpp>
13 
14 using namespace armnnUtils;
15 
16 namespace armnn
17 {
18 
TransposeConvolution2dLayer(const TransposeConvolution2dDescriptor & param,const char * name)19 TransposeConvolution2dLayer::TransposeConvolution2dLayer(const TransposeConvolution2dDescriptor& param,
20                                                          const char* name)
21     : LayerWithParameters(1, 1, LayerType::TransposeConvolution2d, param, name)
22 {
23 }
24 
CreateWorkload(const IWorkloadFactory & factory) const25 std::unique_ptr<IWorkload> TransposeConvolution2dLayer::CreateWorkload(const IWorkloadFactory& factory) const
26 {
27     ARMNN_ASSERT_MSG(m_Weight != nullptr, "TransposeConvolution2dLayer: Weights data should not be null.");
28 
29     TransposeConvolution2dQueueDescriptor descriptor;
30     descriptor.m_Weight = m_Weight.get();
31 
32     if (m_Param.m_BiasEnabled)
33     {
34         ARMNN_ASSERT_MSG(m_Bias != nullptr, "TransposeConvolution2dLayer: Bias data should not be null.");
35         descriptor.m_Bias = m_Bias.get();
36     }
37 
38     SetAdditionalInfo(descriptor);
39 
40     return factory.CreateWorkload(LayerType::TransposeConvolution2d, descriptor, PrepInfoAndDesc(descriptor));
41 }
42 
Clone(Graph & graph) const43 TransposeConvolution2dLayer* TransposeConvolution2dLayer::Clone(Graph& graph) const
44 {
45     auto layer = CloneBase<TransposeConvolution2dLayer>(graph, m_Param, GetName());
46 
47     layer->m_Weight = m_Weight ? m_Weight : nullptr;
48 
49     if (layer->m_Param.m_BiasEnabled)
50     {
51         layer->m_Bias = m_Bias ? m_Bias : nullptr;
52     }
53 
54     return std::move(layer);
55 }
56 
/// Computes the output shape of the transpose convolution from the input and kernel shapes.
/// Output spatial size: (in - 1) * stride + kernel - (padBefore + padAfter).
/// @param inputShapes Two shapes: [0] the 4D input tensor, [1] the kernel tensor.
/// @return A single-element vector holding the inferred output shape.
std::vector<TensorShape> TransposeConvolution2dLayer::InferOutputShapes(
    const std::vector<TensorShape>& inputShapes) const
{
    ARMNN_ASSERT(inputShapes.size() == 2);
    const TensorShape& inputShape  = inputShapes[0];
    const TensorShape& kernelShape = inputShapes[1];

    ARMNN_ASSERT_MSG(inputShape.GetNumDimensions() == 4, "Transpose convolutions will always have 4D input");

    // Resolve H/W axis positions for the configured data layout (NCHW or NHWC).
    const DataLayoutIndexed layout(m_Param.m_DataLayout);

    const unsigned int batches = inputShape[0];

    const unsigned int inWidth  = inputShape[layout.GetWidthIndex()];
    const unsigned int inHeight = inputShape[layout.GetHeightIndex()];

    const unsigned int kernelWidth  = kernelShape[layout.GetWidthIndex()];
    const unsigned int kernelHeight = kernelShape[layout.GetHeightIndex()];

    const unsigned int totalPadX = m_Param.m_PadLeft + m_Param.m_PadRight;
    const unsigned int totalPadY = m_Param.m_PadTop  + m_Param.m_PadBottom;

    const unsigned int outWidth  = (inWidth  - 1) * m_Param.m_StrideX + kernelWidth  - totalPadX;
    const unsigned int outHeight = (inHeight - 1) * m_Param.m_StrideY + kernelHeight - totalPadY;
    // Output channel count comes from the kernel's leading dimension.
    const unsigned int outChannels = kernelShape[0];

    const TensorShape outputShape = (m_Param.m_DataLayout == armnn::DataLayout::NHWC)
        ? TensorShape({ batches, outHeight, outWidth, outChannels })
        : TensorShape({ batches, outChannels, outHeight, outWidth });

    return std::vector<TensorShape>{ outputShape };
}
89 
ValidateTensorShapesFromInputs()90 void TransposeConvolution2dLayer::ValidateTensorShapesFromInputs()
91 {
92     VerifyLayerConnections(1, CHECK_LOCATION());
93 
94     const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
95 
96     VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
97 
98     ARMNN_ASSERT_MSG(m_Weight != nullptr, "TransposeConvolution2dLayer: Weight data cannot be null.");
99 
100     std::vector<TensorShape> expectedOutputShape;
101     // If output_shape was specified then use it rather than calculate an inferred output shape.
102     if (m_Param.m_OutputShapeEnabled)
103     {
104         TensorShape shapeAsTensorShape(static_cast<unsigned int>(m_Param.m_OutputShape.size()),
105             m_Param.m_OutputShape.data());
106         expectedOutputShape.push_back(shapeAsTensorShape);
107     }
108     else
109     {
110         expectedOutputShape = InferOutputShapes({GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
111                                                  m_Weight->GetTensorInfo().GetShape() });
112     }
113 
114     ARMNN_ASSERT(expectedOutputShape.size() == 1);
115 
116     ValidateAndCopyShape(outputShape, expectedOutputShape[0], m_ShapeInferenceMethod, "TransposeConvolution2dLayer");
117 }
118 
GetConstantTensorsByRef() const119 Layer::ImmutableConstantTensors TransposeConvolution2dLayer::GetConstantTensorsByRef() const
120 {
121     // For API stability DO NOT ALTER order and add new members to the end of vector
122     return {m_Weight, m_Bias};
123 }
124 
ExecuteStrategy(IStrategy & strategy) const125 void TransposeConvolution2dLayer::ExecuteStrategy(IStrategy& strategy) const
126 {
127     ManagedConstTensorHandle managedWeight(m_Weight);
128     std::vector<armnn::ConstTensor> constTensors { { managedWeight.GetTensorInfo(), managedWeight.Map() } };
129 
130     ManagedConstTensorHandle managedBias(m_Bias);
131     if (GetParameters().m_BiasEnabled)
132     {
133         constTensors.emplace_back(ConstTensor(managedBias.GetTensorInfo(), managedBias.Map()));
134     }
135 
136     strategy.ExecuteStrategy(this, GetParameters(), constTensors, GetName());
137 }
138 
139 } // namespace armnn
140