//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ConversionUtils.hpp"
#include <armnnUtils/Permute.hpp>

///
/// Helper classes
///

namespace armnn_driver
{

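// LayerInputHandle wraps an optional connection to the output slot of a previously
// added layer, together with the tensor info describing the data on that connection.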
LayerInputHandle::LayerInputHandle()
    : m_OutputSlot(nullptr)
    , m_Valid(false)
{}

LayerInputHandle::LayerInputHandle(bool valid, armnn::IOutputSlot* outputSlot, armnn::TensorInfo tensorInfo)
    : m_OutputSlot(outputSlot)
    , m_Valid(valid)
    , m_TensorInfo(tensorInfo)
{}

bool LayerInputHandle::IsValid() const
{
    return m_Valid;
}

void LayerInputHandle::Connect(armnn::IInputSlot& inputSlot)
{
    ARMNN_ASSERT(IsValid());
    if (m_OutputSlot)
    {
        m_OutputSlot->Connect(inputSlot);
    }
}

void LayerInputHandle::Disconnect(armnn::IInputSlot& inputSlot)
{
    ARMNN_ASSERT(IsValid());
    if (m_OutputSlot)
    {
        m_OutputSlot->Disconnect(inputSlot);
    }
}

const armnn::TensorInfo& LayerInputHandle::GetTensorInfo() const
{
    return m_TensorInfo;
}

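// ConstTensorPin wraps an armnn::ConstTensor, optionally owning a swizzled copy of the
// source data. An invalid pin with m_Optional set represents an optional operand that
// was not provided by the model.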
ConstTensorPin::ConstTensorPin(bool optional)
    : m_Optional(optional)
{}

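// If a permutation is supplied, the constructor below copies the source data into
// m_SwizzledTensorData in the permuted layout and points the ConstTensor at that copy;
// otherwise the ConstTensor references the caller's memory directly.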
ConstTensorPin::ConstTensorPin(const armnn::TensorInfo& tensorInfo,
                               const void* valueStart,
                               uint32_t numBytes,
                               const armnn::PermutationVector& mappings)
{
    armnn::IgnoreUnused(numBytes);
    assert(tensorInfo.GetNumBytes() == numBytes);

    const bool needsSwizzling = (mappings.GetSize() > 0);
    if (needsSwizzling)
    {
        m_SwizzledTensorData.resize(tensorInfo.GetNumBytes());
        SwizzleAndroidNn4dTensorToArmNn(tensorInfo, valueStart, m_SwizzledTensorData.data(), mappings);

        m_ConstTensor = armnn::ConstTensor(armnnUtils::Permuted(tensorInfo, mappings), m_SwizzledTensorData.data());
    }
    else
    {
        m_ConstTensor = armnn::ConstTensor(tensorInfo, valueStart);
    }
}

bool ConstTensorPin::IsValid() const
{
    return m_ConstTensor.GetMemoryArea() != nullptr;
}

bool ConstTensorPin::IsOptional() const
{
    return m_Optional;
}

const armnn::ConstTensor& ConstTensorPin::GetConstTensor() const
{
    return m_ConstTensor;
}

const armnn::ConstTensor* ConstTensorPin::GetConstTensorPtr() const
{
    if (IsValid() && m_ConstTensor.GetNumElements() > 0)
    {
        return &m_ConstTensor;
    }
    // tensor is either invalid, or has no elements (indicating an optional tensor that was not provided)
    return nullptr;
}

///
/// Utility functions
///

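// Applies the requested fused activation to the single output of prevLayer. Returns
// prevLayer unchanged for kActivationNone, the newly added activation layer otherwise,
// or nullptr if the activation is unrecognised or not supported by any of the backends.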
armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
                                            ActivationFn activation,
                                            armnn::IConnectableLayer* prevLayer,
                                            ConversionData& data)
{
    ARMNN_ASSERT(prevLayer->GetNumOutputSlots() == 1);

    prevLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);

    armnn::IConnectableLayer* activationLayer = prevLayer;

    if (activation != ActivationFn::kActivationNone)
    {
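        // Map the requested fused activation onto the matching Arm NN activation descriptor.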
        armnn::ActivationDescriptor activationDesc;
        switch (activation)
        {
            case ActivationFn::kActivationRelu:
            {
                activationDesc.m_Function = armnn::ActivationFunction::ReLu;
                break;
            }
            case ActivationFn::kActivationRelu1:
            {
                activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
                activationDesc.m_A = 1.0f;
                activationDesc.m_B = -1.0f;
                break;
            }
            case ActivationFn::kActivationRelu6:
            {
                activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
                activationDesc.m_A = 6.0f;
                break;
            }
            case ActivationFn::kActivationSigmoid:
            {
                activationDesc.m_Function = armnn::ActivationFunction::Sigmoid;
                break;
            }
            case ActivationFn::kActivationTanh:
            {
                activationDesc.m_Function = armnn::ActivationFunction::TanH;
                activationDesc.m_A = 1.0f;
                activationDesc.m_B = 1.0f;
                break;
            }
            default:
            {
                Fail("%s: Invalid activation enum value %i", __func__, activation);
                return nullptr;
            }
        }

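        // Query the selected backends for support before adding the activation layer.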
        bool isSupported = false;
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsActivationSupported,
                                   data.m_Backends,
                                   isSupported,
                                   prevLayer->GetOutputSlot(0).GetTensorInfo(),
                                   tensorInfo,
                                   activationDesc);
        if (!isSupported)
        {
            return nullptr;
        }

        activationLayer = data.m_Network->AddActivationLayer(activationDesc);

        prevLayer->GetOutputSlot(0).Connect(activationLayer->GetInputSlot(0));
        activationLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
    }

    return activationLayer;
}

} // namespace armnn_driver