//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include "LayerFwd.hpp"

#include <armnn/backends/ITensorHandleFactory.hpp>
#include <OutputHandler.hpp>
#include <backendsCommon/TensorHandleFactoryRegistry.hpp>
#include <backendsCommon/WorkloadDataCollector.hpp>
#include <backendsCommon/WorkloadInfo.hpp>
#include "InternalTypes.hpp"
#include "SerializeLayerParameters.hpp"

#include <armnn/Types.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/INetwork.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/NumericCast.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>

#include <algorithm>
#include <functional>
#include <iostream>
#include <list>
#include <memory>
#include <string>
#include <vector>
#include <backendsCommon/WorkloadData.hpp>

namespace armnn
{

class IWorkload;
class IWorkloadFactory;
class Layer;
class Graph;

class InputSlot final : public IInputSlot
{
public:
    explicit InputSlot(Layer& owner, unsigned int slotIndex)
    : m_OwningLayer(owner)
    , m_Connection(nullptr)
    , m_SlotIndex(slotIndex)
    {}

    ~InputSlot();

    Layer& GetOwningLayer() const { return m_OwningLayer; }
    unsigned int GetSlotIndex() const { return m_SlotIndex; }

    const OutputSlot* GetConnectedOutputSlot() const { return m_Connection; }
    OutputSlot* GetConnectedOutputSlot() { return m_Connection; }

    /// Links the slot to an output slot or breaks an existing link if passing nullptr.
    void SetConnection(OutputSlot* source)
    {
        if (m_Connection != nullptr && source != nullptr)
        {
            throw InvalidArgumentException("Tried to connect an output slot to an input slot, "
                "but the latter already has a connection");
        }
        m_Connection = source;
    }

    // Inserts a single-output existing layer at this point in the graph.
    void Insert(Layer& layer);

    // IInputSlot

    const IOutputSlot* GetConnection() const override;
    IOutputSlot* GetConnection() override;

private:
    Layer& m_OwningLayer;
    OutputSlot* m_Connection;
    const unsigned int m_SlotIndex;
};

class OutputSlot final : public IOutputSlot
{
public:
    explicit OutputSlot(Layer& owner, OutputHandler& outputHandler)
    : m_OwningLayer(owner)
    , m_OutputHandler(outputHandler)
    , m_TensorHandleFactoryId(ITensorHandleFactory::LegacyFactoryId)
    {}

    OutputSlot(const OutputSlot&) = delete;
    OutputSlot& operator=(const OutputSlot&) = delete;
    OutputSlot& operator=(OutputSlot&&) = delete;

    OutputSlot(OutputSlot&&) = default;

    ~OutputSlot()
    {
        try
        {
            // Coverity fix: DisconnectAll() may throw uncaught exceptions.
            DisconnectAll();
        }
        catch (const std::exception& e)
        {
            // Coverity fix: BOOST_LOG_TRIVIAL (typically used to report errors) may throw an
            // exception of type std::length_error.
            // Using stderr instead in this context as there is no point in nesting try-catch blocks here.
            std::cerr << "WARNING: An error has occurred when disconnecting all output slots: "
                      << e.what() << std::endl;
        }
    }

    Layer& GetOwningLayer() const { return m_OwningLayer; }

    LayerGuid GetOwningLayerGuid() const override;

    const OutputHandler& GetOutputHandler() const { return m_OutputHandler; }
    OutputHandler& GetOutputHandler() { return m_OutputHandler; }

    int Connect(InputSlot& destination);
    void Disconnect(InputSlot& slot);

    const std::vector<InputSlot*>& GetConnections() const { return m_Connections; }
    const std::vector<EdgeStrategy>& GetEdgeStrategies() const { return m_EdgeStrategies; }

    bool ValidateTensorShape(const TensorShape& shape) const;

    // Disconnect all connections.
    void DisconnectAll();

    /// Moves all connections to another OutputSlot.
    void MoveAllConnections(OutputSlot& destination);

    // IOutputSlot

    unsigned int GetNumConnections() const override { return armnn::numeric_cast<unsigned int>(m_Connections.size()); }
    const InputSlot* GetConnection(unsigned int index) const override;
    InputSlot* GetConnection(unsigned int index) override;

    void SetTensorInfo(const TensorInfo& tensorInfo) override;
    const TensorInfo& GetTensorInfo() const override;
    bool IsTensorInfoSet() const override;

    int Connect(IInputSlot& destination) override
    {
        return Connect(*PolymorphicDowncast<InputSlot*>(&destination));
    }

    void Disconnect(IInputSlot& slot) override
    {
        return Disconnect(*PolymorphicDowncast<InputSlot*>(&slot));
    }

    unsigned int CalculateIndexOnOwner() const override;

    bool operator==(const OutputSlot& other) const;

    void SetTensorHandleFactory(const ITensorHandleFactory::FactoryId& id);
    ITensorHandleFactory::FactoryId GetTensorHandleFactoryId() const;

    void SetEdgeStrategy(unsigned int connectionIndex, EdgeStrategy strategy);
    EdgeStrategy GetEdgeStrategyForConnection(unsigned int connectionIdx) const;

private:
    void ValidateConnectionIndex(unsigned int index) const;

    Layer& m_OwningLayer;
    OutputHandler& m_OutputHandler;
    std::vector<InputSlot*> m_Connections;

    ITensorHandleFactory::FactoryId m_TensorHandleFactoryId;
    std::vector<EdgeStrategy> m_EdgeStrategies;
};
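
// A minimal usage sketch of wiring two layers together through their slots. It assumes
// "firstLayer" and "secondLayer" are references to Layer-derived objects already owned by a
// Graph; the shape and data type in the TensorInfo are arbitrary examples:
//
//     // Connect output 0 of firstLayer to input 0 of secondLayer and describe the tensor it carries.
//     firstLayer.GetOutputSlot(0).Connect(secondLayer.GetInputSlot(0));
//     firstLayer.GetOutputSlot(0).SetTensorInfo(TensorInfo(TensorShape({ 1, 3, 224, 224 }), DataType::Float32));
//
// Connect() records the destination InputSlot on this OutputSlot and calls
// InputSlot::SetConnection(), which throws InvalidArgumentException if the input slot is
// already connected; passing nullptr to SetConnection() breaks an existing link.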

// InputSlot inlines that need OutputSlot declaration.

inline InputSlot::~InputSlot()
{
    if (m_Connection != nullptr)
    {
        try
        {
            // Coverity fix: Disconnect() may throw uncaught exceptions.
            m_Connection->Disconnect(*this);
        }
        catch (const std::exception& e)
        {
            // Coverity fix: BOOST_LOG_TRIVIAL (typically used to report errors) may throw an
            // exception of type std::length_error.
            // Using stderr instead in this context as there is no point in nesting try-catch blocks here.
            std::cerr << "WARNING: An error has occurred when disconnecting an input slot: "
                      << e.what() << std::endl;
        }
    }
}

inline const IOutputSlot* InputSlot::GetConnection() const { return GetConnectedOutputSlot(); }
inline IOutputSlot* InputSlot::GetConnection() { return GetConnectedOutputSlot(); }


class ScopedCpuTensorHandle;

// Base layer class

using LayerPriority = unsigned int;
using AdditionalInfoObjectPtr = std::shared_ptr<void>;

class Layer : public IConnectableLayer
{
public:
    /// @param name - Optional name for the layer (may be nullptr).
    Layer(unsigned int numInputSlots, unsigned int numOutputSlots, LayerType type, const char* name);
    Layer(unsigned int numInputSlots, unsigned int numOutputSlots, LayerType type, DataLayout layout, const char* name);

    const std::string& GetNameStr() const
    {
        return m_LayerName;
    }

    const OutputHandler& GetOutputHandler(unsigned int i = 0) const
    {
        return m_OutputHandlers[i];
    }

    OutputHandler& GetOutputHandler(unsigned int i = 0)
    {
        return const_cast<OutputHandler&>(const_cast<const Layer*>(this)->GetOutputHandler(i));
    }

    ShapeInferenceMethod GetShapeInferenceMethod() const { return m_ShapeInferenceMethod; };

    const std::vector<InputSlot>& GetInputSlots() const { return m_InputSlots; }
    const std::vector<OutputSlot>& GetOutputSlots() const { return m_OutputSlots; }

    // Allows non-const access to the input slots without exposing the vector itself (its size is fixed at layer construction).
    std::vector<InputSlot>::iterator BeginInputSlots() { return m_InputSlots.begin(); }
    std::vector<InputSlot>::iterator EndInputSlots() { return m_InputSlots.end(); }

    // Allows non-const access to the output slots without exposing the vector itself (its size is fixed at layer construction).
    std::vector<OutputSlot>::iterator BeginOutputSlots() { return m_OutputSlots.begin(); }
    std::vector<OutputSlot>::iterator EndOutputSlots() { return m_OutputSlots.end(); }

    // Checks whether none of this layer's outputs have any connection.
    bool IsOutputUnconnected()
    {
        unsigned int numConnections = 0;

        for (auto&& output : GetOutputSlots())
        {
            numConnections += output.GetNumConnections();
        }

        return (GetNumOutputSlots() > 0) && (numConnections == 0);
    }
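
    // A minimal sketch of how a cleanup pass might use IsOutputUnconnected() to spot dead layers;
    // iterating a Graph like this is illustrative only and not something this header defines:
    //
    //     for (Layer* layer : graph)
    //     {
    //         if (layer->IsOutputUnconnected())
    //         {
    //             // Nothing consumes this layer's outputs, so it is a candidate for removal.
    //         }
    //     }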

    // Used for sorting.
    void ResetPriority() const;
    LayerPriority GetPriority() const;

    LayerType GetType() const { return m_Type; }

    DataType GetDataType() const;

    const BackendId& GetBackendId() const { return m_BackendId; }
    void SetBackendId(const BackendId& id) { m_BackendId = id; }

    // Virtuals

    virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const = 0;

    virtual void CreateTensorHandles(const TensorHandleFactoryRegistry& registry,
                                     const IWorkloadFactory& factory,
                                     const bool IsMemoryManaged = true);

    /// Creates a dynamically-allocated copy of this layer.
    /// @param graph - The Graph into which this Layer is being cloned.
    virtual Layer* Clone(Graph& graph) const = 0;

    void VerifyLayerConnections(unsigned int expectedConnections, const CheckLocation& location) const;

    virtual void ValidateTensorShapesFromInputs() = 0;

    std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;

    /// Helper to serialize the layer parameters to string.
    /// (currently used in DotSerializer and company).
    virtual void SerializeLayerParameters(ParameterStringifyFunction& fn) const;

    // Free up the constant source data
    virtual void ReleaseConstantData();

    template<typename Op>
    void OperateOnConstantTensors(Op op)
    {
        for (auto constant : GetConstantTensorsByRef())
        {
            if (constant.get())
            {
                op(constant);
            }
        }
    };
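
    // A minimal sketch of using OperateOnConstantTensors(). The lambda mirrors what a
    // "release the constants" style pass could do, but the exact body is illustrative only:
    //
    //     OperateOnConstantTensors([](std::unique_ptr<ScopedCpuTensorHandle>& handle)
    //     {
    //         handle.reset(); // Drop the constant source data once it is no longer needed.
    //     });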

    // IConnectableLayer

    const char* GetName() const override { return m_LayerName.c_str(); }

    unsigned int GetNumInputSlots() const override { return static_cast<unsigned int>(m_InputSlots.size()); }
    unsigned int GetNumOutputSlots() const override { return static_cast<unsigned int>(m_OutputSlots.size()); }

    const InputSlot& GetInputSlot(unsigned int index) const override { return m_InputSlots.at(index); }
    InputSlot& GetInputSlot(unsigned int index) override { return m_InputSlots.at(index); }
    const OutputSlot& GetOutputSlot(unsigned int index = 0) const override { return m_OutputSlots.at(index); }
    OutputSlot& GetOutputSlot(unsigned int index = 0) override { return m_OutputSlots.at(index); }

    void SetGuid(LayerGuid guid) { m_Guid = guid; }
    LayerGuid GetGuid() const final { return m_Guid; }

    void AddRelatedLayerName(const std::string layerName) { m_RelatedLayerNames.emplace_back(layerName); }

    const std::list<std::string>& GetRelatedLayerNames() { return m_RelatedLayerNames; }

    virtual void Reparent(Graph& dest, std::list<Layer*>::const_iterator iterator) = 0;

    void BackendSelectionHint(Optional<BackendId> backend) final
    {
        m_BackendHint = backend;
    }
    Optional<BackendId> GetBackendHint() const { return m_BackendHint; }

    void SetShapeInferenceMethod(ShapeInferenceMethod shapeInferenceMethod)
    {
        m_ShapeInferenceMethod = shapeInferenceMethod;
    }

    template<typename T>
    std::shared_ptr<T> GetAdditionalInformation() const
    {
        return std::static_pointer_cast<T>(m_AdditionalInfoObject);
    }

    void SetAdditionalInfoForObject(const AdditionalInfoObjectPtr& additionalInfo)
    {
        m_AdditionalInfoObject = additionalInfo;
    }
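
    // A minimal sketch of attaching a typed "blob" to a layer and reading it back. Here "layer"
    // is a pointer to some Layer, and the ActivationDescriptor payload is only an example of the
    // kind of object a backend might stash:
    //
    //     auto fusedActivation = std::make_shared<ActivationDescriptor>();
    //     layer->SetAdditionalInfoForObject(fusedActivation);
    //     // ... later, e.g. when creating the workload ...
    //     auto blob = layer->GetAdditionalInformation<ActivationDescriptor>();
    //
    // The blob is stored as a std::shared_ptr<void>, so GetAdditionalInformation<T>() must be
    // instantiated with the same T that was stored; nothing checks the type at runtime.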

protected:
    // Graph needs access to the virtual destructor.
    friend class Graph;
    virtual ~Layer() = default;

    template <typename QueueDescriptor>
    void CollectQueueDescriptorInputs(QueueDescriptor& descriptor, WorkloadInfo& info) const
    {
        WorkloadDataCollector dataCollector(descriptor.m_Inputs, info.m_InputTensorInfos);
        CollectWorkloadInputs(dataCollector);
    }

    template <typename QueueDescriptor>
    void CollectQueueDescriptorOutputs(QueueDescriptor& descriptor, WorkloadInfo& info) const
    {
        WorkloadDataCollector dataCollector(descriptor.m_Outputs, info.m_OutputTensorInfos);
        CollectWorkloadOutputs(dataCollector);
    }

    void ValidateAndCopyShape(const TensorShape& outputShape,
                              const TensorShape& inferredShape,
                              const ShapeInferenceMethod shapeInferenceMethod,
                              const std::string& layerName,
                              const unsigned int outputSlotIndex = 0);

    void VerifyShapeInferenceType(const TensorShape& outputShape, ShapeInferenceMethod shapeInferenceMethod);

    /// Helper function to reduce duplication in *Layer::CreateWorkload.
    template <typename QueueDescriptor>
    WorkloadInfo PrepInfoAndDesc(QueueDescriptor& descriptor) const
    {
        WorkloadInfo info;
        CollectQueueDescriptorInputs(descriptor, info);
        CollectQueueDescriptorOutputs(descriptor, info);
        return info;
    }
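
    // A minimal sketch of how a derived layer's CreateWorkload() typically uses PrepInfoAndDesc().
    // "MyLayer", "MyQueueDescriptor" and the factory call are hypothetical names, not part of this header:
    //
    //     std::unique_ptr<IWorkload> MyLayer::CreateWorkload(const IWorkloadFactory& factory) const
    //     {
    //         MyQueueDescriptor descriptor;  // Queue descriptor specific to the layer type.
    //         SetAdditionalInfo(descriptor); // Forwards any attached "blob" to the workload.
    //         return factory.CreateMyWorkload(descriptor, PrepInfoAndDesc(descriptor));
    //     }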

    template <typename LayerType, typename ... Params>
    LayerType* CloneBase(Graph& graph, Params&& ... params) const;
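
    // A minimal sketch of implementing Clone() in a derived layer via CloneBase(); the layer type
    // "MyLayer" and the forwarded constructor arguments are illustrative only:
    //
    //     Layer* MyLayer::Clone(Graph& graph) const
    //     {
    //         return CloneBase<MyLayer>(graph, m_Param, GetName());
    //     }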

    // Retrieve the Handles to the constants
    using ConstantTensors = std::vector<std::reference_wrapper<std::unique_ptr<ScopedCpuTensorHandle>>>;
    virtual ConstantTensors GetConstantTensorsByRef() { return ConstantTensors(); };

    // "Blob"
    AdditionalInfoObjectPtr m_AdditionalInfoObject;

    // Utility method to set a pointer in the queueDescriptor to the "blob" location in the layer
    void SetAdditionalInfo(QueueDescriptor& descriptor) const;

private:
    void CollectWorkloadInputs(WorkloadDataCollector& dataCollector) const;
    void CollectWorkloadOutputs(WorkloadDataCollector& dataCollector) const;

protected:
    std::vector<OutputHandler> m_OutputHandlers;
    ShapeInferenceMethod m_ShapeInferenceMethod;

private:
    const std::string m_LayerName;

    std::vector<InputSlot> m_InputSlots;
    std::vector<OutputSlot> m_OutputSlots;

    const LayerType m_Type;
    BackendId m_BackendId;
    Optional<BackendId> m_BackendHint;

    /// Used for sorting.
    mutable LayerPriority m_Priority = 0;
    mutable bool m_Visiting = false;

    LayerGuid m_Guid;

    std::list<std::string> m_RelatedLayerNames;

};

// A layer to which user-provided data can be bound (e.g. inputs, outputs).
class BindableLayer : public Layer
{
public:
    BindableLayer(unsigned int numInputSlots,
        unsigned int numOutputSlots,
        LayerType type,
        const char* name,
        LayerBindingId id)
    : Layer(numInputSlots, numOutputSlots, type, name)
    , m_Id(id)
    {
    }

    LayerBindingId GetBindingId() const { return m_Id; };

protected:
    ~BindableLayer() = default;

private:
    LayerBindingId m_Id;
};
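
// A minimal sketch of a layer type that binds to user data. Arm NN's own InputLayer and
// OutputLayer follow this pattern; the constructor shown here is illustrative only, and the
// pure virtual members inherited from Layer are omitted:
//
//     class InputLayer : public BindableLayer
//     {
//     public:
//         InputLayer(LayerBindingId id, const char* name)
//             : BindableLayer(0, 1, LayerType::Input, name, id) // No inputs, one output, bound by id.
//         {}
//     };
//
// The LayerBindingId is what callers later use to associate their input and output tensors with
// the corresponding endpoints of the graph.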

} // namespace armnn