//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "ITensorHandle.hpp"

#include <armnn/IRuntime.hpp>
#include <armnn/MemorySources.hpp>
#include <armnn/Types.hpp>
#include <armnn/utility/IgnoreUnused.hpp>

#include <memory>
#include <string>
#include <vector>

namespace armnn
{

/// Capability class passed to the GetCapabilities function
/// so that only the capabilities within the requested scope are calculated.
enum class CapabilityClass
{
    PaddingRequired = 1,

    // add new enum values here

    CapabilityClassMax = 254
};

/// Capability of the TensorHandleFactory
struct Capability
{
    Capability(CapabilityClass capabilityClass, bool value)
        : m_CapabilityClass(capabilityClass)
        , m_Value(value)
    {}

    CapabilityClass m_CapabilityClass;
    bool m_Value;
};

class ITensorHandleFactory
{
public:
    using FactoryId = std::string;
    static const FactoryId LegacyFactoryId;   /// Use the workload factory to create the tensor handle
    static const FactoryId DeferredFactoryId; /// Some TensorHandleFactory decisions are deferred to run-time

    virtual ~ITensorHandleFactory() {}

    virtual std::unique_ptr<ITensorHandle> CreateSubTensorHandle(ITensorHandle& parent,
                                                                 TensorShape const& subTensorShape,
                                                                 unsigned int const* subTensorOrigin) const = 0;

    virtual std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo) const = 0;

    virtual std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo,
                                                              DataLayout dataLayout) const = 0;

    /// Utility functions for backends which require TensorHandles to have unmanaged memory.
    /// These should be overridden if required to facilitate direct import of input tensors
    /// and direct export of output tensors.
    virtual std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo,
                                                              const bool IsMemoryManaged) const
    {
        IgnoreUnused(IsMemoryManaged);
        return CreateTensorHandle(tensorInfo);
    }

    virtual std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo,
                                                              DataLayout dataLayout,
                                                              const bool IsMemoryManaged) const
    {
        IgnoreUnused(IsMemoryManaged);
        return CreateTensorHandle(tensorInfo, dataLayout);
    }

    virtual const FactoryId& GetId() const = 0;

    virtual bool SupportsInPlaceComputation() const { return false; }

    virtual bool SupportsSubTensors() const = 0;

    virtual bool SupportsMapUnmap() const final { return true; }

    virtual MemorySourceFlags GetExportFlags() const { return 0; }
    virtual MemorySourceFlags GetImportFlags() const { return 0; }

    virtual std::vector<Capability> GetCapabilities(const IConnectableLayer* layer,
                                                    const IConnectableLayer* connectedLayer,
                                                    CapabilityClass capabilityClass)
    {
        IgnoreUnused(layer);
        IgnoreUnused(connectedLayer);
        IgnoreUnused(capabilityClass);
        return std::vector<Capability>();
    }
};

enum class EdgeStrategy
{
    Undefined,           /// No strategy has been defined. Used internally to verify integrity of optimizations.
    DirectCompatibility, /// Destination backend can work directly with tensors on source backend.
    ExportToTarget,      /// Source backend's tensor data can be exported to destination backend tensor without copy.
    CopyToTarget         /// Copy contents from source backend tensor to destination backend tensor.
};

} //namespace armnn
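
// Example (illustrative sketch only, not part of the Arm NN API): one way a
// backend might implement ITensorHandleFactory. The names
// SampleTensorHandleFactory and SampleTensorHandle are hypothetical; a real
// backend would return its own ITensorHandle implementation and report the
// memory sources it actually supports for import/export.
//
//     class SampleTensorHandleFactory : public armnn::ITensorHandleFactory
//     {
//     public:
//         static const FactoryId& GetIdStatic()
//         {
//             static const FactoryId s_Id("SampleTensorHandleFactory");
//             return s_Id;
//         }
//
//         std::unique_ptr<armnn::ITensorHandle> CreateSubTensorHandle(
//             armnn::ITensorHandle& /*parent*/,
//             armnn::TensorShape const& /*subTensorShape*/,
//             unsigned int const* /*subTensorOrigin*/) const override
//         {
//             // Sub-tensors are not supported by this sketch.
//             return nullptr;
//         }
//
//         std::unique_ptr<armnn::ITensorHandle> CreateTensorHandle(
//             const armnn::TensorInfo& tensorInfo) const override
//         {
//             // SampleTensorHandle stands in for the backend's ITensorHandle type.
//             return std::make_unique<SampleTensorHandle>(tensorInfo);
//         }
//
//         std::unique_ptr<armnn::ITensorHandle> CreateTensorHandle(
//             const armnn::TensorInfo& tensorInfo,
//             armnn::DataLayout /*dataLayout*/) const override
//         {
//             // This simple sketch ignores the requested data layout.
//             return std::make_unique<SampleTensorHandle>(tensorInfo);
//         }
//
//         const FactoryId& GetId() const override { return GetIdStatic(); }
//
//         bool SupportsSubTensors() const override { return false; }
//
//         // Advertise that tensors from this factory can import memory that was
//         // allocated with malloc, allowing copies to be skipped at the boundary.
//         armnn::MemorySourceFlags GetImportFlags() const override
//         {
//             return static_cast<armnn::MemorySourceFlags>(armnn::MemorySource::Malloc);
//         }
//     };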