//
// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "ITensorHandle.hpp"
#include <armnn/MemorySources.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>
#include <armnn/utility/IgnoreUnused.hpp>

#include <memory>
#include <string>
#include <vector>

namespace armnn
{
class IConnectableLayer;

/// Capability class passed to the GetCapabilities function
/// so that only the capabilities within that scope are calculated.
enum class CapabilityClass
{
    PaddingRequired = 1,
    FallbackImportDisabled = 2,

    // Add new enum values here.

    CapabilityClassMax = 254
};

/// Capability of the TensorHandleFactory
struct Capability
{
    Capability(CapabilityClass capabilityClass, bool value)
        : m_CapabilityClass(capabilityClass)
        , m_Value(value)
    {}

    CapabilityClass m_CapabilityClass;
    bool m_Value;
};

class ITensorHandleFactory
{
public:
    using FactoryId = std::string;
    static const FactoryId LegacyFactoryId;   /// Use the workload factory to create the tensor handle.
    static const FactoryId DeferredFactoryId; /// Some TensorHandleFactory decisions are deferred to run-time.

    virtual ~ITensorHandleFactory() {}

    virtual std::unique_ptr<ITensorHandle> CreateSubTensorHandle(ITensorHandle& parent,
                                                                 TensorShape const& subTensorShape,
                                                                 unsigned int const* subTensorOrigin) const = 0;

    virtual std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo) const = 0;

    virtual std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo,
                                                              DataLayout dataLayout) const = 0;

    /// Utility functions for backends which require TensorHandles to have unmanaged memory.
    /// These should be overridden where required to facilitate direct import of input tensors
    /// and direct export of output tensors.
    virtual std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo,
                                                              const bool IsMemoryManaged) const
    {
        IgnoreUnused(IsMemoryManaged);
        return CreateTensorHandle(tensorInfo);
    }

    virtual std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo,
                                                              DataLayout dataLayout,
                                                              const bool IsMemoryManaged) const
    {
        IgnoreUnused(IsMemoryManaged);
        return CreateTensorHandle(tensorInfo, dataLayout);
    }

    virtual const FactoryId& GetId() const = 0;

    virtual bool SupportsInPlaceComputation() const { return false; }

    virtual bool SupportsSubTensors() const = 0;

    virtual bool SupportsMapUnmap() const { return true; }

    virtual MemorySourceFlags GetExportFlags() const { return 0; }
    virtual MemorySourceFlags GetImportFlags() const { return 0; }

    virtual std::vector<Capability> GetCapabilities(const IConnectableLayer* layer,
                                                    const IConnectableLayer* connectedLayer,
                                                    CapabilityClass capabilityClass)
    {
        IgnoreUnused(layer);
        IgnoreUnused(connectedLayer);
        IgnoreUnused(capabilityClass);
        return std::vector<Capability>();
    }
};

enum class EdgeStrategy
{
    Undefined,           /// No strategy has been defined. Used internally to verify integrity of optimizations.
    DirectCompatibility, /// Destination backend can work directly with tensors on the source backend.
    ExportToTarget,      /// The source backend's tensor data can be exported to the destination backend's tensor without a copy.
    CopyToTarget         /// Copy contents from the source backend's tensor to the destination backend's tensor.
};

} // namespace armnn
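
// -----------------------------------------------------------------------------
// Illustrative sketch only (not part of the Arm NN sources): a minimal concrete
// factory showing which members of armnn::ITensorHandleFactory a backend must
// implement. The namespace, class name, and the "return nullptr" bodies are
// assumptions made for illustration; a real backend would return its own
// ITensorHandle implementation and report genuine sub-tensor, import, and
// export support. The defaulted virtuals (SupportsMapUnmap, GetImportFlags,
// GetExportFlags, ...) are inherited unchanged.
// -----------------------------------------------------------------------------
namespace sample
{

class SketchTensorHandleFactory : public armnn::ITensorHandleFactory
{
public:
    // Hypothetical factory id; exposing it statically lets it be registered
    // with, and looked up by, the surrounding tensor-handle machinery.
    static const FactoryId& GetIdStatic()
    {
        static const FactoryId s_Id("SketchTensorHandleFactory");
        return s_Id;
    }

    const FactoryId& GetId() const override { return GetIdStatic(); }

    bool SupportsSubTensors() const override { return false; }

    // Keep the base-class overloads taking IsMemoryManaged visible despite the
    // overrides below (C++ name hiding would otherwise conceal them).
    using armnn::ITensorHandleFactory::CreateTensorHandle;

    std::unique_ptr<armnn::ITensorHandle> CreateSubTensorHandle(armnn::ITensorHandle& parent,
                                                                armnn::TensorShape const& subTensorShape,
                                                                unsigned int const* subTensorOrigin) const override
    {
        // Sub-tensors are not supported by this sketch.
        armnn::IgnoreUnused(parent, subTensorShape, subTensorOrigin);
        return nullptr;
    }

    std::unique_ptr<armnn::ITensorHandle> CreateTensorHandle(const armnn::TensorInfo& tensorInfo) const override
    {
        // A real backend would allocate and return its own ITensorHandle here.
        armnn::IgnoreUnused(tensorInfo);
        return nullptr;
    }

    std::unique_ptr<armnn::ITensorHandle> CreateTensorHandle(const armnn::TensorInfo& tensorInfo,
                                                             armnn::DataLayout dataLayout) const override
    {
        armnn::IgnoreUnused(tensorInfo, dataLayout);
        return nullptr;
    }

    std::vector<armnn::Capability> GetCapabilities(const armnn::IConnectableLayer* layer,
                                                   const armnn::IConnectableLayer* connectedLayer,
                                                   armnn::CapabilityClass capabilityClass) override
    {
        armnn::IgnoreUnused(layer, connectedLayer);
        std::vector<armnn::Capability> capabilities;
        if (capabilityClass == armnn::CapabilityClass::PaddingRequired)
        {
            // Report that this hypothetical backend does not require padding.
            capabilities.emplace_back(capabilityClass, false);
        }
        return capabilities;
    }
};

} // namespace sample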