• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 //
2 // Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #pragma once
7 
#include <armnn/Types.hpp>
#include <armnn/IRuntime.hpp>
#include <armnn/Deprecated.hpp>

#include <ExecutionData.hpp>
#include <ISubgraphViewConverter.hpp>
#include <WorkingMemDescriptor.hpp>

#include <armnn/backends/IBackendContext.hpp>
#include <armnn/backends/IMemoryManager.hpp>
#include <armnn/backends/ITensorHandleFactory.hpp>
#include <armnn/backends/OptimizationViews.hpp>
#include <armnn/backends/SubgraphView.hpp>

#include <client/include/backends/IBackendProfiling.hpp>
#include <client/include/backends/IBackendProfilingContext.hpp>

#include <memory>
#include <ostream>
#include <sstream>
#include <vector>
27 
28 namespace armnn
29 {
30 class IWorkloadFactory;
31 class IMemoryManager;
32 class ILayerSupport;
33 
/// Simple (major, minor) version pair identifying a revision of the Backend API.
/// Default-constructed as 0.0; comparable for equality and ordering.
struct BackendVersion
{
    uint32_t m_Major;
    uint32_t m_Minor;

    /// Defaults to version 0.0.
    constexpr BackendVersion() : m_Major(0), m_Minor(0) {}

    /// Constructs an explicit major.minor version.
    constexpr BackendVersion(uint32_t major, uint32_t minor) : m_Major(major), m_Minor(minor) {}

    /// Two versions are equal when both components match (self-compare short-circuits).
    bool operator==(const BackendVersion& other) const
    {
        if (this == &other)
        {
            return true;
        }
        return m_Major == other.m_Major && m_Minor == other.m_Minor;
    }

    /// Lexicographic order: major first, then minor.
    bool operator<=(const BackendVersion& other) const
    {
        if (m_Major != other.m_Major)
        {
            return m_Major < other.m_Major;
        }
        return m_Minor <= other.m_Minor;
    }

    /// Lexicographic order: major first, then minor.
    bool operator>=(const BackendVersion& other) const
    {
        if (m_Major != other.m_Major)
        {
            return m_Major > other.m_Major;
        }
        return m_Minor >= other.m_Minor;
    }
};
69 
operator <<(std::ostream & os,const BackendVersion & backendVersion)70 inline std::ostream& operator<<(std::ostream& os, const BackendVersion& backendVersion)
71 {
72     os << "[" << backendVersion.m_Major << "." << backendVersion.m_Minor << "]";
73 
74     return os;
75 }
76 
77 class IBackendInternal : public IBackend
78 {
79 protected:
80     /// Creation must be done through a specific
81     /// backend interface.
82     IBackendInternal() = default;
83 
84 public:
85     /// Allow backends created by the factory function
86     /// to be destroyed through IBackendInternal.
87     ~IBackendInternal() override = default;
88 
89     using IWorkloadFactoryPtr = std::unique_ptr<IWorkloadFactory>;
90     using IBackendContextPtr = std::unique_ptr<IBackendContext>;
91     /// This is the bridge between backend and backend profiling we'll keep it in the backend namespace.
92     using IBackendProfilingContextPtr = std::shared_ptr<arm::pipe::IBackendProfilingContext>;
93     using IBackendProfilingPtr = std::unique_ptr<arm::pipe::IBackendProfiling>;
94     using ILayerSupportSharedPtr = std::shared_ptr<ILayerSupport>;
95 
96     using IBackendSpecificModelContextPtr = std::shared_ptr<IBackendModelContext>;
97 
98     using IMemoryManagerUniquePtr = std::unique_ptr<IMemoryManager>;
99     using IMemoryManagerSharedPtr = std::shared_ptr<IMemoryManager>;
100 
101     virtual IMemoryManagerUniquePtr CreateMemoryManager() const;
102 
103     virtual IWorkloadFactoryPtr CreateWorkloadFactory(
104         const IMemoryManagerSharedPtr& memoryManager = nullptr) const = 0;
105 
106     virtual IWorkloadFactoryPtr CreateWorkloadFactory(
107         class TensorHandleFactoryRegistry& tensorHandleFactoryRegistry) const;
108 
109     virtual IWorkloadFactoryPtr CreateWorkloadFactory(
110         const IMemoryManagerSharedPtr& memoryManager,
111         const ModelOptions& modelOptions) const;
112 
113     virtual IWorkloadFactoryPtr CreateWorkloadFactory(
114         class TensorHandleFactoryRegistry& tensorHandleFactoryRegistry,
115         const ModelOptions& modelOptions) const;
116 
117     virtual IWorkloadFactoryPtr CreateWorkloadFactory(
118         class TensorHandleFactoryRegistry& tensorHandleFactoryRegistry,
119         const ModelOptions& modelOptions,
120         MemorySourceFlags inputFlags,
121         MemorySourceFlags outputFlags) const;
122 
123     /// Create the runtime context of the backend
124     ///
125     /// Implementations may return a default-constructed IBackendContextPtr if
126     /// no context is needed at runtime.
127     /// Implementations must throw BackendUnavailableException if the backend
128     /// cannot be used (for example, necessary accelerator hardware is not present).
129     /// The default implementation always returns a default-constructed pointer.
130     virtual IBackendContextPtr CreateBackendContext(const IRuntime::CreationOptions&) const;
131 
132     virtual IBackendSpecificModelContextPtr CreateBackendSpecificModelContext(const ModelOptions& modelOptions) const;
133 
134     /// Create context specifically used for profiling interaction from backends.
135     virtual IBackendProfilingContextPtr CreateBackendProfilingContext(const IRuntime::CreationOptions& creationOptions,
136                                                                       IBackendProfilingPtr& backendProfiling);
137 
138     virtual ILayerSupportSharedPtr GetLayerSupport() const = 0;
139 
140     virtual ILayerSupportSharedPtr GetLayerSupport(const ModelOptions& modelOptions) const;
141 
142     virtual OptimizationViews OptimizeSubgraphView(const SubgraphView& subgraph) const;
143 
144     virtual OptimizationViews OptimizeSubgraphView(const SubgraphView& subgraph,
145                                                    const ModelOptions& modelOptions) const;
146 
147     bool SupportsTensorAllocatorAPI() const;
148 
149     ITensorHandleFactory::FactoryId GetBackwardCompatibleFavoriteHandleFactory();
150 
151     /// (Optional) Returns a vector of supported TensorHandleFactory ids in preference order.
152     virtual std::vector<ITensorHandleFactory::FactoryId> GetHandleFactoryPreferences() const;
153 
154     /// (Optional) Register TensorHandleFactories
155     /// Either this method or CreateMemoryManager() and
156     /// IWorkloadFactory::CreateTensor() IWorkloadFactory::CreateSubtensor() methods must be implemented.
RegisterTensorHandleFactories(class TensorHandleFactoryRegistry &)157     virtual void RegisterTensorHandleFactories(class TensorHandleFactoryRegistry& /*registry*/) {}
158 
159     /// (Optional) Register TensorHandleFactories
160     /// Either this method or CreateMemoryManager() and
161     /// IWorkloadFactory::CreateTensor() IWorkloadFactory::CreateSubtensor() methods must be implemented.
162     virtual void RegisterTensorHandleFactories(class TensorHandleFactoryRegistry& registry,
163                                                MemorySourceFlags inputFlags,
164                                                MemorySourceFlags outputFlags);
165 
166     /// Returns the version of the Backend API
GetApiVersion()167     static constexpr BackendVersion GetApiVersion() { return BackendVersion(1, 0); }
168 
169     /// Returns a BackendCapability if the backend lists the capability
170     /// The BackendCapability must then be inspected to check whether or not that BackendCapability is supported
171     /// Otherwise returns an EmptyOptional if the BackendCapability is unlisted
GetCapabilities() const172     virtual BackendCapabilities GetCapabilities() const
173     {
174         return BackendCapabilities("IBackendInternal NullCapabilities");
175     };
176 
177     /// Signals the backend to use a custom memory allocator provided by the user
178     ///
179     /// \param allocator - a pointer to the provided ICustomAllocator to use with this backend
180     /// \param errMsg - Optional string variable to return error messages
181     /// \return - Returns true if switching to custom allocator was successful
UseCustomMemoryAllocator(std::shared_ptr<ICustomAllocator> allocator,armnn::Optional<std::string &> errMsg)182     virtual bool UseCustomMemoryAllocator(std::shared_ptr<ICustomAllocator> allocator,
183                                           armnn::Optional<std::string&> errMsg)
184     {
185         IgnoreUnused(allocator);
186         if (errMsg)
187         {
188             std::stringstream message;
189             message << "The backend " << GetId() << " doesn't support using a custom allocator. This error might"
190                                                     " be related with the protected mode if the backend doesn't"
191                                                     " fully support it.";
192 
193             errMsg.value() = message.str();
194         }
195         return false;
196     }
197 
198     /// Returns the default memory allocator for the backend
199     ///
200     /// \return - Returns unique pointer to the Default Allocator of the Backend
GetDefaultAllocator() const201     virtual std::unique_ptr<ICustomAllocator> GetDefaultAllocator() const
202     {
203         throw armnn::Exception("GetDefaultAllocator: Function has not been implemented in backend.");
204     }
205 
206     /// Returns the number of files cached if backend supports caching
207     ///
208     /// \return - Returns 0 if backend does not support caching otherwise number of files cached
GetNumberOfCacheFiles() const209     virtual unsigned int GetNumberOfCacheFiles() const { return 0; }
210 
211     /// Returns ExecutionData for the backend
212     ///
213     /// \param workingMemDescriptor - Vectors of input and output TensorHandles for a layer
214     /// \return - Returns backend specific ExecutionData generated for a layer
CreateExecutionData(WorkingMemDescriptor & workingMemDescriptor) const215     virtual ExecutionData CreateExecutionData(WorkingMemDescriptor& workingMemDescriptor) const
216     {
217         IgnoreUnused(workingMemDescriptor);
218         throw armnn::Exception("CreateExecutionData: Function has not been implemented in backend.");
219     };
220 
221     /// Update the ExecutionData for a layer. It is used to swap in pre-imported tensor handles
222     ///
223     /// \param executionData - Backend specific ExecutionData generated for a layer
224     /// \param workingMemDescriptor - Vectors of input and output TensorHandles for a layer
UpdateExecutionData(ExecutionData & executionData,WorkingMemDescriptor & workingMemDescriptor) const225     virtual void UpdateExecutionData(ExecutionData& executionData, WorkingMemDescriptor& workingMemDescriptor) const
226     {
227         IgnoreUnused(executionData);
228         IgnoreUnused(workingMemDescriptor);
229         throw armnn::Exception("UpdateExecutionData: Function has not been implemented in backend.");
230     };
231 };
232 
/// Owning smart-pointer alias for IBackendInternal instances.
using IBackendInternalUniquePtr = std::unique_ptr<IBackendInternal>;
234 
235 } // namespace armnn
236