//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once
#include <armnn/ArmNN.hpp>

#include <CpuExecutor.h>
#include <HalInterfaces.h>
#include <LegacyHalUtils.h>
#include <NeuralNetworks.h>
#include "NamespaceAdaptor.hpp"

#include <vector>
#include <string>
#include <fstream>
#include <iomanip>
#include <sstream> // std::stringstream, used by GetModelSummary below

namespace V1_0 = ::android::hardware::neuralnetworks::V1_0;
namespace V1_1 = ::android::hardware::neuralnetworks::V1_1;

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)
namespace V1_2 = ::android::hardware::neuralnetworks::V1_2;
#endif

#ifdef ARMNN_ANDROID_NN_V1_3
namespace V1_3 = ::android::hardware::neuralnetworks::V1_3;
#endif

namespace armnn_driver
{

#ifdef ARMNN_ANDROID_S
using DataLocation = ::android::nn::DataLocation;
#elif defined(ARMNN_ANDROID_R)
using DataLocation = ::android::nn::hal::DataLocation;
#endif

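/// Returns the model to convert: the model itself for HAL 1.0-1.2, or the main subgraph for HAL 1.3.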
inline const V1_0::Model& getMainModel(const V1_0::Model& model) { return model; }
inline const V1_1::Model& getMainModel(const V1_1::Model& model) { return model; }

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)
inline const V1_2::Model& getMainModel(const V1_2::Model& model) { return model; }
#endif

#ifdef ARMNN_ANDROID_NN_V1_3
inline const V1_3::Subgraph& getMainModel(const V1_3::Model& model) { return model.main; }
#endif

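/// Permutation vector used to indicate that no permutation of the tensor data is required.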
extern const armnn::PermutationVector g_DontPermute;

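/// Exception thrown when an Android NN operand type cannot be handled by the driver.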
template <typename OperandType>
class UnsupportedOperand: public std::runtime_error
{
public:
    UnsupportedOperand(const OperandType type)
        : std::runtime_error("Operand type is unsupported")
        , m_type(type)
    {}

    OperandType m_type;
};

/// Swizzles tensor data in @a input according to the dimension mappings.
void SwizzleAndroidNn4dTensorToArmNn(const armnn::TensorInfo& tensor, const void* input, void* output,
                                     const armnn::PermutationVector& mappings);
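
// Usage sketch (assumptions: 'info' describes the 4-d source tensor, both buffers are large enough,
// and mappings[i] gives the destination dimension of source dimension i, as in the driver's
// NHWC-to-NCHW conversion):
//
//     const armnn::PermutationVector nhwcToNchw({ 0, 2, 3, 1 });
//     SwizzleAndroidNn4dTensorToArmNn(info, androidData, armnnData, nhwcToNchw);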

/// Returns a pointer to a specific location in a pool.
void* GetMemoryFromPool(V1_0::DataLocation location,
                        const std::vector<android::nn::RunTimePoolInfo>& memPools);

/// Converts an Android NN operand into an armnn::TensorInfo. Can throw UnsupportedOperand.
armnn::TensorInfo GetTensorInfoForOperand(const V1_0::Operand& operand);

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3) // Using ::android::hardware::neuralnetworks::V1_2
armnn::TensorInfo GetTensorInfoForOperand(const V1_2::Operand& operand);
#endif

#ifdef ARMNN_ANDROID_NN_V1_3 // Using ::android::hardware::neuralnetworks::V1_3
armnn::TensorInfo GetTensorInfoForOperand(const V1_3::Operand& operand);
#endif

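/// Returns a short human-readable summary of the operand.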
std::string GetOperandSummary(const V1_0::Operand& operand);

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3) // Using ::android::hardware::neuralnetworks::V1_2
std::string GetOperandSummary(const V1_2::Operand& operand);
#endif

#ifdef ARMNN_ANDROID_NN_V1_3 // Using ::android::hardware::neuralnetworks::V1_3
std::string GetOperandSummary(const V1_3::Operand& operand);
#endif

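/// Returns a multi-line summary of the model: its input/operation/output/operand counts followed by
/// per-entry summaries. Usage sketch (assumes ALOGV from <log/log.h> is available):
///
///     ALOGV("%s", GetModelSummary(model).c_str());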
template <typename HalModel>
std::string GetModelSummary(const HalModel& model)
{
    std::stringstream result;

    result << getMainModel(model).inputIndexes.size() << " input(s), "
           << getMainModel(model).operations.size() << " operation(s), "
           << getMainModel(model).outputIndexes.size() << " output(s), "
           << getMainModel(model).operands.size() << " operand(s) "
           << std::endl;

    result << "Inputs: ";
    for (uint32_t i = 0; i < getMainModel(model).inputIndexes.size(); i++)
    {
        result << GetOperandSummary(getMainModel(model).operands[getMainModel(model).inputIndexes[i]]) << ", ";
    }
    result << std::endl;

    result << "Operations: ";
    for (uint32_t i = 0; i < getMainModel(model).operations.size(); i++)
    {
        result << toString(getMainModel(model).operations[i].type) << ", ";
    }
    result << std::endl;

    result << "Outputs: ";
    for (uint32_t i = 0; i < getMainModel(model).outputIndexes.size(); i++)
    {
        result << GetOperandSummary(getMainModel(model).operands[getMainModel(model).outputIndexes[i]]) << ", ";
    }
    result << std::endl;

    return result.str();
}

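/// Dumps the contents of @a tensor to a dump file in @a dumpDir; the file name is derived from
/// @a requestName and @a tensorName.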
void DumpTensor(const std::string& dumpDir,
                const std::string& requestName,
                const std::string& tensorName,
                const armnn::ConstTensor& tensor);

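/// Writes the profiler's JSON profiling data for @a networkId to a file in @a dumpDir when
/// @a gpuProfilingEnabled is set.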
void DumpJsonProfilingIfRequired(bool gpuProfilingEnabled,
                                 const std::string& dumpDir,
                                 armnn::NetworkId networkId,
                                 const armnn::IProfiler* profiler);

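/// Exports @a optimizedNetwork as a Graphviz .dot file in @a dumpDir and returns the name of the file written.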
std::string ExportNetworkGraphToDotFile(const armnn::IOptimizedNetwork& optimizedNetwork,
                                        const std::string& dumpDir);

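/// Renames a previously exported graph .dot file (@a oldName) in @a dumpDir so that its name includes @a networkId.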
void RenameGraphDotFile(const std::string& oldName, const std::string& dumpDir, const armnn::NetworkId networkId);

/// Checks if a tensor info represents a dynamic tensor.
bool IsDynamicTensor(const armnn::TensorInfo& outputInfo);

/// Checks for ArmNN support of dynamic tensors.
bool AreDynamicTensorsSupported(void);

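/// Returns a timestamp string suitable for embedding in dump/export file names.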
std::string GetFileTimestamp();

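/// Converts an armnn::TensorInfo into the corresponding Android NN OutputShape.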
#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)
inline V1_2::OutputShape ComputeShape(const armnn::TensorInfo& info)
{
    V1_2::OutputShape shape;

    armnn::TensorShape tensorShape = info.GetShape();
    // Android expects scalars to be reported as zero-dimensional tensors
    if (tensorShape.GetDimensionality() == armnn::Dimensionality::Scalar)
    {
        shape.dimensions = android::hardware::hidl_vec<uint32_t>{};
    }
    else
    {
        android::hardware::hidl_vec<uint32_t> dimensions;
        const unsigned int numDims = tensorShape.GetNumDimensions();
        dimensions.resize(numDims);
        for (unsigned int outputIdx = 0u; outputIdx < numDims; ++outputIdx)
        {
            dimensions[outputIdx] = tensorShape[outputIdx];
        }
        shape.dimensions = dimensions;
    }

    shape.isSufficient = true;

    return shape;
}
#endif

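/// Flushes the contents of the runtime memory pools back to their backing memory.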
void CommitPools(std::vector<::android::nn::RunTimePoolInfo>& memPools);

} // namespace armnn_driver