1 //
2 // Copyright © 2017-2021,2023 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5
6 #pragma once
7 #include <armnn/ArmNN.hpp>
8
9 #include <CpuExecutor.h>
10 #include <HalInterfaces.h>
11 #include <NeuralNetworks.h>
12 #include <Utils.h>
13
14 #include <fmt/format.h>
15
16 #include <vector>
17 #include <string>
18 #include <fstream>
19 #include <iomanip>
20
21 namespace V1_0 = ::android::hardware::neuralnetworks::V1_0;
22 namespace V1_1 = ::android::hardware::neuralnetworks::V1_1;
23
24 #if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)
25 namespace V1_2 = ::android::hardware::neuralnetworks::V1_2;
26 #endif
27
28 #ifdef ARMNN_ANDROID_NN_V1_3
29 namespace V1_3 = ::android::hardware::neuralnetworks::V1_3;
30 #endif
31
32 namespace armnn_driver
33 {
34
35 #ifdef ARMNN_ANDROID_R
36 using DataLocation = ::android::nn::hal::DataLocation;
37 #endif
38
/// For HAL versions up to V1_1 a Model has no separate main subgraph,
/// so the "main model" is the model itself (identity overloads).
inline const V1_0::Model& getMainModel(const V1_0::Model& model) { return model; }
inline const V1_1::Model& getMainModel(const V1_1::Model& model) { return model; }
41
#if defined (ARMNN_ANDROID_NN_V1_2) || defined (ARMNN_ANDROID_NN_V1_3)
// V1_2 models likewise have no separate main subgraph: identity overload.
inline const V1_2::Model& getMainModel(const V1_2::Model& model) { return model; }
#endif
45
#ifdef ARMNN_ANDROID_NN_V1_3
// From HAL 1.3 a Model can contain multiple subgraphs; the main one is model.main.
inline const V1_3::Subgraph& getMainModel(const V1_3::Model& model) { return model.main; }
#endif
49
/// Shared permutation vector used when tensor data needs no swizzling (defined in the .cpp).
extern const armnn::PermutationVector g_DontPermute;
51
/// Exception thrown when an operand's data type is not supported by the driver.
/// Carries the offending operand type so the caller can report or log it.
template <typename OperandType>
class UnsupportedOperand: public std::runtime_error
{
public:
    // explicit: prevent accidental implicit conversion from an OperandType
    // into an exception object.
    explicit UnsupportedOperand(const OperandType type)
        : std::runtime_error("Operand type is unsupported")
        , m_type(type)
    {}

    /// The unsupported operand type that triggered this exception.
    OperandType m_type;
};
63
/// Swizzles tensor data in @a input according to the dimension mappings.
/// @param tensor   Tensor info for the data being swizzled (non-const;
///                 NOTE(review): presumably updated to the permuted shape — confirm in the .cpp).
/// @param input    Source buffer in the original layout.
/// @param output   Destination buffer receiving the permuted data.
/// @param mappings Permutation to apply to the 4 dimensions.
void SwizzleAndroidNn4dTensorToArmNn(armnn::TensorInfo& tensor, const void* input, void* output,
                                     const armnn::PermutationVector& mappings);

/// Returns a pointer to a specific location in a pool
/// identified by @a location (pool index + offset) within @a memPools.
void* GetMemoryFromPool(V1_0::DataLocation location,
                        const std::vector<android::nn::RunTimePoolInfo>& memPools);

/// Converts a HAL V1_0 operand into an ArmNN TensorInfo.
/// Can throw UnsupportedOperand
armnn::TensorInfo GetTensorInfoForOperand(const V1_0::Operand& operand);

/// Returns a short human-readable description of @a operand for logging.
std::string GetOperandSummary(const V1_0::Operand& operand);

// Returns true for any quantized data type, false for the rest.
bool isQuantizedOperand(const V1_0::OperandType& operandType);
79
#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3) // Using ::android::hardware::neuralnetworks::V1_2
// V1_2 overloads of the operand helpers declared above for V1_0.
armnn::TensorInfo GetTensorInfoForOperand(const V1_2::Operand& operand);

std::string GetOperandSummary(const V1_2::Operand& operand);

bool isQuantizedOperand(const V1_2::OperandType& operandType);
#endif

#ifdef ARMNN_ANDROID_NN_V1_3 // Using ::android::hardware::neuralnetworks::V1_3
// V1_3 overloads of the same operand helpers.
armnn::TensorInfo GetTensorInfoForOperand(const V1_3::Operand& operand);

std::string GetOperandSummary(const V1_3::Operand& operand);

bool isQuantizedOperand(const V1_3::OperandType& operandType);
#endif
95
/// Builds a human-readable, multi-line summary of a HAL model: counts of
/// inputs, operations, outputs and operands, followed by per-input,
/// per-operation and per-output details.
/// @param model Any supported HAL model type (V1_0 .. V1_3); its main
///              (sub)graph is the part summarised.
/// @return The formatted summary string.
template <typename HalModel>
std::string GetModelSummary(const HalModel& model)
{
    // Resolve the main (sub)graph once up front rather than on every access.
    const auto& mainModel = getMainModel(model);

    std::stringstream result;

    result << mainModel.inputIndexes.size() << " input(s), "
           << mainModel.operations.size() << " operation(s), "
           << mainModel.outputIndexes.size() << " output(s), "
           << mainModel.operands.size() << " operand(s) "
           << std::endl;

    result << "Inputs: ";
    for (uint32_t i = 0; i < mainModel.inputIndexes.size(); i++)
    {
        result << GetOperandSummary(mainModel.operands[mainModel.inputIndexes[i]]) << ", ";
    }
    result << std::endl;

    result << "Operations: ";
    for (uint32_t i = 0; i < mainModel.operations.size(); i++)
    {
        // toString returns a std::string; stream it directly (c_str() was redundant).
        result << toString(mainModel.operations[i].type) << ", ";
    }
    result << std::endl;

    result << "Outputs: ";
    for (uint32_t i = 0; i < mainModel.outputIndexes.size(); i++)
    {
        result << GetOperandSummary(mainModel.operands[mainModel.outputIndexes[i]]) << ", ";
    }
    result << std::endl;

    return result.str();
}
130
/// Writes the contents of @a tensor to a file under @a dumpDir, with a name
/// derived from @a requestName and @a tensorName (debug aid; defined in the .cpp).
template <typename TensorType>
void DumpTensor(const std::string& dumpDir,
                const std::string& requestName,
                const std::string& tensorName,
                const TensorType& tensor);

/// Dumps the profiler's JSON output for @a networkId into @a dumpDir when
/// @a gpuProfilingEnabled is set.
void DumpJsonProfilingIfRequired(bool gpuProfilingEnabled,
                                 const std::string& dumpDir,
                                 armnn::NetworkId networkId,
                                 const armnn::IProfiler* profiler);

/// Exports @a optimizedNetwork as a GraphViz .dot file in @a dumpDir.
/// @return The name of the file produced (see definition for failure behaviour).
std::string ExportNetworkGraphToDotFile(const armnn::IOptimizedNetwork& optimizedNetwork,
                                        const std::string& dumpDir);

/// Serializes @a network; output goes to @a dumpDir and, when
/// @a dataCachingActive is true, into @a dataCacheData for the NNAPI data cache.
std::string SerializeNetwork(const armnn::INetwork& network,
                             const std::string& dumpDir,
                             std::vector<uint8_t>& dataCacheData,
                             bool dataCachingActive = true);

/// Renames the previously exported serialized-network and .dot files in
/// @a dumpDir, incorporating @a networkId into the new names.
void RenameExportedFiles(const std::string& existingSerializedFileName,
                         const std::string& existingDotFileName,
                         const std::string& dumpDir,
                         const armnn::NetworkId networkId);

/// Renames a single file @a existingName in @a dumpDir to a name derived from
/// @a networkId and @a extension.
void RenameFile(const std::string& existingName,
                const std::string& extension,
                const std::string& dumpDir,
                const armnn::NetworkId networkId);

/// Checks if a tensor info represents a dynamic tensor
bool IsDynamicTensor(const armnn::TensorInfo& outputInfo);

/// Checks for ArmNN support of dynamic tensors.
bool AreDynamicTensorsSupported(void);

/// Returns a timestamp string suitable for embedding in exported file names.
std::string GetFileTimestamp();
167
#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)
/// Converts an ArmNN TensorInfo into the HAL V1_2 OutputShape reported back
/// to Android. A scalar is reported as a zero-dimensional tensor, as NNAPI expects.
inline V1_2::OutputShape ComputeShape(const armnn::TensorInfo& info)
{
    V1_2::OutputShape outputShape;
    outputShape.isSufficient = true;

    const armnn::TensorShape tensorShape = info.GetShape();
    if (tensorShape.GetDimensionality() == armnn::Dimensionality::Scalar)
    {
        // Android will expect scalars as a zero dimensional tensor
        outputShape.dimensions = android::hardware::hidl_vec<uint32_t>{};
    }
    else
    {
        const unsigned int numDims = tensorShape.GetNumDimensions();
        android::hardware::hidl_vec<uint32_t> dims;
        dims.resize(numDims);
        for (unsigned int d = 0u; d < numDims; ++d)
        {
            dims[d] = tensorShape[d];
        }
        outputShape.dimensions = dims;
    }

    return outputShape;
}
#endif
196
/// Commits any pending modifications in @a memPools back to their backing
/// memory (NOTE(review): presumably flushes each RunTimePoolInfo — confirm in the .cpp).
void CommitPools(std::vector<::android::nn::RunTimePoolInfo>& memPools);

/// Validates a single request argument against the tensor info expected by the
/// prepared model; @a descString is used when composing any error message.
/// @return An ErrorStatus indicating whether the argument is acceptable.
template <typename ErrorStatus, typename Request>
ErrorStatus ValidateRequestArgument(const Request& request,
                                    const armnn::TensorInfo& tensorInfo,
                                    const V1_0::RequestArgument& requestArgument,
                                    std::string descString);
204 } // namespace armnn_driver
205