/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/core/kernels/hexagon/hexagon_ops_definitions.h"

#include "tensorflow/core/framework/types.h"

// CAVEAT: Uncomment the following macro definition if you want to use
// experimental hexagon ops.
//#define ENABLE_EXPERIMENTAL_HEXNN_OPS

namespace tensorflow {

// HVX internal supported op names
// TODO(satok): Remove this map once the hexnn lib supports an API to retrieve
// the op id from an op name and data type.
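// Note: GetOpIdFor() returns the enum value itself as the SOC op id, so the
// order of the entries below is significant (it presumably mirrors the op id
// table in the hexagon nn library).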
enum class HexagonOpsDefinitions::SupportedOpType {
  INPUT,
  OUTPUT,
  NOP,
  OP_CONST, /* OP_ is required to avoid compilation error on windows */
  CHECK,
  CLOSE_FLOAT32,
  CLOSE_QINT8,
  CLOSE_Q_QINT8,
  CLOSE_INT32,
  CLOSE_QINT32,
  PPRINT_8,
  PPRINT_32,
  PPRINT_FLOAT,
  PREFREE,
  FLATTEN,

#ifdef ENABLE_EXPERIMENTAL_HEXNN_OPS
  // With Reference
  QUANTIZEDCONV2D_8X8TO32,
  QUANTIZEDCONV2D_8X8TO32_REF,
  QUANTIZEDMATMUL_8X8TO32,
  QUANTIZEDMATMUL_8X8TO32_REF,
  QUANTIZEDOWNANDSHRINKRANGE_32TO8,
  QUANTIZEDOWNANDSHRINKRANGE_32TO8_REF,
  QUANTIZEDRELU_8,
  QUANTIZEDRELU_8_REF,
  QUANTIZEDRELUX_8,
  QUANTIZEDRELUX_8_REF,
  QUANTIZEDMAXPOOL_8,
  QUANTIZEDMAXPOOL_8_REF,
  QUANTIZEDAVGPOOL_8,
  QUANTIZEDAVGPOOL_8_REF,
  QUANTIZEDCONCAT_8,
  QUANTIZEDCONCAT_8_REF,
  QUANTIZEDBIASADD_8P8TO32,
  QUANTIZEDBIASADD_8P8TO32_REF,
  MIN_F,
  MIN_F_REF,
  MAX_F,
  MAX_F_REF,
  QUANTIZE,
  QUANTIZE_REF,
  DEQUANTIZE,
  DEQUANTIZE_REF,
  SUPERNODE_8X8P8TO8,
  SUPERNODE_8X8P8TO8_REF,

  QUANTIZEDFLATTEN,
  SOFTMAX_F,
  CONV2D_F,
  MATMUL_F,
  RELU_F,
  RELUX_F,
  AVGPOOL_F,
  MAXPOOL_F,
  CONCAT_F,
  BIASADD_F,
  LRN_F,

  VARIABLE,
  ASSIGN,
  RESHAPE,
  QUANTIZED_RESHAPE,
  TANH_F,
  SIGMOID_F,
  SLICE_8,
  SLICE_F,
  QUANTIZED_SLICE_8,
  ADD_F,
  MUL_F,
  MINIMUM_F,
  MAXIMUM_F,

  REQUANTIZE_32_TO_8,
  REQUANTIZE_32_TO_8_REF,
  REQUANTIZATION_RANGE_32,
  REQUANTIZATION_RANGE_32_REF,

  NEG_F,
  SUB_F,
  ADD_N_F,
  RANGE_INT32,
  RANK_INT32,
  TRANSPOSE_INT32,
  TRANSPOSE_F,
  INSTANCE_NORM_F,
  QUANTIZED_INSTANCENORM_8,
  QUANTIZED_INSTANCENORM_8_REF,
  SUB_INT32,
  ADD_INT32,
  SPLIT_F,
  DEQUANTIZE_QINT32_F,
  PRELU_F,
  QUANTIZED_PRELU_8,
  SUM_F,
  PROD_F,
  MUL_INT32,
  LOGICAL_AND_INT32,
  LOGICALOR_INT32,
  LOGICAL_XOR_INT32,
  SPAPE_INT32,
  PACK_INT32,
  MIRROR_PAD_F,
  RESIZE_NEAREST_NEIGHBOR_F,
  STRIDED_SLICE_INT32,
  STRIDED_SLICE_F,
  EXPAND_DIMS_INT32,
  EXPAND_DIMS_F,

  LOG_SOFTMAX_F,
  SPLIT_INT32,
  QUANTIZED_SPLIT_8,

  DECONV_F,
  QUANTIZED_DECONV_8X8TO32,
  QUANTIZED_DECONV_8X8TO32_REF,

  QUANTIZED_MUL_8x8to32,
  QUANTIZED_MUL_8x8to32_REF,
  QUANTIZED_ADD_8p8to32,
  QUANTIZED_ADD_8p8to32_REF,
  QUANTIZED_SIGMOID_8,
  QUANTIZED_SIGMOID_8_REF,
  QUANTIZED_TANH_8,
  QUANTIZED_TANH_8_REF,
  QUANTIZED_SOFTMAX_8,
  QUANTIZED_SOFTMAX_8_REF,
  QUANTIZED_LRN_8,
  QUANTIZED_LRN_8_REF,
  QUANTIZED_PAD2D_FRAME_8P,
  QUANTIZED_PAD2D_FRAME_8P_REF,
  QUANTIZED_SUB_8P8TO32,
  QUANTIZED_SUB_8P8TO32_REF,
  QUANTIZED_MAXIMUM_8,
  QUANTIZED_MAXIMUM_8_REF,
  QUANTIZED_MINIMUM_8,
  QUANTIZED_MINIMUM_8_REF,

  PAD_F,
  SPACE_TO_BATCH_ND_F,
  BATCH_TO_SPACE_ND_F,
  RESIZE_BILINEAR_F,
  CONCAT_V2_F,

#else
  // With Reference
  QUANTIZEDCONV2D_8X8TO32,
  QUANTIZEDCONV2D_8X8TO32_REF,
  QUANTIZEDMATMUL_8X8TO32,
  QUANTIZEDMATMUL_8X8TO32_REF,
  QUANTIZEDOWNANDSHRINKRANGE_32TO8,
  QUANTIZEDOWNANDSHRINKRANGE_32TO8_REF,
  QUANTIZEDRELU_8,
  QUANTIZEDRELU_8_REF,
  QUANTIZEDRELUX_8,
  QUANTIZEDRELUX_8_REF,
  QUANTIZEDSIGMOID_8,
  QUANTIZEDSIGMOID_8_REF,
  QUANTIZEDTANH_8,
  QUANTIZEDTANH_8_REF,
  QUANTIZEDMAXPOOL_8,
  QUANTIZEDMAXPOOL_8_REF,
  QUANTIZEDAVGPOOL_8,
  QUANTIZEDAVGPOOL_8_REF,
  QUANTIZEDCONCAT_8,
  QUANTIZEDCONCAT_8_REF,
  QUANTIZEDBIASADD_8P8TO32,
  QUANTIZEDBIASADD_8P8TO32_REF,
  QUANTIZEDSOFTMAX_8,
  QUANTIZEDSOFTMAX_8_REF,
  QUANTIZEDLRN_8,
  QUANTIZEDLRN_8_REF,
  MIN_F,
  MIN_F_REF,
  MAX_F,
  MAX_F_REF,
  QUANTIZE,
  QUANTIZE_REF,
  DEQUANTIZE,
  DEQUANTIZE_REF,
  SUPERNODE_8X8P8TO8,
  SUPERNODE_8X8P8TO8_REF,

  QUANTIZEDFLATTEN,
  SOFTMAX_F,
  CONV2D_F,
  MATMUL_F,
  RELU_F,
  RELUX_F,
  AVGPOOL_F,
  MAXPOOL_F,
  CONCAT_F,
  BIASADD_F,
  LRN_F,

  VARIABLE,
  ASSIGN,
  RESHAPE,
  QUANTIZED_RESHAPE,
  TANH_F,
  SIGMOID_F,
  SLICE_8,
  SLICE_F,
  QUANTIZED_SLICE_8,
  ADD_F,
  MUL_F,
  MINIMUM_F,
  MAXIMUM_F,

  REQUANTIZE_32_TO_8,
  REQUANTIZE_32_TO_8_REF,
  REQUANTIZATION_RANGE_32,
  REQUANTIZATION_RANGE_32_REF,

  NEG_F,
  SUB_F,
  ADD_N_F,
  RANGE_INT32,
  RANK_INT32,
  TRANSPOSE_INT32,
  TRANSPOSE_F,
  INSTANCE_NORM_F,
  QUANTIZED_INSTANCENORM_8,
  QUANTIZED_INSTANCENORM_8_REF,
  SUB_INT32,
  ADD_INT32,
  SPLIT_F,
  DEQUANTIZE_QINT32_F,
  PRELU_F,
  QUANTIZED_PRELU_8,
  SUM_F,
  PROD_F,
  MUL_INT32,
  LOGICAL_AND_INT32,
  LOGICALOR_INT32,
  LOGICAL_XOR_INT32,
  SPAPE_INT32,
  PACK_INT32,
  MIRROR_PAD_F,
  RESIZE_NEAREST_NEIGHBOR_F,
  STRIDED_SLICE_INT32,
  STRIDED_SLICE_F,
  EXPAND_DIMS_INT32,
  EXPAND_DIMS_F,

  LOG_SOFTMAX_F,
  SPLIT_INT32,
  QUANTIZED_SPLIT_8,

  DECONV_F,
  QUANTIZED_DECONV_8X8TO32,
  QUANTIZED_DECONV_8X8TO32_REF,
#endif

  SUPPORTED_OP_TYPE_COUNT  // TERMINATOR. DO NOT REMOVE
};

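// Registers a (data type vector -> SOC op type) entry for the given TF op
// name, creating the per-name entry vector on first use.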
/* static */ void HexagonOpsDefinitions::EmplaceOpType(
    const string& op_type, const DataTypeVector& dt_vec,
    const SupportedOpType supported_op_type,
    std::unordered_map<string, std::vector<DataTypeToOp>>* map) {
  if (map->count(op_type) == 0) {
    map->emplace(op_type, std::vector<DataTypeToOp>());
  }
  map->at(op_type).emplace_back(
      std::forward_as_tuple(dt_vec, supported_op_type));
}

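// Builds the map from TF op name to the list of (data types, SOC op type)
// pairs supported for that name.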
/* static */ std::unordered_map<
    string, std::vector<HexagonOpsDefinitions::DataTypeToOp>>
HexagonOpsDefinitions::BuildOpNameToSocOpTypeMap() {
  std::unordered_map<string, std::vector<DataTypeToOp>> op_map;
  // Custom op names
  EmplaceOpType("INPUT", {}, SupportedOpType::INPUT, &op_map);
  EmplaceOpType("OUTPUT", {}, SupportedOpType::OUTPUT, &op_map);
  EmplaceOpType("NoOp", {}, SupportedOpType::NOP, &op_map);
  // Special op type for hexagon
  EmplaceOpType("FLATTEN", {}, SupportedOpType::FLATTEN, &op_map);
  // TensorFlow op names
  // CAVEAT: Keep this registration order in sync with SupportedOpType.
  EmplaceOpType("Identity", {}, SupportedOpType::NOP, &op_map);
  EmplaceOpType("Placeholder", {}, SupportedOpType::NOP, &op_map);
  EmplaceOpType("Const", {}, SupportedOpType::OP_CONST, &op_map);
  EmplaceOpType("QuantizedConv2D", {},
                SupportedOpType::QUANTIZEDCONV2D_8X8TO32, &op_map);
  EmplaceOpType("QuantizedMatMul", {},
                SupportedOpType::QUANTIZEDMATMUL_8X8TO32, &op_map);
  EmplaceOpType("QuantizeDownAndShrinkRange", {},
                SupportedOpType::QUANTIZEDOWNANDSHRINKRANGE_32TO8, &op_map);
  EmplaceOpType("QuantizedRelu", {}, SupportedOpType::QUANTIZEDRELU_8, &op_map);
  EmplaceOpType("QuantizedReluX", {}, SupportedOpType::QUANTIZEDRELUX_8,
                &op_map);
  EmplaceOpType("QuantizedMaxPool", {}, SupportedOpType::QUANTIZEDMAXPOOL_8,
                &op_map);
  EmplaceOpType("QuantizedAvgPool", {}, SupportedOpType::QUANTIZEDAVGPOOL_8,
                &op_map);
  EmplaceOpType("QuantizedConcat", {}, SupportedOpType::QUANTIZEDCONCAT_8,
                &op_map);
  EmplaceOpType("QuantizedBiasAdd", {},
                SupportedOpType::QUANTIZEDBIASADD_8P8TO32, &op_map);
  EmplaceOpType("Min", {}, SupportedOpType::MIN_F, &op_map);
  EmplaceOpType("Max", {}, SupportedOpType::MAX_F, &op_map);
  EmplaceOpType("QuantizeV2", {}, SupportedOpType::QUANTIZE, &op_map);
  EmplaceOpType("Dequantize", {}, SupportedOpType::DEQUANTIZE, &op_map);
  EmplaceOpType("Softmax", {}, SupportedOpType::SOFTMAX_F, &op_map);
  EmplaceOpType("Reshape", {}, SupportedOpType::RESHAPE, &op_map);
  EmplaceOpType("QuantizedReshape", {}, SupportedOpType::QUANTIZED_RESHAPE,
                &op_map);
  EmplaceOpType("Sigmoid", {}, SupportedOpType::SIGMOID_F, &op_map);
  EmplaceOpType("Slice", {}, SupportedOpType::SLICE_F, &op_map);
  EmplaceOpType("Add", {}, SupportedOpType::ADD_F, &op_map);
  EmplaceOpType("Mul", {}, SupportedOpType::MUL_F, &op_map);
  EmplaceOpType("Requantize", {}, SupportedOpType::REQUANTIZE_32_TO_8, &op_map);
  EmplaceOpType("RequantizationRange", {},
                SupportedOpType::REQUANTIZATION_RANGE_32, &op_map);
  EmplaceOpType("Sub", {}, SupportedOpType::SUB_F, &op_map);
  EmplaceOpType("Pack", {}, SupportedOpType::PACK_INT32, &op_map);
  EmplaceOpType("StridedSlice", {}, SupportedOpType::STRIDED_SLICE_F, &op_map);
  EmplaceOpType("ExpandDims", {}, SupportedOpType::EXPAND_DIMS_F, &op_map);
#ifdef ENABLE_EXPERIMENTAL_HEXNN_OPS
  EmplaceOpType("QuantizedMul", {}, SupportedOpType::QUANTIZED_MUL_8x8to32,
                &op_map);
  EmplaceOpType("QuantizedAdd", {}, SupportedOpType::QUANTIZED_ADD_8p8to32,
                &op_map);
  EmplaceOpType("Pad", {}, SupportedOpType::PAD_F, &op_map);
  EmplaceOpType("SpaceToBatchND", {}, SupportedOpType::SPACE_TO_BATCH_ND_F,
                &op_map);
  EmplaceOpType("BatchToSpaceND", {}, SupportedOpType::BATCH_TO_SPACE_ND_F,
                &op_map);
  EmplaceOpType("ResizeBilinear", {}, SupportedOpType::RESIZE_BILINEAR_F,
                &op_map);
  EmplaceOpType("ConcatV2", {}, SupportedOpType::CONCAT_V2_F, &op_map);
  EmplaceOpType("Conv2DBackpropInput", {}, SupportedOpType::DECONV_F, &op_map);

  EmplaceOpType("Tanh", {}, SupportedOpType::TANH_F, &op_map);
  EmplaceOpType("Split", {}, SupportedOpType::SPLIT_F, &op_map);
  EmplaceOpType("Transpose", {}, SupportedOpType::TRANSPOSE_F, &op_map);
  EmplaceOpType("Concat", {}, SupportedOpType::CONCAT_F, &op_map);
#endif
  return op_map;
}

HexagonOpsDefinitions::HexagonOpsDefinitions()
    : op_name_to_soc_op_type_map_(BuildOpNameToSocOpTypeMap()) {}

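// Meyers singleton: the instance is lazily constructed on first use and shared
// by all callers.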
/* static */ const IRemoteFusedGraphOpsDefinitions&
HexagonOpsDefinitions::getInstance() {
  const static HexagonOpsDefinitions instance{};
  return instance;
}

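// The total op count is the value of the SUPPORTED_OP_TYPE_COUNT terminator.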
int HexagonOpsDefinitions::GetTotalOpsCount() const {
  return static_cast<int>(SupportedOpType::SUPPORTED_OP_TYPE_COUNT);
}

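// Returns the SOC op id registered for the given op name and data types, or
// INVALID_OP_ID if the op is not supported.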
int HexagonOpsDefinitions::GetOpIdFor(const string& op_type,
                                      const DataTypeVector& dt_vec) const {
  if (op_name_to_soc_op_type_map_.count(op_type) > 0) {
    const std::vector<DataTypeToOp>& dt_to_op_vec =
        op_name_to_soc_op_type_map_.at(op_type);
    CHECK(!dt_to_op_vec.empty());
    // If the argument data type vector is empty, return the first entry.
    if (dt_vec.empty()) {
      return static_cast<int>(std::get<1>(dt_to_op_vec.front()));
    }
    // If only one op id is registered with an empty data type vector, assume
    // that the op supports any data types.
    if (dt_to_op_vec.size() == 1 && std::get<0>(dt_to_op_vec.front()).empty()) {
      return static_cast<int>(std::get<1>(dt_to_op_vec.front()));
    }
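    // Otherwise, look for an entry whose registered data types exactly match
    // the given dt_vec.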
    for (const DataTypeToOp& data_type_to_op : dt_to_op_vec) {
      if (std::get<0>(data_type_to_op) == dt_vec) {
        return static_cast<int>(std::get<1>(data_type_to_op));
      }
    }
  }
  return IRemoteFusedGraphOpsDefinitions::INVALID_OP_ID;
}
}  // namespace tensorflow