/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_DELEGATES_HEXAGON_BUILDERS_OP_BUILDER_H_
#define TENSORFLOW_LITE_DELEGATES_HEXAGON_BUILDERS_OP_BUILDER_H_

#include <limits>
#include <map>
#include <memory>
#include <string>
#include <utility>
#include <vector>

#include "hexagon/hexagon_nn_ops.h"
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/delegates/hexagon/hexagon_implementation.h"
#include "tensorflow/lite/delegates/hexagon/hexagon_nn/hexagon_nn.h"

namespace tflite {
namespace delegates {
namespace hexagon {

// Wrapper that holds all data representing a single node in the Hexagon graph.
struct OpNode {
  std::vector<hexagon_nn_input> inputs;
  std::vector<hexagon_nn_output> outputs;
  // Value from the enum of ops in hexagon_nn_ops.
  int op_type;
  hexagon_nn_padding_type padding_type = NN_PAD_NA;
  // Id of the node in the Hexagon graph.
  int node_id = -1;
  // Index/ID of the node in the TFLite graph.
  // This ID can be duplicated if one TFLite node creates multiple Hexagon op
  // nodes.
  int tflite_node_index = -1;
};

class GraphBuilder;

// Represents a single op in the TFLite graph.
// For each op in TFLite there should be an OpBuilder; this builder is
// responsible for constructing the equivalent node(s) in the Hexagon graph. A
// single builder can create one or more ops in the Hexagon graph. When adding
// a new op, users should inherit from this class and implement:
// - PopulateSubGraph: given inputs/outputs, constructs the equivalent Hexagon
//   nodes.
// - RegisterOutputs: maps the final outputs of the node to their equivalents
//   in the Hexagon graph.
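//
// Example (an illustrative sketch only: `FooOpBuilder` and its body are
// hypothetical, not part of this delegate):
//
//   class FooOpBuilder : public OpBuilder {
//    public:
//     FooOpBuilder(GraphBuilder* graph_builder, int op_type)
//         : OpBuilder(graph_builder, op_type) {}
//     TfLiteStatus PopulateSubGraph(const TfLiteIntArray* inputs,
//                                   const TfLiteIntArray* outputs,
//                                   TfLiteContext* context) override {
//       // Add inputs/min/max/outputs for the equivalent Hexagon node(s).
//       return kTfLiteOk;
//     }
//     TfLiteStatus RegisterOutputs(const TfLiteIntArray* outputs,
//                                  TfLiteContext* context) override {
//       // Map each TFLite output tensor to its Hexagon TensorID.
//       return kTfLiteOk;
//     }
//   };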
class OpBuilder {
 public:
  // Const representing the shape of a scalar value.
  static constexpr int kScalarShape[] = {1, 1, 1, 1};

  OpBuilder(GraphBuilder* graph_builder, int hexagon_op_type)
      : graph_builder_(graph_builder) {
    op_node_.op_type = hexagon_op_type;
  }
  // A tensor is identified in the graph by a pair of IDs
  // (node ID, output tensor index): the node producing the tensor, and the
  // index of the tensor in that node's output list.
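  // For example (ids are illustrative), TensorID(5, 0) refers to the first
  // output of the Hexagon node whose id is 5.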
  using TensorID = std::pair<int, int>;

  virtual ~OpBuilder() {}

  // Sets the op type in the hexagon graph.
  void SetOpType(int op_type) { op_node_.op_type = op_type; }

  // Sets the node id in the hexagon graph.
  void SetNodeId(int node_id) { op_node_.node_id = node_id; }

  // Sets the TfLite node index in the TfLite graph.
  void SetTFLiteNodeId(int node_index) {
    op_node_.tflite_node_index = node_index;
  }

  // Marks this node as a Const node.
  void SetConstNode() { op_node_.op_type = OP_Const; }

  // Sets the padding type of the current node.
  void SetPaddingType(hexagon_nn_padding_type padding_type) {
    op_node_.padding_type = padding_type;
  }

  // Sets the builtin_data of the TFLite node this builder is responsible for.
  void SetBuiltinData(void* builtin_data) { builtin_data_ = builtin_data; }

  // Returns true if the current op is a Const op.
  bool IsConstNode() const { return op_node_.op_type == OP_Const; }

  // Subclasses should override this with logic that initializes the Hexagon
  // node(s) for the current op, given 'inputs' and 'outputs'.
  virtual TfLiteStatus PopulateSubGraph(const TfLiteIntArray* inputs,
                                        const TfLiteIntArray* outputs,
                                        TfLiteContext* context) {
    return kTfLiteOk;
  }

  // Subclasses should override this and register the final output(s) from the
  // node to the equivalent in the Hexagon graph.
  virtual TfLiteStatus RegisterOutputs(const TfLiteIntArray* outputs,
                                       TfLiteContext* context) {
    return kTfLiteOk;
  }

  // Constructs the OpNode which represents a node in the Hexagon graph.
  const OpNode* Build();

  // Returns the node index in the TFLite graph.
  int GetTFLiteNodeID() const { return op_node_.tflite_node_index; }

  // Returns the op type of the current op (in the Hexagon graph).
  int GetOpType() const { return op_node_.op_type; }

  // Returns the node id in the hexagon graph.
  int GetID() const { return op_node_.node_id; }

  // Adds the tensor identified by 'tensor_id' as input to the current op.
  void AddInput(const TensorID& tensor_id) { input_ids_.push_back(tensor_id); }

  // Adds an output to the current node. The output has the shape defined in
  // 'dims', and the size of each element is defined by 'element_size'.
  // Returns the TensorID identifying this output in the graph.
  TensorID AddOutput(const TfLiteIntArray* dims, int element_size);

  // Adds an output to the current node. Each element in the output has size
  // 'elementsize', the output has rank 'rank', and for each dimension of the
  // output the maximum size is max_sizes[i].
  // Returns the TensorID identifying this output in the graph.
  TensorID AddOutput(int elementsize, int rank,
                     const std::vector<int>& max_sizes);

  // Same as above but accepts a pointer instead of std::vector.
  TensorID AddOutput(int elementsize, int rank, const int* max_sizes_vect);
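  // For example (sizes are illustrative), AddOutput(sizeof(uint8_t), 4,
  // {1, 28, 28, 32}) would add a uint8 output whose shape is at most
  // 1x28x28x32.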

  // Sets the node that corresponds to this builder in the TFLite graph.
  void SetTfLiteNode(const TfLiteNode* node) { tflite_node_ = node; }

  // Static
  // Computes the min/max values of 'tensor' and sets the values in
  // the out params 'min' and 'max'.
  // Returns kTfLiteOk on success.
  static TfLiteStatus ComputeMinAndMaxQuantValues(const TfLiteTensor& tensor,
                                                  float* min, float* max) {
    if (tensor.type == kTfLiteUInt8) {
      return ComputeMinAndMaxQuantValues(tensor, min, max,
                                         std::numeric_limits<uint8_t>::min(),
                                         std::numeric_limits<uint8_t>::max());
    } else if (tensor.type == kTfLiteInt8) {
      return ComputeMinAndMaxQuantValues(tensor, min, max,
                                         std::numeric_limits<int8_t>::min(),
                                         std::numeric_limits<int8_t>::max());
    } else if (tensor.type == kTfLiteInt32) {
      return ComputeMinAndMaxQuantValues(tensor, min, max,
                                         std::numeric_limits<int>::min(),
                                         std::numeric_limits<int>::max());
    }
    return kTfLiteError;
  }

 protected:
  // Helper method to fetch dimensions.
  // TODO(karimnosseir): Move to a shared place.
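  // For example (values are illustrative), dims {3, 5} yields batch=1,
  // height=1, width=3, depth=5: missing leading dimensions default to 1.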
  void GetDims(int* batch_size, int* height_size, int* width_size,
               int* depth_size, const TfLiteIntArray* dims) {
    int* dim[] = {batch_size, height_size, width_size, depth_size};
    for (int i = 0; i < 4; ++i) *(dim[i]) = 1;
    for (int i = 4 - dims->size; i < 4; ++i) {
      *dim[i] = dims->data[i - (4 - dims->size)];
    }
  }

  // Computes the min and max for 'tensor' and adds them as input
  // to the node.
  TfLiteStatus ComputeAndAddMinAndMax(TfLiteContext* context,
                                      const TfLiteTensor& tensor);

  // Computes the float min and max for 'tensor', given 'min_value' and
  // 'max_value' data range. The float min and max will be set in 'min' and
  // 'max' params.
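  // For example (numbers are illustrative), a uint8 tensor with scale = 0.5
  // and zero_point = 128 yields min = 0.5 * (0 - 128) = -64.0 and
  // max = 0.5 * (255 - 128) = 63.5.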
  template <typename T>
  static TfLiteStatus ComputeMinAndMaxQuantValues(const TfLiteTensor& tensor,
                                                  float* min, float* max,
                                                  T min_value, T max_value) {
    *min = 0;
    *max = 0;
    const TfLiteQuantization& quant = tensor.quantization;
    if (quant.type != TfLiteQuantizationType::kTfLiteAffineQuantization) {
      printf("Tensor not quantized: %s\n", tensor.name);
      return kTfLiteError;
    }
    const TfLiteAffineQuantization* params =
        static_cast<const TfLiteAffineQuantization*>(quant.params);
    float scale = params->scale->data[0];
    float zero_point = static_cast<float>(params->zero_point->data[0]);
    *min = scale * (static_cast<float>(min_value) - zero_point);
    *max = scale * (static_cast<float>(max_value) - zero_point);

    return kTfLiteOk;
  }

  OpNode op_node_;
  // Inputs to the current op. Each pair identifies a single output from
  // another node (node_id, output_id).
  std::vector<TensorID> input_ids_;
  // Pointer to the graph builder.
  GraphBuilder* graph_builder_ = nullptr;
  // Data needed by this node.
  void* builtin_data_ = nullptr;
  // TODO(karimnosseir): Currently we only use it for getting output
  // size. Can we avoid passing it?
  const TfLiteNode* tflite_node_ = nullptr;
};

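// Builds the Hexagon graph corresponding to a set of TFLite nodes.
//
// Typical usage (a rough sketch; 'hexagon_nn', 'context', 'params',
// 'registration' and the iteration over the partition's nodes are assumptions
// of this example, not requirements of the class):
//
//   GraphBuilder builder(hexagon_nn, context, graph_id);
//   builder.AddInputTensors(params->input_tensors, context);
//   for (/* each TfLite node/registration in the partition */) {
//     OpBuilder* op_builder = builder.AddNodeFromTfLiteOp(
//         registration->builtin_code, node, node_index);
//     op_builder->PopulateSubGraph(node->inputs, node->outputs, context);
//     op_builder->RegisterOutputs(node->outputs, context);
//   }
//   builder.AddOutputTensors(params->output_tensors, context);
//   builder.Build();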
class GraphBuilder {
 public:
  GraphBuilder(const HexagonNN* hexagon_nn, TfLiteContext* context,
               int graph_id)
      : hexagon_nn_(hexagon_nn), context_(context), graph_id_(graph_id) {}

  // Returns a per-op builder. 'op_type' is the TfLite builtin operator code.
  OpBuilder* AddNodeFromTfLiteOp(int op_type, TfLiteNode* node,
                                 int tflite_node_index);

  // Adds a node to the graph. The caller is responsible for setting correct
  // data in the op.
  // 'tflite_node_index' is the node index in TFLite that creates this op.
  OpBuilder* AddNode(int tflite_node_index = -1);

  // Adds a const node that provides the data held by 'tensor'.
  // If `int8_to_uint8` is true, then the data will be cast to uint8 from
  // int8.
  OpBuilder* AddConstNodeWithData(int tensor_id, const TfLiteTensor& tensor,
                                  bool int8_to_uint8 = false);

  // Same as above but takes the shape of the tensor that will hold the data.
  OpBuilder* AddConstNodeWithData(const int shape[], char* data, int data_size);

  OpBuilder* CreateOpBuilderFromTfLiteOp(int op_type, TfLiteNode* node);

  // Constructs the Input node with 'input_tensors' as output.
  TfLiteStatus AddInputTensors(const TfLiteIntArray* input_tensors,
                               TfLiteContext* context);

  // Constructs the Output node with 'output_tensors' as input.
  TfLiteStatus AddOutputTensors(const TfLiteIntArray* output_tensors,
                                TfLiteContext* context);

  // Adds a BatchSeqConfig node to the graph. This is the configuration
  // for a dynamic batch size in the graph.
  // A graph can have only one node of this type.
  void AddBatchSeqConfig(int max_size_for_batch,
                         TfLiteIntArray* input_batch_dimensions,
                         TfLiteIntArray* output_batch_dimensions);

  // Returns the tensor id inside the Hexagon graph.
  OpBuilder::TensorID GetHexagonTensorId(int tflite_tensor_index) {
    if (!HasTensor(tflite_tensor_index)) {
      // Return an invalid ID.
      return OpBuilder::TensorID(-1, -1);
    }
    return tensors_[tflite_tensor_index];
  }

  // Returns true if this tensor was already added to the graph.
  bool HasTensor(int tflite_tensor_index) {
    if (tensors_.size() <= tflite_tensor_index) {
      return false;
    }
    // The first field is the node ID, and id = 0 is reserved,
    // so anything > 0 is correctly initialized.
    return tensors_[tflite_tensor_index].first != 0;
  }

  void AddDebugNode() {}

  void Build() {
    for (int i = 0; i < builders_.size(); ++i) {
      if (builders_[i]->IsConstNode()) {
        continue;
      }
      const OpNode* op_node = builders_[i]->Build();
      int error = hexagon_nn_->hexagon_nn_append_node(
          graph_id_, op_node->node_id, op_node->op_type, op_node->padding_type,
          op_node->inputs.data(), op_node->inputs.size(),
          op_node->outputs.data(), op_node->outputs.size());
      if (error != 0) {
        printf("Error adding node: id:%d, op_type:%d\n", op_node->node_id,
               op_node->op_type);
      }
    }
  }

  void print() {
    printf("------------------------------\n");
    std::vector<unsigned char> buf(10000);
    hexagon_nn_->hexagon_nn_snpprint(graph_id_, buf.data(), buf.size());
    printf("%s", buf.data());
    printf("------------------------------\n");
    fflush(stdout);
  }

  // Adds a new tensor mapping to the tensor list.
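  // For example (ids are illustrative), after
  //   AddTensorWithID(/*tflite_tensor_id=*/3, /*hexagon_node_id=*/10,
  //                   /*hexagon_node_output_id=*/0)
  // a subsequent GetHexagonTensorId(3) returns OpBuilder::TensorID(10, 0).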
  bool AddTensorWithID(int tflite_tensor_id, int hexagon_node_id,
                       int hexagon_node_output_id, bool overwrite = false) {
    if (!overwrite && HasTensor(tflite_tensor_id)) {
      TF_LITE_KERNEL_LOG(
          context_,
          "Trying to add duplicate tensor without overwrite, tflite_tensor_id "
          "%d, hexagon_node_id %d, hexagon_node_output_id %d",
          tflite_tensor_id, hexagon_node_id, hexagon_node_output_id);
      return false;
    }
    if (tensors_.size() <= tflite_tensor_id) {
      tensors_.resize(tflite_tensor_id + 1);
    }
    if (hexagon_node_id == -1 || hexagon_node_output_id == -1)
      TF_LITE_KERNEL_LOG(context_,
                         "Trying to add invalid id, tflite_tensor_id "
                         "%d, hexagon_node_id %d, hexagon_node_output_id %d",
                         tflite_tensor_id, hexagon_node_id,
                         hexagon_node_output_id);
    tensors_[tflite_tensor_id] =
        OpBuilder::TensorID(hexagon_node_id, hexagon_node_output_id);
    return true;
  }

  int GetOpTypeId(int node_id) {
    if (node_id > builders_.size()) {
      return -1;
    }
    return builders_[node_id - 1]->GetOpType();
  }

  int GetTFLiteNodeID(int node_id) const {
    if (node_id > builders_.size()) {
      return -1;
    }
    return builders_[node_id - 1]->GetTFLiteNodeID();
  }

  // Returns true if the graph supports dynamic batch, false otherwise.
  bool GraphHasDynamicBatch() const { return max_size_for_batch_ != -1; }

  // Returns the maximum value of the batch dimension the graph supports,
  // or -1 if the graph doesn't support dynamic batch.
  int GetMaxBatchSize() const { return max_size_for_batch_; }

 private:
  // Looks up the cache for data with key 'cache_key'.
  // Returns the OpBuilder* for the data if found, nullptr otherwise.
  OpBuilder* LookupConstData(uint64_t cache_key);

  // Inserts 'value' in the cache with key 'cache_key'.
  // If data with the same key is already in the cache, it will be overwritten.
  void AddToCache(uint64_t cache_key, OpBuilder* value);

  // Helper method to fetch dimensions.
  // TODO(karimnosseir): Move this method to a shared place.
  void GetDims(int* batch_size, int* height_size, int* width_size,
               int* depth_size, const TfLiteIntArray* dims) {
    int* dim[] = {batch_size, height_size, width_size, depth_size};
    for (int i = 0; i < 4; ++i) *(dim[i]) = 1;
    for (int i = 4 - dims->size; i < 4; ++i) {
      *dim[i] = dims->data[i - (4 - dims->size)];
    }
  }

  // Adds a Cast op to convert a tensor from int8 to uint8 (or vice versa).
  // The builder holding the casting operator is returned in 'cast_op_builder'
  // if it is not nullptr.
  TfLiteStatus AddCastOp(TfLiteContext* context, int op_type, int tensor_id,
                         OpBuilder** cast_op_builder);

  const HexagonNN* hexagon_nn_ = nullptr;
  TfLiteContext* context_ = nullptr;
  int graph_id_ = -1;
  std::vector<std::unique_ptr<OpBuilder>> builders_;
  // The index in the vector is the tflite_tensor_index; the value
  // is the ID in the Hexagon graph.
  std::vector<OpBuilder::TensorID> tensors_;

  // If the graph being built supports dynamic batch, this represents
  // the maximum value for the batch size.
  int max_size_for_batch_ = -1;

  // Cache for const data in the graph.
  // The key is a hash of the data; the value is the OpBuilder* for the added
  // data.
  std::map<uint64_t, OpBuilder*> cache_;
};

}  // namespace hexagon
}  // namespace delegates
}  // namespace tflite

#endif  // TENSORFLOW_LITE_DELEGATES_HEXAGON_BUILDERS_OP_BUILDER_H_