/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// This file defines common C types and APIs for implementing operations,
// delegates and other constructs in TensorFlow Lite. The actual operations and
// delegates can be defined using C++, but the interface between the
// interpreter and the operations is C.
//
// Summary of abstractions
// TF_LITE_ENSURE - Self-sufficient error checking
// TfLiteStatus - Status reporting
// TfLiteIntArray - stores tensor shapes (dims)
// TfLiteContext - allows an op to access the tensors
// TfLiteTensor - tensor (a multidimensional array)
// TfLiteNode - a single node or operation
// TfLiteRegistration - the implementation of a conceptual operation
// TfLiteDelegate - allows delegation of nodes to alternative backends
//
// Some abstractions in this file are created and managed by the Interpreter.
#ifndef TENSORFLOW_LITE_C_COMMON_H_
#define TENSORFLOW_LITE_C_COMMON_H_

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#ifdef __cplusplus
extern "C" {
#endif  // __cplusplus

typedef enum TfLiteStatus { kTfLiteOk = 0, kTfLiteError = 1 } TfLiteStatus;

// The list of external context types known to TF Lite. This list exists solely
// to avoid conflicts and to ensure ops can share the external contexts they
// need. Access to the external contexts is controlled by one of the
// corresponding support files.
typedef enum TfLiteExternalContextType {
  kTfLiteEigenContext = 0,       // include eigen_support.h to use.
  kTfLiteGemmLowpContext = 1,    // include gemm_support.h to use.
  kTfLiteEdgeTpuContext = 2,     // Placeholder for Edge TPU support.
  kTfLiteCpuBackendContext = 3,  // include cpu_backend_support.h to use.
  kTfLiteMaxExternalContexts = 4
} TfLiteExternalContextType;

// Forward declare so dependent structs and methods can reference these types
// prior to the struct definitions.
struct TfLiteContext;
struct TfLiteDelegate;
struct TfLiteRegistration;

// An external context is a collection of information unrelated to the TF Lite
// framework, but useful to a subset of the ops. TF Lite knows very little
// about the actual contexts, but it keeps a list of them, and is able to
// refresh them if configurations like the number of recommended threads
// change.
typedef struct TfLiteExternalContext {
  TfLiteExternalContextType type;
  TfLiteStatus (*Refresh)(struct TfLiteContext* context);
} TfLiteExternalContext;

#define kTfLiteOptionalTensor (-1)

// Fixed size list of integers. Used for dimensions and inputs/outputs tensor
// indices.
typedef struct TfLiteIntArray {
  int size;
// gcc 6.1+ has a bug where flexible members aren't properly handled.
// https://github.com/google/re2/commit/b94b7cd42e9f02673cd748c1ac1d16db4052514c
#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ == 6 && \
    __GNUC_MINOR__ >= 1
  int data[0];
#else
  int data[];
#endif
} TfLiteIntArray;

// Given the size (number of elements) in a TfLiteIntArray, calculate its size
// in bytes.
int TfLiteIntArrayGetSizeInBytes(int size);

#ifndef TF_LITE_STATIC_MEMORY
// Create an array of a given `size` (uninitialized entries).
// This returns a pointer that you must free using TfLiteIntArrayFree().
TfLiteIntArray* TfLiteIntArrayCreate(int size);
#endif

// Check if two int arrays are equal. Returns 1 if they are equal, 0 otherwise.
int TfLiteIntArrayEqual(const TfLiteIntArray* a, const TfLiteIntArray* b);

// Check if an int array equals an array of ints. Returns 1 if equal, 0
// otherwise.
int TfLiteIntArrayEqualsArray(const TfLiteIntArray* a, int b_size,
                              const int b_data[]);

#ifndef TF_LITE_STATIC_MEMORY
// Create a copy of an array passed as `src`.
// You are expected to free memory with TfLiteIntArrayFree().
TfLiteIntArray* TfLiteIntArrayCopy(const TfLiteIntArray* src);

// Free memory of array `a`.
void TfLiteIntArrayFree(TfLiteIntArray* a);
#endif  // TF_LITE_STATIC_MEMORY
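
// Example usage (an illustrative sketch, assuming a heap-backed build where
// TF_LITE_STATIC_MEMORY is not defined):
//
//   TfLiteIntArray* dims = TfLiteIntArrayCreate(2);
//   dims->data[0] = 3;
//   dims->data[1] = 2;
//   TfLiteIntArray* copy = TfLiteIntArrayCopy(dims);
//   int equal = TfLiteIntArrayEqual(dims, copy);  // 1: contents match.
//   TfLiteIntArrayFree(copy);
//   TfLiteIntArrayFree(dims);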

// Fixed size list of floats. Used for per-channel quantization.
typedef struct TfLiteFloatArray {
  int size;
// gcc 6.1+ has a bug where flexible members aren't properly handled.
// https://github.com/google/re2/commit/b94b7cd42e9f02673cd748c1ac1d16db4052514c
#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ == 6 && \
    __GNUC_MINOR__ >= 1
  float data[0];
#else
  float data[];
#endif
} TfLiteFloatArray;

// Given the size (number of elements) in a TfLiteFloatArray, calculate its
// size in bytes.
int TfLiteFloatArrayGetSizeInBytes(int size);

#ifndef TF_LITE_STATIC_MEMORY
// Create an array of a given `size` (uninitialized entries).
// This returns a pointer that you must free using TfLiteFloatArrayFree().
TfLiteFloatArray* TfLiteFloatArrayCreate(int size);

// Free memory of array `a`.
void TfLiteFloatArrayFree(TfLiteFloatArray* a);
#endif  // TF_LITE_STATIC_MEMORY

// Since we must not depend on any libraries, define a minimal subset of
// error macros while avoiding names that have pre-conceived meanings like
// assert and check.

// Check whether `value` is true, and if not return kTfLiteError from
// the current function (and report the error string `msg`).
#define TF_LITE_ENSURE_MSG(context, value, msg)            \
  do {                                                     \
    if (!(value)) {                                        \
      (context)->ReportError((context), __FILE__ " " msg); \
      return kTfLiteError;                                 \
    }                                                      \
  } while (0)

// Check whether the value `a` is true, and if not return kTfLiteError from
// the current function, while also reporting the location of the error.
#define TF_LITE_ENSURE(context, a)                                          \
  do {                                                                      \
    if (!(a)) {                                                             \
      (context)->ReportError((context), "%s:%d %s was not true.", __FILE__, \
                             __LINE__, #a);                                 \
      return kTfLiteError;                                                  \
    }                                                                       \
  } while (0)

#define TF_LITE_ENSURE_STATUS(a) \
  do {                           \
    if ((a) != kTfLiteOk) {      \
      return kTfLiteError;       \
    }                            \
  } while (0)

// Check whether `a == b` holds, and if not return kTfLiteError from the
// current function, while also reporting the location of the error.
// `a` and `b` may be evaluated more than once, so no side effects or
// extremely expensive computations should be done.
#define TF_LITE_ENSURE_EQ(context, a, b)                                       \
  do {                                                                         \
    if ((a) != (b)) {                                                          \
      (context)->ReportError((context), "%s:%d %s != %s (%d != %d)", __FILE__, \
                             __LINE__, #a, #b, (a), (b));                      \
      return kTfLiteError;                                                     \
    }                                                                          \
  } while (0)

#define TF_LITE_ENSURE_TYPES_EQ(context, a, b)                                 \
  do {                                                                         \
    if ((a) != (b)) {                                                          \
      (context)->ReportError((context), "%s:%d %s != %s (%s != %s)", __FILE__, \
                             __LINE__, #a, #b, TfLiteTypeGetName(a),           \
                             TfLiteTypeGetName(b));                            \
      return kTfLiteError;                                                     \
    }                                                                          \
  } while (0)

#define TF_LITE_ENSURE_OK(context, status) \
  do {                                     \
    if ((status) != kTfLiteOk) {           \
      return kTfLiteError;                 \
    }                                      \
  } while (0)
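
// Example usage inside an op's `prepare` function (an illustrative sketch;
// `AddPrepare` is a hypothetical kernel function, not part of this header):
//
//   TfLiteStatus AddPrepare(TfLiteContext* context, TfLiteNode* node) {
//     TF_LITE_ENSURE_EQ(context, node->inputs->size, 2);
//     const TfLiteTensor* a = &context->tensors[node->inputs->data[0]];
//     const TfLiteTensor* b = &context->tensors[node->inputs->data[1]];
//     TF_LITE_ENSURE_TYPES_EQ(context, a->type, b->type);
//     TF_LITE_ENSURE_MSG(context, a->dims != NULL, "missing input shape");
//     return kTfLiteOk;
//   }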

// Single-precision complex data type compatible with the C99 definition.
typedef struct TfLiteComplex64 {
  float re, im;  // real and imaginary parts, respectively.
} TfLiteComplex64;

// Half precision data type compatible with the C99 definition.
typedef struct TfLiteFloat16 {
  uint16_t data;
} TfLiteFloat16;

// Types supported by tensor.
typedef enum {
  kTfLiteNoType = 0,
  kTfLiteFloat32 = 1,
  kTfLiteInt32 = 2,
  kTfLiteUInt8 = 3,
  kTfLiteInt64 = 4,
  kTfLiteString = 5,
  kTfLiteBool = 6,
  kTfLiteInt16 = 7,
  kTfLiteComplex64 = 8,
  kTfLiteInt8 = 9,
  kTfLiteFloat16 = 10,
} TfLiteType;

// Return the name of a given type, for error reporting purposes.
const char* TfLiteTypeGetName(TfLiteType type);

// Supported quantization types.
typedef enum TfLiteQuantizationType {
  // No quantization.
  kTfLiteNoQuantization = 0,
  // Affine quantization (with support for per-channel quantization).
  // Corresponds to TfLiteAffineQuantization.
  kTfLiteAffineQuantization = 1,
} TfLiteQuantizationType;

// Structure specifying the quantization used by the tensor, if any.
typedef struct TfLiteQuantization {
  // The type of quantization held by params.
  TfLiteQuantizationType type;
  // Holds a reference to one of the quantization param structures specified
  // below.
  void* params;
} TfLiteQuantization;

// Legacy. Will be deprecated in favor of TfLiteAffineQuantization.
// If per-layer quantization is specified, this field will still be populated
// in addition to TfLiteAffineQuantization.
// Parameters for asymmetric quantization. Quantized values can be converted
// back to float using:
//     real_value = scale * (quantized_value - zero_point)
typedef struct TfLiteQuantizationParams {
  float scale;
  int32_t zero_point;
} TfLiteQuantizationParams;
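
// For example, with scale = 0.5 and zero_point = 10, a quantized value of 14
// converts back to 0.5 * (14 - 10) = 2.0.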

// Parameters for asymmetric quantization across a dimension (i.e. per output
// channel quantization).
// quantized_dimension specifies which dimension the scales and zero_points
// correspond to.
// For a particular value in quantized_dimension, quantized values can be
// converted back to float using:
//     real_value = scale * (quantized_value - zero_point)
typedef struct TfLiteAffineQuantization {
  TfLiteFloatArray* scale;
  TfLiteIntArray* zero_point;
  int32_t quantized_dimension;
} TfLiteAffineQuantization;
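
// Illustrative sketch of per-channel dequantization (the helper below is
// hypothetical, not part of this API). Index `channel` selects the slice
// along `quantized_dimension`:
//
//   float DequantizeInt8(const TfLiteAffineQuantization* q, int channel,
//                        int8_t value) {
//     return q->scale->data[channel] *
//            (float)(value - q->zero_point->data[channel]);
//   }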

/* A union of pointers that points to memory for a given tensor. */
typedef union TfLitePtrUnion {
  /* Do not access these members directly; if possible, use
   * GetTensorData<TYPE>(tensor) instead. Otherwise, only access .data, as the
   * other members are deprecated. */
  int32_t* i32;
  int64_t* i64;
  float* f;
  TfLiteFloat16* f16;
  char* raw;
  const char* raw_const;
  uint8_t* uint8;
  bool* b;
  int16_t* i16;
  TfLiteComplex64* c64;
  int8_t* int8;
  /* Only use this member. */
  void* data;
} TfLitePtrUnion;

// Memory allocation strategies. kTfLiteMmapRo is for read-only memory-mapped
// data (or data externally allocated). kTfLiteArenaRw is arena-allocated
// data. kTfLiteDynamic is for tensors that are allocated during evaluation.
typedef enum TfLiteAllocationType {
  kTfLiteMemNone = 0,
  kTfLiteMmapRo,
  kTfLiteArenaRw,
  kTfLiteArenaRwPersistent,
  kTfLiteDynamic,
} TfLiteAllocationType;

// Delegates should use zero or positive integers to represent handles.
// -1 is reserved for unallocated status.
typedef int TfLiteBufferHandle;
enum {
  kTfLiteNullBufferHandle = -1,
};

// Storage format of each dimension in a sparse tensor.
typedef enum TfLiteDimensionType {
  kTfLiteDimDense = 0,
  kTfLiteDimSparseCSR,
} TfLiteDimensionType;

// Metadata to encode each dimension in a sparse tensor.
typedef struct TfLiteDimensionMetadata {
  TfLiteDimensionType format;
  int dense_size;
  TfLiteIntArray* array_segments;
  TfLiteIntArray* array_indices;
} TfLiteDimensionMetadata;

// Parameters used to encode a sparse tensor. For a detailed explanation of
// each field please refer to lite/schema/schema.fbs.
typedef struct TfLiteSparsity {
  TfLiteIntArray* traversal_order;
  TfLiteIntArray* block_map;
  TfLiteDimensionMetadata* dim_metadata;
  int dim_metadata_size;
} TfLiteSparsity;
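
// For example, a 4x4 matrix stored in standard CSR form would typically set
// traversal_order = {0, 1}, make dim_metadata[0] dense with dense_size = 4,
// and make dim_metadata[1] kTfLiteDimSparseCSR, with `array_segments` holding
// the per-row offsets and `array_indices` the column index of each non-zero
// value (a sketch based on the conventional CSR layout; see
// lite/schema/schema.fbs for the authoritative description).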

// A tensor in the interpreter system which is a wrapper around a buffer of
// data including a dimensionality (or NULL if not currently defined).
typedef struct TfLiteTensor {
  // The data type specification for data stored in `data`. This affects
  // what member of `data` union should be used.
  TfLiteType type;
  // A union of data pointers. The appropriate type should be used for a typed
  // tensor based on `type`.
  TfLitePtrUnion data;
  // A pointer to a structure representing the dimensionality interpretation
  // that the buffer should have. NOTE: the product of elements of `dims`
  // and the element datatype size should be equal to `bytes` below.
  TfLiteIntArray* dims;
  // Quantization information.
  TfLiteQuantizationParams params;
  // How memory is mapped:
  //  kTfLiteMmapRo: Memory mapped read only,
  //  i.e. weights
  //  kTfLiteArenaRw: Arena allocated read write memory
  //  (i.e. temporaries, outputs).
  TfLiteAllocationType allocation_type;
  // The number of bytes required to store the data of this Tensor. I.e.
  // (bytes of each element) * dims[0] * ... * dims[n-1].  For example, if
  // type is kTfLiteFloat32 and dims = {3, 2} then
  // bytes = sizeof(float) * 3 * 2 = 4 * 3 * 2 = 24.
  size_t bytes;

  // An opaque pointer to a tflite::MMapAllocation.
  const void* allocation;

  // Null-terminated name of this tensor.
  const char* name;

  // The delegate which knows how to handle `buffer_handle`.
  // WARNING: This is an experimental interface that is subject to change.
  struct TfLiteDelegate* delegate;

  // An integer buffer handle that can be handled by `delegate`.
  // The value is valid only when delegate is not null.
  // WARNING: This is an experimental interface that is subject to change.
  TfLiteBufferHandle buffer_handle;

  // If the delegate uses its own buffer (e.g. GPU memory), the delegate is
  // responsible for setting data_is_stale to true.
  // `delegate->CopyFromBufferHandle` can be called to copy the data from
  // the delegate buffer.
  // WARNING: This is an experimental interface that is subject to change.
  bool data_is_stale;

  // True if the tensor is a variable.
  bool is_variable;

  // Quantization information. Replaces the params field above.
  TfLiteQuantization quantization;

  // Parameters used to encode a sparse tensor.
  // This is optional. The field is NULL if a tensor is dense.
  // WARNING: This is an experimental interface that is subject to change.
  TfLiteSparsity* sparsity;

  // Optional. Encodes shapes with unknown dimensions as -1. This field is
  // only populated when unknown dimensions exist in a read-write tensor (i.e.
  // an input or output tensor), e.g. `dims` contains [1, 1, 1, 3] and
  // `dims_signature` contains [1, -1, -1, 3].
  const TfLiteIntArray* dims_signature;
} TfLiteTensor;
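
// Example (an illustrative sketch): reading a float tensor's contents from
// within a kernel, where `context` and `node` are as passed to an op's
// `invoke` function (see TfLiteRegistration below):
//
//   const TfLiteTensor* t = &context->tensors[node->inputs->data[0]];
//   if (t->type == kTfLiteFloat32) {
//     const float* values = (const float*)t->data.data;
//     size_t num_elements = t->bytes / sizeof(float);
//     for (size_t i = 0; i < num_elements; ++i) { /* use values[i] */ }
//   }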

#ifndef TF_LITE_STATIC_MEMORY
// Free data memory of tensor `t`.
void TfLiteTensorDataFree(TfLiteTensor* t);

// Free quantization data.
void TfLiteQuantizationFree(TfLiteQuantization* quantization);

// Free sparsity parameters.
void TfLiteSparsityFree(TfLiteSparsity* sparsity);

// Free memory of tensor `t`.
void TfLiteTensorFree(TfLiteTensor* t);

// Set all of a tensor's fields (and free any previously allocated data).
void TfLiteTensorReset(TfLiteType type, const char* name, TfLiteIntArray* dims,
                       TfLiteQuantizationParams quantization, char* buffer,
                       size_t size, TfLiteAllocationType allocation_type,
                       const void* allocation, bool is_variable,
                       TfLiteTensor* tensor);

// Resize the allocated data of a (dynamic) tensor. Tensors with allocation
// types other than kTfLiteDynamic will be ignored.
void TfLiteTensorRealloc(size_t num_bytes, TfLiteTensor* tensor);
#endif  // TF_LITE_STATIC_MEMORY
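
// Example (an illustrative sketch): growing a dynamic tensor's buffer before
// writing variable-sized output. In kernel code the allocation type is
// usually switched via helpers that are not declared in this header.
//
//   tensor->allocation_type = kTfLiteDynamic;
//   TfLiteTensorRealloc(new_num_bytes, tensor);  // No-op unless dynamic.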

// A structure representing an instance of a node.
// This structure only exhibits the inputs, outputs and user-defined data, not
// other features like the type.
typedef struct TfLiteNode {
  // Inputs to this node expressed as indices into the context's tensors.
  TfLiteIntArray* inputs;

  // Outputs to this node expressed as indices into the context's tensors.
  TfLiteIntArray* outputs;

  // Intermediate tensors to this node expressed as indices into the context's
  // tensors.
  TfLiteIntArray* intermediates;

  // Temporary tensors used during computation. This usually contains no
  // tensors, but ops are allowed to change that if they need scratch space of
  // any sort.
  TfLiteIntArray* temporaries;

  // Opaque data provided by the node implementer through `Registration.init`.
  void* user_data;

  // Opaque data provided to the node if the node is a builtin. This is usually
  // a structure defined in builtin_op_data.h.
  void* builtin_data;

  // Custom initial data. This is the opaque data provided in the flatbuffer.
  // WARNING: This is an experimental interface that is subject to change.
  const void* custom_initial_data;
  int custom_initial_data_size;

  // The pointer to the delegate. This is non-null only when the node is
  // created by calling `interpreter.ModifyGraphWithDelegate`.
  // WARNING: This is an experimental interface that is subject to change.
  struct TfLiteDelegate* delegate;
} TfLiteNode;

// WARNING: This is an experimental interface that is subject to change.
//
// Currently, TfLiteDelegateParams has to be allocated in a way that it's
// trivially destructible. It will be stored as the `builtin_data` field in
// the `TfLiteNode` of the delegate node.
//
// See also the `CreateDelegateParams` function in `interpreter.cc` for
// details.
typedef struct TfLiteDelegateParams {
  struct TfLiteDelegate* delegate;
  TfLiteIntArray* nodes_to_replace;
  TfLiteIntArray* input_tensors;
  TfLiteIntArray* output_tensors;
} TfLiteDelegateParams;

typedef struct TfLiteContext {
  // Number of tensors in the context.
  size_t tensors_size;

  // The execution plan contains a list of the node indices in execution
  // order. execution_plan->size is the current number of nodes, and
  // execution_plan->data[0] is the first node that needs to be run.
  // TfLiteDelegates can traverse the current execution plan by iterating
  // through each member of this array and using GetNodeAndRegistration() to
  // access details about a node. i.e.
  // TfLiteIntArray* execution_plan;
  // TF_LITE_ENSURE_STATUS(context->GetExecutionPlan(context, &execution_plan));
  // for (int exec_index = 0; exec_index < execution_plan->size; exec_index++) {
  //    int node_index = execution_plan->data[exec_index];
  //    TfLiteNode* node;
  //    TfLiteRegistration* reg;
  //    context->GetNodeAndRegistration(context, node_index, &node, &reg);
  // }
  // WARNING: This is an experimental interface that is subject to change.
  TfLiteStatus (*GetExecutionPlan)(struct TfLiteContext* context,
                                   TfLiteIntArray** execution_plan);

  // An array of tensors in the interpreter context (of length `tensors_size`).
  TfLiteTensor* tensors;

  // Opaque full context pointer (an opaque C++ data structure).
  void* impl_;

  // Request that `tensor`'s memory be resized. Updates dimensions on the
  // tensor. NOTE: ResizeTensor takes ownership of `new_size`.
  TfLiteStatus (*ResizeTensor)(struct TfLiteContext*, TfLiteTensor* tensor,
                               TfLiteIntArray* new_size);
  // Request that an error be reported with format string msg.
  void (*ReportError)(struct TfLiteContext*, const char* msg, ...);

  // Add `tensors_to_add` tensors, preserving pre-existing Tensor entries. If
  // non-null, the value pointed to by `first_new_tensor_index` will be set to
  // the index of the first new tensor.
  TfLiteStatus (*AddTensors)(struct TfLiteContext*, int tensors_to_add,
                             int* first_new_tensor_index);

  // Get a node and its registration by node_index.
  // WARNING: This is an experimental interface that is subject to change.
  TfLiteStatus (*GetNodeAndRegistration)(
      struct TfLiteContext*, int node_index, TfLiteNode** node,
      struct TfLiteRegistration** registration);

  // Replace ops with one or more stub delegate operations. This function
  // does not take ownership of `nodes_to_replace`.
  TfLiteStatus (*ReplaceNodeSubsetsWithDelegateKernels)(
      struct TfLiteContext*, struct TfLiteRegistration registration,
      const TfLiteIntArray* nodes_to_replace, struct TfLiteDelegate* delegate);

  // Number of threads that are recommended to subsystems like gemmlowp and
  // eigen.
  int recommended_num_threads;

  // Access external contexts by type.
  // WARNING: This is an experimental interface that is subject to change.
  TfLiteExternalContext* (*GetExternalContext)(struct TfLiteContext*,
                                               TfLiteExternalContextType);
  // Set the value of an external context. Does not take ownership of the
  // pointer.
  // WARNING: This is an experimental interface that is subject to change.
  void (*SetExternalContext)(struct TfLiteContext*, TfLiteExternalContextType,
                             TfLiteExternalContext*);

  // Flag for allowing float16 precision for FP32 calculations.
  // Default: false.
  // WARNING: This is an experimental API and subject to change.
  bool allow_fp32_relax_to_fp16;

  // Pointer to the op-level profiler, if set; nullptr otherwise.
  void* profiler;

  // Allocate a persistent buffer which has the same lifetime as the
  // interpreter. The memory is allocated from the heap in TFL, and from the
  // tail in TFLM. If *ptr is not nullptr, the pointer will be reallocated.
  // This method is only available in the Prepare stage.
  // WARNING: This is an experimental interface that is subject to change.
  TfLiteStatus (*AllocatePersistentBuffer)(struct TfLiteContext* ctx,
                                           size_t bytes, void** ptr);

  // Allocate a buffer which will be deallocated right after the invoke phase.
  // The memory is allocated from the heap in TFL, and from the volatile arena
  // in TFLM. This method is only available in the Invoke stage.
  // NOTE: If possible, use the RequestScratchBufferInArena method to avoid
  // memory allocation at inference time.
  // WARNING: This is an experimental interface that is subject to change.
  TfLiteStatus (*AllocateBufferForEval)(struct TfLiteContext* ctx, size_t bytes,
                                        void** ptr);

  // Request a scratch buffer in the arena through static memory planning.
  // This method is only available in the Prepare stage; the buffer is
  // allocated by the interpreter between the Prepare and Eval stages. In the
  // Eval stage, the GetScratchBuffer API can be used to fetch the address.
  // WARNING: This is an experimental interface that is subject to change.
  TfLiteStatus (*RequestScratchBufferInArena)(struct TfLiteContext* ctx,
                                              size_t bytes, int* buffer_idx);

  // Get the scratch buffer pointer.
  // This method is only available in the Eval stage.
  // WARNING: This is an experimental interface that is subject to change.
  void* (*GetScratchBuffer)(struct TfLiteContext* ctx, int buffer_idx);

  // Resize the memory pointer of the `tensor`. This method behaves the same as
  // `ResizeTensor`, except that it makes a copy of the shape array internally
  // so the shape array can be deallocated right afterwards.
  // WARNING: This is an experimental interface that is subject to change.
  TfLiteStatus (*ResizeTensorExplicit)(struct TfLiteContext* ctx,
                                       TfLiteTensor* tensor, int dims,
                                       const int* shape);

  // This method provides a preview of post-delegation partitioning. Each
  // TfLiteDelegateParams in the referenced array corresponds to one instance
  // of the delegate kernel.
  // Example usage:
  //
  // TfLiteIntArray* nodes_to_replace = ...;
  // TfLiteDelegateParams* params_array;
  // int num_partitions = 0;
  // TF_LITE_ENSURE_STATUS(context->PreviewDelegatePartitioning(
  //    context, nodes_to_replace, &params_array, &num_partitions));
  // for (int idx = 0; idx < num_partitions; idx++) {
  //    const auto& partition_params = params_array[idx];
  //    ...
  // }
  //
  // NOTE: The context owns the memory referenced by partition_params_array. It
  // will be cleared with another call to PreviewDelegatePartitioning, or after
  // TfLiteDelegate::Prepare returns.
  //
  // WARNING: This is an experimental interface that is subject to change.
  TfLiteStatus (*PreviewDelegatePartitioning)(
      struct TfLiteContext* context, const TfLiteIntArray* nodes_to_replace,
      TfLiteDelegateParams** partition_params_array, int* num_partitions);
} TfLiteContext;
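
// Illustrative sketch of the scratch-buffer flow described above (the
// surrounding kernel functions are hypothetical):
//
//   // In prepare: reserve the buffer and stash its index, e.g. in user_data.
//   int scratch_idx;
//   TF_LITE_ENSURE_STATUS(context->RequestScratchBufferInArena(
//       context, scratch_bytes, &scratch_idx));
//
//   // In invoke/eval: fetch the address planned by the interpreter.
//   void* scratch = context->GetScratchBuffer(context, scratch_idx);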

typedef struct TfLiteRegistration {
  // Initializes the op from serialized data.
  // If a built-in op:
  //   `buffer` is the op's params data (TfLiteLSTMParams*).
  //   `length` is zero.
  // If a custom op:
  //   `buffer` is the op's `custom_options`.
  //   `length` is the size of the buffer.
  //
  // Returns a type-punned (i.e. void*) opaque data (e.g. a primitive pointer
  // or an instance of a struct).
  //
  // The returned pointer will be stored with the node in the `user_data`
  // field, accessible within prepare and invoke functions below.
  // NOTE: if the data is already in the desired format, simply implement this
  // function to return `nullptr` and implement the free function to be a
  // no-op.
  void* (*init)(TfLiteContext* context, const char* buffer, size_t length);

  // The pointer `buffer` is the data previously returned by an init
  // invocation.
  void (*free)(TfLiteContext* context, void* buffer);

  // prepare is called when the inputs this node depends on have been resized.
  // context->ResizeTensor() can be called to request output tensors to be
  // resized.
  //
  // Returns kTfLiteOk on success.
  TfLiteStatus (*prepare)(TfLiteContext* context, TfLiteNode* node);

  // Execute the node (should read node->inputs and output to node->outputs).
  // Returns kTfLiteOk on success.
  TfLiteStatus (*invoke)(TfLiteContext* context, TfLiteNode* node);

  // profiling_string is called during summarization of profiling information
  // in order to group executions together. Providing a value here will cause a
  // given op to appear multiple times in the profiling report. This is
  // particularly useful for custom ops that can perform significantly
  // different calculations depending on their `user_data`.
  const char* (*profiling_string)(const TfLiteContext* context,
                                  const TfLiteNode* node);

  // Builtin codes. If this kernel refers to a builtin, this is the code
  // of the builtin. This is so we can do marshaling to other frameworks like
  // NN API.
  // Note: It is the responsibility of the registration binder to set this
  // properly.
  int32_t builtin_code;

  // Custom op name. If the op is a builtin, this will be null.
  // Note: It is the responsibility of the registration binder to set this
  // properly.
  // WARNING: This is an experimental interface that is subject to change.
  const char* custom_name;

  // The version of the op.
  // Note: It is the responsibility of the registration binder to set this
  // properly.
  int version;
} TfLiteRegistration;
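
// Example (an illustrative sketch): wiring a custom op into a registration.
// MyInit/MyFree/MyPrepare/MyEval are hypothetical functions with the
// signatures declared above, listed in the struct's field order:
//
//   static const TfLiteRegistration my_op = {
//       MyInit, MyFree, MyPrepare, MyEval,
//       /*profiling_string=*/NULL,
//       /*builtin_code=*/0,
//       /*custom_name=*/"MY_CUSTOM_OP",
//       /*version=*/1};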

// The flags used in `TfLiteDelegate`. Note that this is a bitmask, so the
// values should be 1, 2, 4, 8, etc.
typedef enum TfLiteDelegateFlags {
  kTfLiteDelegateFlagsNone = 0,
  // This flag is set if the delegate can handle dynamic sized tensors.
  // For example, the output shape of a `Resize` op with non-constant shape
  // can only be inferred when the op is invoked.
  // In this case, the Delegate is responsible for calling
  // `SetTensorToDynamic` to mark the tensor as a dynamic tensor, and calling
  // `ResizeTensor` when invoking the op.
  //
  // If the delegate isn't capable of handling dynamic tensors, this flag
  // should not be set.
  kTfLiteDelegateFlagsAllowDynamicTensors = 1
} TfLiteDelegateFlags;

// WARNING: This is an experimental interface that is subject to change.
typedef struct TfLiteDelegate {
  // Data that the delegate needs to identify itself. This data is owned by
  // the delegate. The delegate is owned by the user code, so the delegate is
  // responsible for freeing this data when it is destroyed.
  void* data_;

  // Invoked by ModifyGraphWithDelegate. This prepare is called, giving the
  // delegate a view of the current graph through TfLiteContext*. It typically
  // will look at the nodes and call ReplaceNodeSubsetsWithDelegateKernels()
  // to ask the TensorFlow Lite runtime to create macro-nodes to represent
  // delegated subgraphs of the original graph.
  TfLiteStatus (*Prepare)(TfLiteContext* context,
                          struct TfLiteDelegate* delegate);

  // Copy the data from the delegate buffer handle into raw memory of the
  // given 'tensor'. This cannot be null. The delegate is allowed to allocate
  // the raw bytes as long as it follows the rules for kTfLiteDynamic tensors.
  TfLiteStatus (*CopyFromBufferHandle)(TfLiteContext* context,
                                       struct TfLiteDelegate* delegate,
                                       TfLiteBufferHandle buffer_handle,
                                       TfLiteTensor* tensor);

  // Copy the data from raw memory of the given 'tensor' to the delegate
  // buffer handle. This can be null if the delegate doesn't use its own
  // buffer.
  TfLiteStatus (*CopyToBufferHandle)(TfLiteContext* context,
                                     struct TfLiteDelegate* delegate,
                                     TfLiteBufferHandle buffer_handle,
                                     TfLiteTensor* tensor);

  // Free the delegate buffer handle. Note: this only frees the handle; it
  // doesn't release the underlying resource (e.g. textures). The resources
  // are either owned by the application layer or the delegate.
  // This can be null if the delegate doesn't use its own buffer.
  void (*FreeBufferHandle)(TfLiteContext* context,
                           struct TfLiteDelegate* delegate,
                           TfLiteBufferHandle* handle);

  // Bitmask flags. See the comments in `TfLiteDelegateFlags`.
  int64_t flags;
} TfLiteDelegate;

// Build a 'null' delegate, with all the fields properly set to their default
// values.
TfLiteDelegate TfLiteDelegateCreate(void);
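
// Example (an illustrative sketch): a delegate that claims the entire
// execution plan. `MyDelegateKernel` is a hypothetical TfLiteRegistration
// implementing the delegate's macro-node.
//
//   TfLiteStatus MyDelegatePrepare(TfLiteContext* context,
//                                  struct TfLiteDelegate* delegate) {
//     TfLiteIntArray* plan;
//     TF_LITE_ENSURE_STATUS(context->GetExecutionPlan(context, &plan));
//     return context->ReplaceNodeSubsetsWithDelegateKernels(
//         context, MyDelegateKernel, plan, delegate);
//   }
//
//   TfLiteDelegate delegate = TfLiteDelegateCreate();
//   delegate.Prepare = MyDelegatePrepare;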

#ifdef __cplusplus
}  // extern "C"
#endif  // __cplusplus
#endif  // TENSORFLOW_LITE_C_COMMON_H_