/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// This file defines a C API for implementing operations in tflite.
// These operations can be defined using C++, but the interface between
// the interpreter and the operations is C.
//
// Summary of abstractions:
// TF_LITE_ENSURE - Self-sufficient error checking.
// TfLiteStatus - Status reporting.
// TfLiteIntArray - Stores tensor shapes (dims).
// TfLiteContext - Allows an op to access the tensors.
// TfLiteTensor - A tensor (a multidimensional array).
// TfLiteNode - A single node or operation.
// TfLiteRegistration - The implementation of a conceptual operation.
//
// Some abstractions in this file are created and managed by the Interpreter.
#ifndef TENSORFLOW_CONTRIB_LITE_CONTEXT_H_
#define TENSORFLOW_CONTRIB_LITE_CONTEXT_H_

#include <stdint.h>
#include <stdlib.h>

#ifdef __cplusplus
extern "C" {
#endif  // __cplusplus

typedef enum { kTfLiteOk = 0, kTfLiteError = 1 } TfLiteStatus;

// Forward declare so TfLiteContext (below) can refer to TfLiteRegistration
// before it is defined.
typedef struct _TfLiteRegistration TfLiteRegistration;

#define kOptionalTensor (-1)

// Fixed size list of integers. Used for dimensions and input/output tensor
// indices.
typedef struct {
  int size;
// gcc 6.1+ has a bug where flexible members aren't properly handled.
// https://github.com/google/re2/commit/b94b7cd42e9f02673cd748c1ac1d16db4052514c
#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ == 6 && \
    __GNUC_MINOR__ >= 1
  int data[0];
#else
  int data[];
#endif
} TfLiteIntArray;

// Create an array of a given `size` (uninitialized entries).
// This returns a pointer that you must free using TfLiteIntArrayFree().
TfLiteIntArray* TfLiteIntArrayCreate(int size);

// Check if two int arrays are equal. Returns 1 if they are equal, 0 otherwise.
int TfLiteIntArrayEqual(TfLiteIntArray* a, TfLiteIntArray* b);

// Create a copy of an array passed as `src`.
// You are expected to free the memory with TfLiteIntArrayFree().
TfLiteIntArray* TfLiteIntArrayCopy(TfLiteIntArray* src);

// Free memory of array `v`.
void TfLiteIntArrayFree(TfLiteIntArray* v);

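// Example usage (a minimal sketch; the variable name `dims` is illustrative):
//
//   TfLiteIntArray* dims = TfLiteIntArrayCreate(2);
//   dims->data[0] = 2;  // Entries are uninitialized until written.
//   dims->data[1] = 3;
//   // ... use `dims`, e.g. as a tensor shape ...
//   TfLiteIntArrayFree(dims);
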
// Since we must not depend on any libraries, define a minimal subset of
// error macros while avoiding names that have preconceived meanings like
// assert and check.

// Check whether `value` is true, and if not return kTfLiteError from
// the current function (and report the error string `msg`).
#define TF_LITE_ENSURE_MSG(context, value, msg)            \
  do {                                                     \
    if (!(value)) {                                        \
      (context)->ReportError((context), __FILE__ " " msg); \
      return kTfLiteError;                                 \
    }                                                      \
  } while (0)

// Check whether the value `a` is true, and if not return kTfLiteError from
// the current function, while also reporting the location of the error.
#define TF_LITE_ENSURE(context, a)                                          \
  do {                                                                      \
    if (!(a)) {                                                             \
      (context)->ReportError((context), "%s:%d %s was not true.", __FILE__, \
                             __LINE__, #a);                                 \
      return kTfLiteError;                                                  \
    }                                                                       \
  } while (0)

// Check whether the status `a` is kTfLiteOk, and if not return kTfLiteError
// from the current function.
#define TF_LITE_ENSURE_STATUS(a) \
  do {                           \
    if ((a) != kTfLiteOk) {      \
      return kTfLiteError;       \
    }                            \
  } while (0)

// Check whether the value `a == b` is true, and if not return kTfLiteError from
// the current function, while also reporting the location of the error.
// `a` and `b` may be evaluated more than once, so no side effects or
// extremely expensive computations should be done.
#define TF_LITE_ENSURE_EQ(context, a, b)                                       \
  do {                                                                         \
    if ((a) != (b)) {                                                          \
      (context)->ReportError((context), "%s:%d %s != %s (%d != %d)", __FILE__, \
                             __LINE__, #a, #b, (a), (b));                      \
      return kTfLiteError;                                                     \
    }                                                                          \
  } while (0)

// Check whether `status` is kTfLiteOk, and if not return it from the
// current function.
#define TF_LITE_ENSURE_OK(context, status) \
  do {                                     \
    if ((status) != kTfLiteOk) {           \
      return status;                       \
    }                                      \
  } while (0)

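// Example usage inside an op function (a minimal sketch; `MyOpPrepare` is a
// hypothetical op function, not part of this API):
//
//   TfLiteStatus MyOpPrepare(TfLiteContext* context, TfLiteNode* node) {
//     // Fail with a readable message if the node is not wired as expected.
//     TF_LITE_ENSURE_EQ(context, node->inputs->size, 2);
//     TF_LITE_ENSURE_EQ(context, node->outputs->size, 1);
//     TF_LITE_ENSURE_MSG(context, node->user_data != NULL,
//                        "Missing user_data from init().");
//     return kTfLiteOk;
//   }
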
// Types of data supported by a tensor.
typedef enum {
  kTfLiteNoType = 0,
  kTfLiteFloat32 = 1,
  kTfLiteInt32 = 2,
  kTfLiteUInt8 = 3,
  kTfLiteInt64 = 4,
  kTfLiteString = 5,
} TfLiteType;

// Parameters for asymmetric quantization. Quantized values can be converted
// back to float using:
//    real_value = scale * (quantized_value - zero_point);
typedef struct {
  float scale;
  int32_t zero_point;
} TfLiteQuantizationParams;

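// Worked example (the numbers are illustrative only): with scale = 0.5 and
// zero_point = 128, a quantized value of 130 maps to
// real_value = 0.5 * (130 - 128) = 1.0. A minimal dequantization helper
// (`Dequantize` is a hypothetical function, not part of this API):
//
//   float Dequantize(uint8_t q, TfLiteQuantizationParams params) {
//     return params.scale * (q - params.zero_point);
//   }
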
// A union of pointers that points to the memory for a given tensor.
typedef union {
  int* i32;
  int64_t* i64;
  float* f;
  char* raw;
  const char* raw_const;
  uint8_t* uint8;
} TfLitePtrUnion;

// Memory allocation strategies. kTfLiteMmapRo is for read-only memory-mapped
// data (or data externally allocated). kTfLiteArenaRw is arena allocated
// data. kTfLiteDynamic is for tensors that are allocated during evaluation.
typedef enum {
  kTfLiteMemNone = 0,
  kTfLiteMmapRo,
  kTfLiteArenaRw,
  kTfLiteArenaRwPersistent,
  kTfLiteDynamic,
} TfLiteAllocationType;

// A tensor in the interpreter system: a wrapper around a buffer of data,
// including a dimensionality (or NULL if not currently defined).
typedef struct {
  // The data type specification for data stored in `data`. This affects
  // which member of the `data` union should be used.
  TfLiteType type;
  // A union of data pointers. The appropriate type should be used for a typed
  // tensor based on `type`.
  TfLitePtrUnion data;
  // A pointer to a structure representing the dimensionality interpretation
  // that the buffer should have. NOTE: the product of elements of `dims`
  // and the element datatype size should be equal to `bytes` below.
  TfLiteIntArray* dims;
  // Quantization information.
  TfLiteQuantizationParams params;
  // How memory is mapped:
  //  kTfLiteMmapRo: Memory mapped read-only (e.g. weights).
  //  kTfLiteArenaRw: Arena allocated read-write memory (e.g. temporaries,
  //  outputs).
  TfLiteAllocationType allocation_type;
  // The number of bytes required to store the data of this Tensor. I.e.
  // (bytes of each element) * dims[0] * ... * dims[n-1].  For example, if
  // type is kTfLiteFloat32 and dims = {3, 2} then
  // bytes = sizeof(float) * 3 * 2 = 4 * 3 * 2 = 24.
  size_t bytes;

  // An opaque pointer to a tflite::MMapAllocation.
  const void* allocation;

  // Null-terminated name of this tensor.
  const char* name;
} TfLiteTensor;

// Free memory of tensor `t`.
void TfLiteTensorFree(TfLiteTensor* t);

// Set all of a tensor's fields (and free any previously allocated data).
void TfLiteTensorReset(TfLiteType type, const char* name, TfLiteIntArray* dims,
                       TfLiteQuantizationParams quantization, char* buffer,
                       size_t size, TfLiteAllocationType allocation_type,
                       const void* allocation, TfLiteTensor* tensor);

// Resize the allocated data of a (dynamic) tensor.
void TfLiteTensorRealloc(size_t num_bytes, TfLiteTensor* tensor);

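// Example: reading a float tensor's contents through the `data` union
// (a minimal sketch; `SumTensor` is a hypothetical helper, and the tensor is
// assumed to have type kTfLiteFloat32):
//
//   float SumTensor(const TfLiteTensor* t) {
//     float total = 0.0f;
//     size_t count = t->bytes / sizeof(float);
//     for (size_t i = 0; i < count; ++i) total += t->data.f[i];
//     return total;
//   }
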
// A structure representing an instance of a node.
// This structure only exposes the inputs, outputs, and user-defined data, not
// other features such as the node's type.
typedef struct {
  // Inputs to this node expressed as indices into the context's tensors.
  TfLiteIntArray* inputs;

  // Outputs to this node expressed as indices into the context's tensors.
  TfLiteIntArray* outputs;

  // Temporary tensors used during the computation. This usually contains no
  // tensors, but ops are allowed to change that if they need scratch space of
  // any sort.
  TfLiteIntArray* temporaries;

  // Opaque data provided by the node implementer through
  // `TfLiteRegistration.init`.
  void* user_data;

  // Opaque data provided to the node if the node is a builtin. This is usually
  // a structure defined in builtin_op_data.h.
  void* builtin_data;

  // Custom initial data. This is the opaque data provided in the flatbuffer.
  // WARNING: This is an experimental interface that is subject to change.
  const void* custom_initial_data;
  int custom_initial_data_size;
} TfLiteNode;

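// Example: resolving a node's tensors inside `invoke` (a minimal sketch of an
// identity op; `MyOpInvoke` is a hypothetical op function):
//
//   TfLiteStatus MyOpInvoke(TfLiteContext* context, TfLiteNode* node) {
//     TfLiteTensor* input = &context->tensors[node->inputs->data[0]];
//     TfLiteTensor* output = &context->tensors[node->outputs->data[0]];
//     TF_LITE_ENSURE(context, input->bytes == output->bytes);
//     for (size_t i = 0; i < input->bytes; ++i) {
//       output->data.raw[i] = input->data.raw[i];
//     }
//     return kTfLiteOk;
//   }
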
typedef struct TfLiteContext {
  // Number of tensors in the context.
  int tensors_size;

  // The execution plan contains a list of the node indices in execution
  // order. execution_plan->size is the current number of nodes, and
  // execution_plan->data[0] is the first node that needs to be run.
  // TfLiteDelegates can traverse the current execution plan by iterating
  // through each member of this array and using GetNodeAndRegistration() to
  // access details about a node, e.g.:
  // TfLiteIntArray* execution_plan;
  // TF_LITE_ENSURE_STATUS(context->GetExecutionPlan(context, &execution_plan));
  // for (int exec_index = 0; exec_index < execution_plan->size; exec_index++) {
  //    int node_index = execution_plan->data[exec_index];
  //    TfLiteNode* node;
  //    TfLiteRegistration* reg;
  //    context->GetNodeAndRegistration(context, node_index, &node, &reg);
  // }
  // WARNING: This is an experimental interface that is subject to change.
  TfLiteStatus (*GetExecutionPlan)(struct TfLiteContext* context,
                                   TfLiteIntArray** execution_plan);

  // An array of tensors in the interpreter context (of length `tensors_size`).
  TfLiteTensor* tensors;

  // Opaque full context pointer (an opaque C++ data structure).
  void* impl_;

  // Request that `tensor`'s data be resized to `new_size`. Updates the
  // dimensions on the tensor. NOTE: ResizeTensor takes ownership of `new_size`.
  TfLiteStatus (*ResizeTensor)(struct TfLiteContext*, TfLiteTensor* tensor,
                               TfLiteIntArray* new_size);
  // Request that an error be reported with the format string `msg`.
  void (*ReportError)(struct TfLiteContext*, const char* msg, ...);

  // Add `tensors_to_add` tensors, preserving pre-existing tensor entries. If
  // non-null, the value pointed to by `first_new_tensor_index` will be set to
  // the index of the first new tensor.
  TfLiteStatus (*AddTensors)(struct TfLiteContext*, int tensors_to_add,
                             int* first_new_tensor_index);

  // Get a node and its registration by `node_index`.
  // WARNING: This is an experimental interface that is subject to change.
  TfLiteStatus (*GetNodeAndRegistration)(struct TfLiteContext*, int node_index,
                                         TfLiteNode** node,
                                         TfLiteRegistration** registration);

  // Replace the ops in `nodes_to_replace` with delegate kernels defined by
  // `registration`.
  TfLiteStatus (*ReplaceSubgraphsWithDelegateKernels)(
      struct TfLiteContext*, TfLiteRegistration registration,
      const TfLiteIntArray* nodes_to_replace);

  // TODO(ahentz): we should create a more general mechanism for this sort of
  // library-global object.
  void* gemm_context;
} TfLiteContext;

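// Example: using ResizeTensor inside `prepare` to propagate an input shape to
// an output (a minimal sketch; `MyOpPrepare` is a hypothetical op function):
//
//   TfLiteStatus MyOpPrepare(TfLiteContext* context, TfLiteNode* node) {
//     TfLiteTensor* input = &context->tensors[node->inputs->data[0]];
//     TfLiteTensor* output = &context->tensors[node->outputs->data[0]];
//     // ResizeTensor takes ownership of the copied dims.
//     TfLiteIntArray* output_size = TfLiteIntArrayCopy(input->dims);
//     return context->ResizeTensor(context, output, output_size);
//   }
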
typedef struct _TfLiteRegistration {
  // Initializes the op from serialized data.
  // If a built-in op:
  //   `buffer` is the op's params data (TfLiteLSTMParams*).
  //   `length` is zero.
  // If a custom op:
  //   `buffer` is the op's `custom_options`.
  //   `length` is the size of the buffer.
  //
  // Returns a type-punned (i.e. void*) opaque data pointer (e.g. a primitive
  // pointer or an instance of a struct).
  //
  // The returned pointer will be stored with the node in the `user_data` field,
  // accessible within the prepare and invoke functions below.
  // NOTE: if the data is already in the desired format, simply implement this
  // function to return `nullptr` and implement the free function to be a no-op.
  void* (*init)(TfLiteContext* context, const char* buffer, size_t length);

  // The pointer `buffer` is the data previously returned by an init invocation.
  void (*free)(TfLiteContext* context, void* buffer);

  // prepare is called when the inputs this node depends on have been resized.
  // context->ResizeTensor() can be called to request output tensors to be
  // resized.
  //
  // Returns kTfLiteOk on success.
  TfLiteStatus (*prepare)(TfLiteContext* context, TfLiteNode* node);

  // Execute the node (should read node->inputs and output to node->outputs).
  // Returns kTfLiteOk on success.
  TfLiteStatus (*invoke)(TfLiteContext* context, TfLiteNode* node);

  // Builtin codes. If this kernel refers to a builtin, this is the code of the
  // builtin. This is so we can do marshaling to other frameworks like the
  // NN API. Note, it is the responsibility of the registration binder to
  // set this properly.
  int32_t builtin_code;

  // Custom op name. If the op is a builtin, this will be null.
  // WARNING: This is an experimental interface that is subject to change.
  const char* custom_name;
} TfLiteRegistration;

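// Example: exposing a custom op through a TfLiteRegistration (a minimal
// sketch; Register_MY_OP, MyOpInit, MyOpFree, MyOpPrepare and MyOpInvoke are
// hypothetical functions with the signatures declared above):
//
//   TfLiteRegistration* Register_MY_OP() {
//     static TfLiteRegistration r = {MyOpInit, MyOpFree, MyOpPrepare,
//                                    MyOpInvoke};
//     return &r;
//   }
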
// WARNING: This is an experimental interface that is subject to change.
typedef struct {
  // Data that the delegate needs to identify itself. This data is owned by the
  // delegate. The delegate is owned in the user code, so the delegate is
  // responsible for cleaning up this data when it is destroyed.
  void* data_;
  // Invoked by ModifyGraphWithDelegate. This Prepare is called, giving the
  // delegate a view of the current graph through TfLiteContext*. It typically
  // will look at the nodes and call ReplaceSubgraphsWithDelegateKernels()
  // to ask the TensorFlow Lite runtime to create macro-nodes to represent
  // delegated subgraphs of the original graph.
  TfLiteStatus (*Prepare)(TfLiteContext* context, void* data);
} TfLiteDelegate;

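// Example: a delegate `Prepare` that claims every node in the current
// execution plan (a minimal sketch; `MyDelegatePrepare` is a hypothetical
// function and `GetMyDelegateKernel` is a hypothetical helper returning the
// TfLiteRegistration for the delegate's macro-node):
//
//   TfLiteStatus MyDelegatePrepare(TfLiteContext* context, void* data) {
//     TfLiteIntArray* plan;
//     TF_LITE_ENSURE_STATUS(context->GetExecutionPlan(context, &plan));
//     return context->ReplaceSubgraphsWithDelegateKernels(
//         context, GetMyDelegateKernel(), plan);
//   }
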
#ifdef __cplusplus
}  // extern "C"
#endif  // __cplusplus
#endif  // TENSORFLOW_CONTRIB_LITE_CONTEXT_H_