/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
/// \warning Users of TensorFlow Lite should not include this file directly,
/// but should instead include "third_party/tensorflow/lite/c/c_api.h".
/// Only the TensorFlow Lite implementation itself should include this
/// file directly.
#ifndef TENSORFLOW_LITE_CORE_C_C_API_H_
#define TENSORFLOW_LITE_CORE_C_C_API_H_

#include <stdarg.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/core/async/c/types.h"
#include "tensorflow/lite/core/c/c_api_types.h"  // IWYU pragma: export

// --------------------------------------------------------------------------
/// \file
/// C API for TensorFlow Lite.
///
/// The API leans towards simplicity and uniformity instead of convenience, as
/// most usage will be by language-specific wrappers. It provides largely the
/// same set of functionality as that of the C++ TensorFlow Lite `Interpreter`
/// API, but is useful for shared libraries where having a stable ABI boundary
/// is important.
///
/// Conventions:
/// * We use the prefix TfLite for everything in the API.
/// * size_t is used to represent byte sizes of objects that are
///   materialized in the address space of the calling process.
/// * int is used as an index into arrays.
///
/// Usage:
/// <pre><code>
/// // Create the model and interpreter options.
/// TfLiteModel* model = TfLiteModelCreateFromFile("/path/to/model.tflite");
/// TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
/// TfLiteInterpreterOptionsSetNumThreads(options, 2);
///
/// // Create the interpreter.
/// TfLiteInterpreter* interpreter = TfLiteInterpreterCreate(model, options);
///
/// // Allocate tensors and populate the input tensor data.
/// TfLiteInterpreterAllocateTensors(interpreter);
/// TfLiteTensor* input_tensor =
///     TfLiteInterpreterGetInputTensor(interpreter, 0);
/// TfLiteTensorCopyFromBuffer(input_tensor, input.data(),
///                            input.size() * sizeof(float));
///
/// // Execute inference.
/// TfLiteInterpreterInvoke(interpreter);
///
/// // Extract the output tensor data.
/// const TfLiteTensor* output_tensor =
///      TfLiteInterpreterGetOutputTensor(interpreter, 0);
/// TfLiteTensorCopyToBuffer(output_tensor, output.data(),
///                          output.size() * sizeof(float));
///
/// // Dispose of the model and interpreter objects.
/// TfLiteInterpreterDelete(interpreter);
/// TfLiteInterpreterOptionsDelete(options);
/// TfLiteModelDelete(model);
///
/// </code></pre>

#ifdef __cplusplus
extern "C" {
#endif  // __cplusplus

// This header should be valid in both C (e.g. C99) and C++,
// so 'void' in parameters is not redundant.
// NOLINTBEGIN(modernize-redundant-void-arg)

// --------------------------------------------------------------------------
// Opaque types used by the C API.  (See also c_api_types.h.)

/// TfLiteModel wraps a loaded TensorFlow Lite model.
typedef struct TfLiteModel TfLiteModel;

/// TfLiteInterpreterOptions allows customized interpreter configuration.
typedef struct TfLiteInterpreterOptions TfLiteInterpreterOptions;

/// TfLiteInterpreter provides inference from a provided model.
typedef struct TfLiteInterpreter TfLiteInterpreter;

/// A tensor in the interpreter system, which wraps a buffer of data together
/// with its dimensionality (or NULL if not currently defined).
typedef struct TfLiteTensor TfLiteTensor;

/// TfLiteRegistrationExternal is an external version of TfLiteRegistration,
/// used with the custom op registration API.
/// \warning This is an experimental type and subject to change.
typedef struct TfLiteRegistrationExternal TfLiteRegistrationExternal;

// --------------------------------------------------------------------------
/// The TensorFlow Lite Runtime version.
///
/// Returns a pointer to a statically allocated string that is the version
/// number of the (potentially dynamically loaded) TF Lite Runtime library.
/// TensorFlow Lite uses semantic versioning, and the return value should be
/// in semver 2 format <http://semver.org>, starting with MAJOR.MINOR.PATCH,
/// e.g. "2.12.0" or "2.13.0-rc2".
TFL_CAPI_EXPORT extern const char* TfLiteVersion(void);

/// The supported TensorFlow Lite model file Schema version.
///
/// Returns the (major) version number of the Schema used for model
/// files that is supported by the (potentially dynamically loaded)
/// TensorFlow Lite Runtime.
///
/// Model files using schema versions different from this may not be supported
/// by the current version of the TF Lite Runtime.
TFL_CAPI_EXPORT int TfLiteSchemaVersion(void);

/// Returns a model from the provided buffer, or null on failure.
///
/// \note The caller retains ownership of the `model_data` buffer and should
/// ensure that its lifetime is at least as long as the lifetime of the
/// `TfLiteModel` and of any `TfLiteInterpreter` objects created from that
/// `TfLiteModel`; furthermore, the contents of the `model_data` buffer must
/// not be modified during that time.
TFL_CAPI_EXPORT extern TfLiteModel* TfLiteModelCreate(const void* model_data,
                                                      size_t model_size);

/// Same as `TfLiteModelCreate` with a customizable error reporter.
/// * `reporter` takes the provided `user_data` object, as well as a C-style
///   format string and arg list (see also vprintf).
/// * `user_data` is optional. If non-null, it is owned by the client and must
///   remain valid for the duration of the interpreter lifetime.
TFL_CAPI_EXPORT extern TfLiteModel* TfLiteModelCreateWithErrorReporter(
    const void* model_data, size_t model_size,
    void (*reporter)(void* user_data, const char* format, va_list args),
    void* user_data);

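// Example: a minimal sketch of an error reporter passed to
// `TfLiteModelCreateWithErrorReporter`. The name `MyErrorReporter` and the
// `model_data`/`model_size` variables are hypothetical and used here for
// illustration only; the callback uses <stdio.h>.
//
//   static void MyErrorReporter(void* user_data, const char* format,
//                               va_list args) {
//     // Forward TF Lite error messages to stderr (or any other log sink).
//     vfprintf(stderr, format, args);
//     fputc('\n', stderr);
//   }
//
//   TfLiteModel* model = TfLiteModelCreateWithErrorReporter(
//       model_data, model_size, MyErrorReporter, /*user_data=*/NULL);
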
/// Returns a model from the provided file, or null on failure.
///
/// \note The file's contents must not be modified during the lifetime of the
/// `TfLiteModel` or of any `TfLiteInterpreter` objects created from that
/// `TfLiteModel`.
TFL_CAPI_EXPORT extern TfLiteModel* TfLiteModelCreateFromFile(
    const char* model_path);

/// Same as `TfLiteModelCreateFromFile` with a customizable error reporter.
/// * `reporter` takes the provided `user_data` object, as well as a C-style
///   format string and arg list (see also vprintf).
/// * `user_data` is optional. If non-null, it is owned by the client and must
///   remain valid for the duration of the interpreter lifetime.
TFL_CAPI_EXPORT extern TfLiteModel* TfLiteModelCreateFromFileWithErrorReporter(
    const char* model_path,
    void (*reporter)(void* user_data, const char* format, va_list args),
    void* user_data);

/// Destroys the model instance.
TFL_CAPI_EXPORT extern void TfLiteModelDelete(TfLiteModel* model);

/// Returns a new interpreter options instance.
TFL_CAPI_EXPORT extern TfLiteInterpreterOptions*
TfLiteInterpreterOptionsCreate();

/// Creates and returns a shallow copy of an options object.
///
/// The caller is responsible for calling `TfLiteInterpreterOptionsDelete` to
/// deallocate the object pointed to by the returned pointer.
TFL_CAPI_EXPORT extern TfLiteInterpreterOptions* TfLiteInterpreterOptionsCopy(
    const TfLiteInterpreterOptions* from);

/// Destroys the interpreter options instance.
TFL_CAPI_EXPORT extern void TfLiteInterpreterOptionsDelete(
    TfLiteInterpreterOptions* options);

/// Sets the number of CPU threads to use for the interpreter.
TFL_CAPI_EXPORT extern void TfLiteInterpreterOptionsSetNumThreads(
    TfLiteInterpreterOptions* options, int32_t num_threads);

/// Adds a delegate to be applied during `TfLiteInterpreter` creation.
///
/// If delegate application fails, interpreter creation will also fail with an
/// associated error logged.
///
/// \note The caller retains ownership of the delegate and should ensure that it
/// remains valid for the duration of any created interpreter's lifetime.
///
/// If you are NOT using "TensorFlow Lite in Play Services", and NOT building
/// with the `TFLITE_WITH_STABLE_ABI` or `TFLITE_USE_OPAQUE_DELEGATE` macros
/// enabled, it is possible to pass a `TfLiteDelegate*` rather than a
/// `TfLiteOpaqueDelegate*` to this function, since in those cases
/// `TfLiteOpaqueDelegate` is just a typedef alias for `TfLiteDelegate`.
/// This is for compatibility with existing source code
/// and existing delegates.  For new delegates, it is recommended to
/// use `TfLiteOpaqueDelegate` rather than `TfLiteDelegate`.  (See
/// `TfLiteOpaqueDelegate` in tensorflow/lite/core/c/c_api_types.h.)
TFL_CAPI_EXPORT extern void TfLiteInterpreterOptionsAddDelegate(
    TfLiteInterpreterOptions* options, TfLiteOpaqueDelegate* delegate);

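// Example: a sketch of delegate registration and the teardown order implied
// by the ownership note above. `MyDelegateCreate` and `MyDelegateDelete` are
// hypothetical placeholders for whatever delegate implementation is in use;
// they are not part of this API.
//
//   TfLiteOpaqueDelegate* delegate = MyDelegateCreate();
//   TfLiteInterpreterOptionsAddDelegate(options, delegate);
//   TfLiteInterpreter* interpreter = TfLiteInterpreterCreate(model, options);
//   ...
//   // The delegate must outlive the interpreter, so delete it last.
//   TfLiteInterpreterDelete(interpreter);
//   MyDelegateDelete(delegate);
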
/// Sets a custom error reporter for interpreter execution.
///
/// * `reporter` takes the provided `user_data` object, as well as a C-style
///   format string and arg list (see also vprintf).
/// * `user_data` is optional. If non-null, it is owned by the client and must
///   remain valid for the duration of the interpreter lifetime.
TFL_CAPI_EXPORT extern void TfLiteInterpreterOptionsSetErrorReporter(
    TfLiteInterpreterOptions* options,
    void (*reporter)(void* user_data, const char* format, va_list args),
    void* user_data);

/// Adds an op registration to be applied during `TfLiteInterpreter` creation.
///
/// A `TfLiteRegistrationExternal` object is needed to implement a custom op
/// for the TFLite Interpreter via the C API. Calling this function ensures
/// that any `TfLiteInterpreter` created with the specified `options` can
/// execute models that use the custom operator specified in `registration`.
/// Please refer to https://www.tensorflow.org/lite/guide/ops_custom for
/// details on custom op support.
/// \note The caller retains ownership of the TfLiteRegistrationExternal object
/// and should ensure that it remains valid for the duration of any created
/// interpreter's lifetime.
/// \warning This is an experimental API and subject to change.
TFL_CAPI_EXPORT extern void TfLiteInterpreterOptionsAddRegistrationExternal(
    TfLiteInterpreterOptions* options,
    TfLiteRegistrationExternal* registration);

/// Enables users to cancel in-flight invocations with
/// `TfLiteInterpreterCancel`.
///
/// By default this is disabled, and calls to `TfLiteInterpreterCancel` will
/// return kTfLiteError. See `TfLiteInterpreterCancel`.
///
/// \warning This is an experimental API and subject to change.
TFL_CAPI_EXPORT extern TfLiteStatus TfLiteInterpreterOptionsEnableCancellation(
    TfLiteInterpreterOptions* options, bool enable);

/// Returns a new interpreter using the provided model and options, or null on
/// failure.
///
/// * `model` must be a valid model instance. The caller retains ownership of
///   the object, and may destroy it (via TfLiteModelDelete) immediately after
///   creating the interpreter.  However, if the TfLiteModel was allocated with
///   TfLiteModelCreate, then the `model_data` buffer that was passed to
///   TfLiteModelCreate must outlive the lifetime of the TfLiteInterpreter
///   object that this function returns, and must not be modified during that
///   time; and if the TfLiteModel was allocated with TfLiteModelCreateFromFile,
///   then the contents of the model file must not be modified during the
///   lifetime of the TfLiteInterpreter object that this function returns.
/// * `optional_options` may be null. The caller retains ownership of the
///   object, and can safely destroy it (via TfLiteInterpreterOptionsDelete)
///   immediately after creating the interpreter.
///
/// \note The client *must* explicitly allocate tensors before attempting to
/// access input tensor data or invoke the interpreter.
TFL_CAPI_EXPORT extern TfLiteInterpreter* TfLiteInterpreterCreate(
    const TfLiteModel* model, const TfLiteInterpreterOptions* optional_options);

/// Destroys the interpreter.
TFL_CAPI_EXPORT extern void TfLiteInterpreterDelete(
    TfLiteInterpreter* interpreter);

/// Returns the number of input tensors associated with the model.
TFL_CAPI_EXPORT extern int32_t TfLiteInterpreterGetInputTensorCount(
    const TfLiteInterpreter* interpreter);

/// Returns a pointer to an array of input tensor indices.  The length of the
/// array can be obtained via a call to `TfLiteInterpreterGetInputTensorCount`.
///
/// Typically the input tensors associated with an `interpreter` would be set
/// during the initialization of the `interpreter`, through a mechanism like the
/// `InterpreterBuilder`, and remain unchanged throughout the lifetime of the
/// interpreter.  However, there are some circumstances in which the pointer may
/// not remain valid throughout the lifetime of the interpreter, because calls
/// to `SetInputs` on the interpreter invalidate the returned pointer.
///
/// The ownership of the array remains with the TFLite runtime.
TFL_CAPI_EXPORT const int* TfLiteInterpreterInputTensorIndices(
    const TfLiteInterpreter* interpreter);

/// Returns the tensor associated with the input index.
/// REQUIRES: 0 <= input_index < TfLiteInterpreterGetInputTensorCount(interpreter)
TFL_CAPI_EXPORT extern TfLiteTensor* TfLiteInterpreterGetInputTensor(
    const TfLiteInterpreter* interpreter, int32_t input_index);

/// Resizes the specified input tensor.
///
/// \note After a resize, the client *must* explicitly allocate tensors before
/// attempting to access the resized tensor data or invoke the interpreter.
///
/// REQUIRES: 0 <= input_index < TfLiteInterpreterGetInputTensorCount(interpreter)
///
/// This function makes a copy of the input dimensions, so the client can safely
/// deallocate `input_dims` immediately after this function returns.
TFL_CAPI_EXPORT extern TfLiteStatus TfLiteInterpreterResizeInputTensor(
    TfLiteInterpreter* interpreter, int32_t input_index, const int* input_dims,
    int32_t input_dims_size);

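// Example: a sketch of resizing input 0 to a batch of 4, assuming a model
// whose first input is a 4-D float tensor (the dimensions here are
// illustrative only). Note the mandatory re-allocation step before accessing
// tensor data or invoking the interpreter again.
//
//   const int dims[4] = {4, 224, 224, 3};
//   TfLiteInterpreterResizeInputTensor(interpreter, 0, dims, 4);
//   TfLiteInterpreterAllocateTensors(interpreter);
//   TfLiteTensor* input = TfLiteInterpreterGetInputTensor(interpreter, 0);
//   // `input` now has byte size 4 * 224 * 224 * 3 * sizeof(float).
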
/// Updates allocations for all tensors, resizing dependent tensors using the
/// specified input tensor dimensionality.
///
/// This is a relatively expensive operation, and need only be called after
/// creating the graph and/or resizing any inputs.
TFL_CAPI_EXPORT extern TfLiteStatus TfLiteInterpreterAllocateTensors(
    TfLiteInterpreter* interpreter);

/// Runs inference for the loaded graph.
///
/// Before calling this function, the caller should first invoke
/// TfLiteInterpreterAllocateTensors() and should also set the values for the
/// input tensors.  After successfully calling this function, the values for the
/// output tensors will be set.
///
/// \note It is possible that the interpreter is not in a ready state to
/// evaluate (e.g., if AllocateTensors() hasn't been called, or if a
/// ResizeInputTensor() has been performed without a subsequent call to
/// AllocateTensors()).
///
///   If the (experimental!) delegate fallback option was enabled in the
///   interpreter options, then the interpreter will automatically fall back to
///   not using any delegates if execution with delegates fails. For details,
///   see TfLiteInterpreterOptionsSetEnableDelegateFallback in
///   c_api_experimental.h.
///
/// Returns one of the following status codes:
///  - kTfLiteOk: Success. Output is valid.
///  - kTfLiteDelegateError: Execution with delegates failed, due to a problem
///    with the delegate(s). If fallback was not enabled, output is invalid.
///    If fallback was enabled, this return value indicates that fallback
///    succeeded, the output is valid, and all delegates previously applied to
///    the interpreter have been undone.
///  - kTfLiteApplicationError: Same as for kTfLiteDelegateError, except that
///    the problem was not with the delegate itself, but rather was
///    due to an incompatibility between the delegate(s) and the
///    interpreter or model.
///  - kTfLiteError: Unexpected/runtime failure. Output is invalid.
TFL_CAPI_EXPORT extern TfLiteStatus TfLiteInterpreterInvoke(
    TfLiteInterpreter* interpreter);

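// Example: a sketch of checking the status code returned by
// `TfLiteInterpreterInvoke`, along the lines of the list above.
//
//   TfLiteStatus status = TfLiteInterpreterInvoke(interpreter);
//   switch (status) {
//     case kTfLiteOk:
//       break;  // Output tensors are valid.
//     case kTfLiteDelegateError:
//     case kTfLiteApplicationError:
//       // Delegate-related failure; output is valid only if the delegate
//       // fallback option was enabled (see above).
//       break;
//     default:
//       // kTfLiteError or other unexpected failure; output is invalid.
//       break;
//   }
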
/// Returns the number of output tensors associated with the model.
TFL_CAPI_EXPORT extern int32_t TfLiteInterpreterGetOutputTensorCount(
    const TfLiteInterpreter* interpreter);

/// Returns a pointer to an array of output tensor indices.  The length of the
/// array can be obtained via a call to `TfLiteInterpreterGetOutputTensorCount`.
///
/// Typically the output tensors associated with an `interpreter` would be set
/// during the initialization of the `interpreter`, through a mechanism like the
/// `InterpreterBuilder`, and remain unchanged throughout the lifetime of the
/// interpreter.  However, there are some circumstances in which the pointer may
/// not remain valid throughout the lifetime of the interpreter, because calls
/// to `SetOutputs` on the interpreter invalidate the returned pointer.
///
/// The ownership of the array remains with the TFLite runtime.
TFL_CAPI_EXPORT const int* TfLiteInterpreterOutputTensorIndices(
    const TfLiteInterpreter* interpreter);

/// Returns the tensor associated with the output index.
/// REQUIRES: 0 <= output_index < TfLiteInterpreterGetOutputTensorCount(interpreter)
///
/// \note The shape and underlying data buffer for output tensors may not be
/// available until after the output tensor has been both sized and allocated.
/// In general, best practice is to interact with the output tensor *after*
/// calling TfLiteInterpreterInvoke().
TFL_CAPI_EXPORT extern const TfLiteTensor* TfLiteInterpreterGetOutputTensor(
    const TfLiteInterpreter* interpreter, int32_t output_index);

/// Returns modifiable access to the tensor that corresponds to the
/// specified `index` and is associated with the provided `interpreter`.
///
/// This requires the `index` to be between 0 and N - 1, where N is the
/// number of tensors in the model.
///
/// Typically the tensors associated with the `interpreter` would be set during
/// the `interpreter` initialization, through a mechanism like the
/// `InterpreterBuilder`, and remain unchanged throughout the lifetime of the
/// interpreter.  However, there are some circumstances in which the pointer may
/// not remain valid throughout the lifetime of the interpreter, because calls
/// to `AddTensors` on the interpreter invalidate the returned pointer.
///
/// Note the difference between this function and
/// `TfLiteInterpreterGetInputTensor` (or `TfLiteInterpreterGetOutputTensor` for
/// that matter): `TfLiteInterpreterGetTensor` takes an index into the array of
/// all tensors associated with the `interpreter`'s model, whereas
/// `TfLiteInterpreterGetInputTensor` takes an index into the array of input
/// tensors.
///
/// The ownership of the tensor remains with the TFLite runtime, meaning the
/// caller should not deallocate the pointer.
TFL_CAPI_EXPORT
TfLiteTensor* TfLiteInterpreterGetTensor(const TfLiteInterpreter* interpreter,
                                         int index);

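// Example: a sketch illustrating the distinction described above. For a valid
// `interpreter`, the first input tensor can be reached either through the
// input-index API or through its model-wide tensor index:
//
//   const int* inputs = TfLiteInterpreterInputTensorIndices(interpreter);
//   TfLiteTensor* a = TfLiteInterpreterGetInputTensor(interpreter, 0);
//   TfLiteTensor* b = TfLiteInterpreterGetTensor(interpreter, inputs[0]);
//   // `a` and `b` refer to the same tensor.
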
/// Tries to cancel any in-flight invocation.
///
/// \note This only cancels `TfLiteInterpreterInvoke` calls that happen before
/// calling this function; it does not cancel subsequent invocations.
/// \note Calling this function will also cancel any in-flight invocations of
/// SignatureRunners constructed from this interpreter.
/// Non-blocking and thread safe.
///
/// Returns kTfLiteError if cancellation is not enabled via
/// `TfLiteInterpreterOptionsEnableCancellation`.
///
/// \warning This is an experimental API and subject to change.
TFL_CAPI_EXPORT extern TfLiteStatus TfLiteInterpreterCancel(
    const TfLiteInterpreter* interpreter);

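// Example: a sketch of enabling and using cancellation. Cancellation must be
// enabled on the options before the interpreter is created; the cancel call
// itself would typically come from another thread while
// `TfLiteInterpreterInvoke` is running (threading code omitted here).
//
//   TfLiteInterpreterOptionsEnableCancellation(options, true);
//   TfLiteInterpreter* interpreter = TfLiteInterpreterCreate(model, options);
//   ...
//   // From another thread, while an invocation is in flight:
//   TfLiteInterpreterCancel(interpreter);
//   // This requests that the in-flight invocation stop as soon as possible;
//   // it does not affect invocations started afterwards.
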
// --------------------------------------------------------------------------
// TfLiteTensor wraps data associated with a graph tensor.
//
// Note that, while the TfLiteTensor struct is not currently opaque, and its
// fields can be accessed directly, these methods are still convenient for
// language bindings. In the future the tensor struct will likely be made opaque
// in the public API.

/// Returns the type of a tensor element.
TFL_CAPI_EXPORT extern TfLiteType TfLiteTensorType(const TfLiteTensor* tensor);

/// Returns the number of dimensions that the tensor has.  Returns -1 in case
/// the 'tensor' does not have its dimensions property set.
TFL_CAPI_EXPORT extern int32_t TfLiteTensorNumDims(const TfLiteTensor* tensor);

/// Returns the length of the tensor in the "dim_index" dimension.
/// REQUIRES: 0 <= dim_index < TfLiteTensorNumDims(tensor)
TFL_CAPI_EXPORT extern int32_t TfLiteTensorDim(const TfLiteTensor* tensor,
                                               int32_t dim_index);

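// Example: a sketch of computing the total element count of a tensor from its
// dimensions, as reported by `TfLiteTensorNumDims` and `TfLiteTensorDim`
// (assuming the tensor's dimensions have been set, i.e. num_dims >= 0).
//
//   int32_t num_dims = TfLiteTensorNumDims(tensor);
//   size_t num_elements = 1;
//   for (int32_t i = 0; i < num_dims; ++i) {
//     num_elements *= (size_t)TfLiteTensorDim(tensor, i);
//   }
//   // For a float tensor, TfLiteTensorByteSize(tensor) would normally equal
//   // num_elements * sizeof(float).
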
/// Returns the size of the underlying data in bytes.
TFL_CAPI_EXPORT extern size_t TfLiteTensorByteSize(const TfLiteTensor* tensor);

/// Returns a pointer to the underlying data buffer.
///
/// \note The result may be null if tensors have not yet been allocated, e.g.,
/// if the Tensor has just been created or resized and
/// `TfLiteInterpreterAllocateTensors()` has yet to be called, or if the output
/// tensor is dynamically sized and the interpreter hasn't been invoked.
TFL_CAPI_EXPORT extern void* TfLiteTensorData(const TfLiteTensor* tensor);

/// Returns the (null-terminated) name of the tensor.
TFL_CAPI_EXPORT extern const char* TfLiteTensorName(const TfLiteTensor* tensor);

/// Returns the parameters for asymmetric quantization. The quantization
/// parameters are only valid when the tensor type is `kTfLiteUInt8` and the
/// `scale != 0`. Quantized values can be converted back to float using:
///    real_value = scale * (quantized_value - zero_point);
TFL_CAPI_EXPORT extern TfLiteQuantizationParams TfLiteTensorQuantizationParams(
    const TfLiteTensor* tensor);

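// Example: a sketch of dequantizing a single `kTfLiteUInt8` value using the
// formula above. `quantized_value` here is simply the first element of the
// tensor's data buffer, chosen for illustration.
//
//   TfLiteQuantizationParams params = TfLiteTensorQuantizationParams(tensor);
//   uint8_t quantized_value = ((const uint8_t*)TfLiteTensorData(tensor))[0];
//   float real_value = params.scale * (quantized_value - params.zero_point);
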
/// Copies from the provided input buffer into the tensor's buffer.
/// REQUIRES: input_data_size == TfLiteTensorByteSize(tensor)
TFL_CAPI_EXPORT extern TfLiteStatus TfLiteTensorCopyFromBuffer(
    TfLiteTensor* tensor, const void* input_data, size_t input_data_size);

/// Copies to the provided output buffer from the tensor's buffer.
/// REQUIRES: output_data_size == TfLiteTensorByteSize(tensor)
TFL_CAPI_EXPORT extern TfLiteStatus TfLiteTensorCopyToBuffer(
    const TfLiteTensor* output_tensor, void* output_data,
    size_t output_data_size);

/// Returns a new TfLiteRegistrationExternal instance.
///
/// \note The caller retains ownership and should ensure that the lifetime of
/// the `TfLiteRegistrationExternal` is at least as long as the lifetime of the
/// `TfLiteInterpreter`.
/// \warning This is an experimental API and subject to change.
TFL_CAPI_EXPORT extern TfLiteRegistrationExternal*
TfLiteRegistrationExternalCreate(TfLiteBuiltinOperator builtin_code,
                                 const char* custom_name, int version);

/// Returns the builtin op code of the provided external 'registration'.
///
/// \warning This is an experimental API and subject to change.
TFL_CAPI_EXPORT extern TfLiteBuiltinOperator
TfLiteRegistrationExternalGetBuiltInCode(
    const TfLiteRegistrationExternal* registration);

/// Returns the op version of the provided external 'registration'.  Returns -1
/// in case of error, or if the provided address is null.
///
/// \warning This is an experimental API and subject to change.
TFL_CAPI_EXPORT extern int TfLiteRegistrationExternalGetVersion(
    const TfLiteRegistrationExternal* registration);

/// Returns the custom name of the provided 'registration'. The returned pointer
/// will be non-null iff the op is a custom op.
///
/// \warning This is an experimental API and subject to change.
TFL_CAPI_EXPORT extern const char* TfLiteRegistrationExternalGetCustomName(
    const TfLiteRegistrationExternal* registration);

/// Destroys the TfLiteRegistrationExternal instance.
/// \warning This is an experimental API and subject to change.
TFL_CAPI_EXPORT extern void TfLiteRegistrationExternalDelete(
    TfLiteRegistrationExternal* registration);

/// Sets the initialization callback for the registration.
///
/// The callback is called to initialize the op from serialized data.
/// Please refer to the `init` field of `TfLiteRegistration` for details.
/// \warning This is an experimental API and subject to change.
TFL_CAPI_EXPORT extern void TfLiteRegistrationExternalSetInit(
    TfLiteRegistrationExternal* registration,
    void* (*init)(TfLiteOpaqueContext* context, const char* buffer,
                  size_t length));

/// Sets the deallocation callback for the registration.
///
/// This callback is called to deallocate the data returned by the init
/// callback. The value passed in the `data` parameter is the value that was
/// returned by the `init` callback.
/// Please refer to the `free` field of `TfLiteRegistration` for details.
/// \warning This is an experimental API and subject to change.
TFL_CAPI_EXPORT extern void TfLiteRegistrationExternalSetFree(
    TfLiteRegistrationExternal* registration,
    void (*free)(TfLiteOpaqueContext* context, void* data));

/// Sets the preparation callback for the registration.
///
/// The callback is called when the inputs of the operator have been resized.
/// Please refer to the `prepare` field of `TfLiteRegistration` for details.
/// \warning This is an experimental API and subject to change.
TFL_CAPI_EXPORT extern void TfLiteRegistrationExternalSetPrepare(
    TfLiteRegistrationExternal* registration,
    TfLiteStatus (*prepare)(TfLiteOpaqueContext* context,
                            TfLiteOpaqueNode* node));

/// Sets the invocation callback for the registration.
///
/// The callback is called when the operator is executed.
/// Please refer to the `invoke` field of `TfLiteRegistration` for details.
/// \warning This is an experimental API and subject to change.
TFL_CAPI_EXPORT extern void TfLiteRegistrationExternalSetInvoke(
    TfLiteRegistrationExternal* registration,
    TfLiteStatus (*invoke)(TfLiteOpaqueContext* context,
                           TfLiteOpaqueNode* node));

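// Example: a sketch of registering a custom op named "MyCustomOp" so that it
// can be resolved by interpreters created from `options`. The kernel bodies
// are illustrative stubs; a real implementation would size the outputs in
// `prepare` and compute them in `invoke` using the opaque context/node APIs
// (not shown here).
//
//   static TfLiteStatus MyCustomOpPrepare(TfLiteOpaqueContext* context,
//                                         TfLiteOpaqueNode* node) {
//     // Validate inputs and resize outputs here.
//     return kTfLiteOk;
//   }
//
//   static TfLiteStatus MyCustomOpInvoke(TfLiteOpaqueContext* context,
//                                        TfLiteOpaqueNode* node) {
//     // Read the input tensors and write the output tensors here.
//     return kTfLiteOk;
//   }
//
//   TfLiteRegistrationExternal* reg = TfLiteRegistrationExternalCreate(
//       kTfLiteBuiltinCustom, "MyCustomOp", /*version=*/1);
//   TfLiteRegistrationExternalSetPrepare(reg, MyCustomOpPrepare);
//   TfLiteRegistrationExternalSetInvoke(reg, MyCustomOpInvoke);
//   TfLiteInterpreterOptionsAddRegistrationExternal(options, reg);
//   // `reg` must stay alive for as long as any interpreter created from
//   // `options`, and is destroyed with TfLiteRegistrationExternalDelete.
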
/// Sets the async kernel accessor callback for the registration.
///
/// The callback is called to retrieve the async kernel if the delegate supports
/// it. If the delegate does not support async execution, either this function
/// should not be called, or `async_kernel` needs to be nullptr.
/// `node` is the delegate TfLiteNode created by `ModifyGraphWithDelegate`.
/// Please refer to the `async_kernel` field of `TfLiteRegistration` for
/// details.
/// \warning This is an experimental API and subject to change.
TFL_CAPI_EXPORT extern void TfLiteRegistrationExternalSetAsyncKernel(
    TfLiteRegistrationExternal* registration,
    TfLiteAsyncKernel* (*async_kernel)(TfLiteOpaqueContext* context,
                                       TfLiteOpaqueNode* node));

// NOLINTEND(modernize-redundant-void-arg)

#ifdef __cplusplus
}  // extern "C"
#endif  // __cplusplus

#endif  // TENSORFLOW_LITE_CORE_C_C_API_H_