/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_CORE_SUBGRAPH_H_
#define TENSORFLOW_LITE_CORE_SUBGRAPH_H_

#include <cstdlib>
#include <vector>

#include "tensorflow/lite/allocation.h"
#include "tensorflow/lite/c/c_api_internal.h"
#include "tensorflow/lite/memory_planner.h"
#include "tensorflow/lite/profiling/profiler.h"
#include "tensorflow/lite/util.h"

namespace tflite {

// Forward declare since NNAPIDelegate uses Interpreter.
class NNAPIDelegate;

class Subgraph {
 public:
  friend class Interpreter;

  Subgraph(ErrorReporter* error_reporter,
           TfLiteExternalContext** external_contexts,
           std::vector<std::unique_ptr<Subgraph>>* subgraphs);

  Subgraph(const Subgraph&) = delete;

  // Subgraphs should be movable but not copyable.
  Subgraph(Subgraph&&) = default;
  Subgraph& operator=(const Subgraph&) = delete;
  virtual ~Subgraph();

  // Provide a list of tensor indexes that are inputs to the model.
  // Each index is bounds-checked, and this modifies the consistent_ flag of
  // the interpreter.
  TfLiteStatus SetInputs(std::vector<int> inputs);

  // Provide a list of tensor indexes that are outputs to the model.
  // Each index is bounds-checked, and this modifies the consistent_ flag of
  // the interpreter.
  TfLiteStatus SetOutputs(std::vector<int> outputs);

  // Provide a list of tensor indexes that are variable tensors.
  // Each index is bounds-checked, and this modifies the consistent_ flag of
  // the interpreter.
  TfLiteStatus SetVariables(std::vector<int> variables);

  // Ensure the internal node storage memory allocates at least `count`
  // spots for nodes. NOTE: this doesn't actually add operators. This is an
  // efficiency optimization that is subject to change.
  void ReserveNodes(int count);

  // Adds a node with the given parameters and returns the index of the new
  // node in `node_index` (optionally). The Interpreter will take ownership of
  // `builtin_data` and destroy it with `free`. Ownership of `init_data`
  // remains with the caller.
  TfLiteStatus AddNodeWithParameters(const std::vector<int>& inputs,
                                     const std::vector<int>& outputs,
                                     const char* init_data,
                                     size_t init_data_size, void* builtin_data,
                                     const TfLiteRegistration* registration,
                                     int* node_index = nullptr);

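  // Example (an illustrative sketch, not part of this header): because the
  // Subgraph releases `builtin_data` with `free`, it should be allocated with
  // `malloc`. Here `subgraph` is assumed to point to a constructed Subgraph,
  // `add_registration` is an assumed caller-provided registration for an ADD
  // op, and tensors 0, 1, and 2 are assumed to exist already.
  //
  //   auto* params =
  //       static_cast<TfLiteAddParams*>(malloc(sizeof(TfLiteAddParams)));
  //   params->activation = kTfLiteActNone;
  //   int node_index = -1;
  //   subgraph->AddNodeWithParameters({0, 1}, {2}, /*init_data=*/nullptr,
  //                                   /*init_data_size=*/0, params,
  //                                   &add_registration, &node_index);
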
  // Adds `tensors_to_add` tensors, preserving pre-existing Tensor entries.
  // The value pointed to by `first_new_tensor_index` will be set to the
  // index of the first new tensor if `first_new_tensor_index` is non-null.
  TfLiteStatus AddTensors(int tensors_to_add,
                          int* first_new_tensor_index = nullptr);

  // Set description of inputs/outputs/data/fptrs for tensor `tensor_index`.
  // This variant assumes an external buffer of size `bytes` has been
  // allocated. The lifetime of `buffer` must be at least as long as that of
  // the Interpreter.
  inline TfLiteStatus SetTensorParametersReadOnly(
      int tensor_index, TfLiteType type, const char* name,
      const std::vector<int>& dims, TfLiteQuantization quantization,
      const char* buffer, size_t bytes,
      const Allocation* allocation = nullptr) {
    return SetTensorParametersReadOnly(tensor_index, type, name, dims.size(),
                                       dims.data(), quantization, buffer, bytes,
                                       allocation);
  }
  TfLiteStatus SetTensorParametersReadOnly(
      int tensor_index, TfLiteType type, const char* name, const size_t rank,
      const int* dims, TfLiteQuantization quantization, const char* buffer,
      size_t bytes, const Allocation* allocation = nullptr);

  // Set description of inputs/outputs/data/fptrs for tensor `tensor_index`.
  // This variant declares a writable tensor whose backing memory is managed
  // by the Subgraph and sized from the tensor type and dimensions.
  inline TfLiteStatus SetTensorParametersReadWrite(
      int tensor_index, TfLiteType type, const char* name,
      const std::vector<int>& dims, TfLiteQuantization quantization,
      bool is_variable = false) {
    return SetTensorParametersReadWrite(tensor_index, type, name, dims.size(),
                                        dims.data(), quantization, is_variable);
  }
  TfLiteStatus SetTensorParametersReadWrite(int tensor_index, TfLiteType type,
                                            const char* name, const size_t rank,
                                            const int* dims,
                                            TfLiteQuantization quantization,
                                            bool is_variable = false);

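  // Example (an illustrative sketch, not part of this header): declaring the
  // tensors and I/O for a tiny two-input, one-output graph. The indices,
  // names, and shapes are assumptions made for the example.
  //
  //   int first_new = 0;
  //   subgraph->AddTensors(3, &first_new);  // tensors 0, 1, 2
  //   TfLiteQuantization no_quant = {};
  //   no_quant.type = kTfLiteNoQuantization;
  //   subgraph->SetTensorParametersReadWrite(0, kTfLiteFloat32, "a", {1, 4},
  //                                          no_quant);
  //   subgraph->SetTensorParametersReadWrite(1, kTfLiteFloat32, "b", {1, 4},
  //                                          no_quant);
  //   subgraph->SetTensorParametersReadWrite(2, kTfLiteFloat32, "out", {1, 4},
  //                                          no_quant);
  //   subgraph->SetInputs({0, 1});
  //   subgraph->SetOutputs({2});
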
  // WARNING: Experimental interface, subject to change.
  // Overrides the execution plan. This bounds-checks the indices sent in.
  TfLiteStatus SetExecutionPlan(const std::vector<int>& new_plan);

  // Get a mutable tensor data structure.
  // TODO(aselle): Create a safe ArrayHandle interface to avoid exposing this
  // read/write access to structure
  TfLiteTensor* tensor(int tensor_index) {
    if (tensor_index < 0 ||
        static_cast<size_t>(tensor_index) >= context_->tensors_size) {
      return nullptr;
    }
    return &context_->tensors[tensor_index];
  }

  // Get an immutable tensor data structure.
  const TfLiteTensor* tensor(int tensor_index) const {
    if (tensor_index < 0 ||
        static_cast<size_t>(tensor_index) >= context_->tensors_size) {
      return nullptr;
    }
    return &context_->tensors[tensor_index];
  }

  // Mutable access to the list of inputs.
  std::vector<int>& inputs() { return inputs_; }

  // Read-only access to the list of inputs.
  const std::vector<int>& inputs() const { return inputs_; }

  // Mutable access to the list of outputs.
  std::vector<int>& outputs() { return outputs_; }

  // Read-only access to the list of outputs.
  const std::vector<int>& outputs() const { return outputs_; }

  // Mutable access to the list of variable tensors.
  std::vector<int>& variables() { return variables_; }

  // Read-only access to the list of variable tensors.
  const std::vector<int>& variables() const { return variables_; }

  size_t tensors_size() const { return tensors_.size(); }

  // Return the number of ops in the model.
  size_t nodes_size() const { return nodes_and_registration_.size(); }

  // Mutable access to the execution plan.
  std::vector<int>& execution_plan() { return execution_plan_; }

  // Read-only access to the execution plan.
  const std::vector<int>& execution_plan() const { return execution_plan_; }

  // Mutable form of tensors (TEMPORARY for refactor).
  // TODO(b/119495520): remove when refactoring complete.
  std::vector<TfLiteTensor>& tensors() { return tensors_; }
  // Mutable form of nodes and registrations (TEMPORARY for refactor).
  // TODO(b/119495520): remove when refactoring complete.
  std::vector<std::pair<TfLiteNode, TfLiteRegistration>>&
  nodes_and_registration() {
    return nodes_and_registration_;
  }

  const std::vector<std::pair<TfLiteNode, TfLiteRegistration>>&
  nodes_and_registration() const {
    return nodes_and_registration_;
  }

  // Get a pointer to an operation and registration data structure if in
  // bounds.
  const std::pair<TfLiteNode, TfLiteRegistration>* node_and_registration(
      int node_index) const {
    if (node_index < 0 || static_cast<size_t>(node_index) >= nodes_size())
      return nullptr;
    return &nodes_and_registration_[node_index];
  }

  // Change the dimensionality of a given tensor. Note, this is only acceptable
  // for tensor indices that are inputs.
  // Returns status of failure or success.
  // TODO(aselle): Consider implementing ArraySlice equivalent to make this
  //   more adept at accepting data without an extra copy. Use absl::ArraySlice
  //   if our partners determine that dependency is acceptable.
  TfLiteStatus ResizeInputTensor(int tensor_index,
                                 const std::vector<int>& dims);

  // Update allocations for all tensors. This will redim dependent tensors
  // using the input tensor dimensionality as given. This is relatively
  // expensive. If you know that your sizes are not changing, you need not
  // call this. Returns status of success or failure.
  TfLiteStatus AllocateTensors();

  // Invoke the subgraph (run the whole graph in dependency order).
  //
  // NOTE: It is possible that the interpreter is not in a ready state
  // to evaluate (i.e. if a ResizeTensor() has been performed without an
  // AllocateTensors()).
  // Returns status of success or failure.
  TfLiteStatus Invoke();

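  // Example (an illustrative sketch, not part of this header): the typical
  // resize / allocate / fill / invoke / read cycle, assuming the float
  // tensors 0, 1, and 2 declared in the sketch above.
  //
  //   subgraph->ResizeInputTensor(0, {1, 4});
  //   subgraph->ResizeInputTensor(1, {1, 4});
  //   if (subgraph->AllocateTensors() != kTfLiteOk) { /* handle error */ }
  //   std::fill_n(subgraph->tensor(0)->data.f, 4, 1.0f);
  //   std::fill_n(subgraph->tensor(1)->data.f, 4, 2.0f);
  //   if (subgraph->Invoke() != kTfLiteOk) { /* handle error */ }
  //   const float* result = subgraph->tensor(2)->data.f;
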
  // Entry point for C node plugin API to report an error.
  void ReportError(const char* format, ...);

  void UseNNAPI(bool enable);

  // Return the subgraph specific context.
  TfLiteContext* context() { return context_; }

  // Set the value of an external context.
  void SetExternalContext(TfLiteExternalContextType type,
                          TfLiteExternalContext* ctx);
  // Get the half precision flag.
  // WARNING: This is an experimental API and subject to change.
  bool GetAllowFp16PrecisionForFp32() const {
    return context_->allow_fp32_relax_to_fp16;
  }

  // Sets the cancellation function pointer in order to cancel a request in the
  // middle of a call to Invoke(). The interpreter queries this function during
  // inference, between op invocations; when it returns true, the interpreter
  // will abort execution and return `kTfLiteError`. The `data` parameter
  // contains any data used by the cancellation function, and if non-null,
  // remains owned by the caller.
  // WARNING: This is an experimental API and subject to change.
  void SetCancellationFunction(void* data, bool (*check_cancelled_func)(void*));

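  // Example (an illustrative sketch, not part of this header): cancelling an
  // in-flight Invoke() through a caller-owned atomic flag; the flag and its
  // lifetime are assumptions made for the example.
  //
  //   static std::atomic<bool> cancelled{false};
  //   subgraph->SetCancellationFunction(&cancelled, [](void* data) {
  //     return static_cast<std::atomic<bool>*>(data)->load();
  //   });
  //   // From another thread: cancelled = true;  Invoke() then returns
  //   // kTfLiteError at the next op boundary.
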
  // Ensure the data in `tensor.data` is readable. If a delegate is used, this
  // might require copying the data from the delegate buffer back to raw
  // memory.
  // WARNING: This is an experimental API and subject to change.
  // TODO(b/119495520): make this private when refactoring complete.
  TfLiteStatus EnsureTensorDataIsReadable(int tensor_index) {
    TfLiteTensor* t = &tensors_[tensor_index];
    TF_LITE_ENSURE(context_, t != nullptr);
    if (t->data_is_stale) {
      TF_LITE_ENSURE(context_, t->delegate != nullptr);
      TF_LITE_ENSURE(context_, t->buffer_handle != kTfLiteNullBufferHandle);
      TF_LITE_ENSURE(context_, t->delegate->CopyFromBufferHandle != nullptr);
      // TODO(b/120420546): we must add a test that exercises this code.
      TF_LITE_ENSURE_STATUS(t->delegate->CopyFromBufferHandle(
          context_, t->delegate, t->buffer_handle, t));
      t->data_is_stale = false;
    }
    return kTfLiteOk;
  }

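  // Example (an illustrative sketch, not part of this header): reading a
  // delegate-backed output tensor after Invoke().
  //
  //   int out_index = subgraph->outputs()[0];
  //   if (subgraph->EnsureTensorDataIsReadable(out_index) == kTfLiteOk) {
  //     const float* out = subgraph->tensor(out_index)->data.f;
  //     // ... consume `out` ...
  //   }
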
  // The default capacity of the `tensors_` vector.
  static constexpr int kTensorsReservedCapacity = 128;
  // The capacity headroom of the `tensors_` vector before calling ops'
  // `prepare` and `invoke` functions. Within these functions, it is guaranteed
  // that allocating up to `kTensorsCapacityHeadroom` more tensors won't
  // invalidate pointers to existing tensors.
  static constexpr int kTensorsCapacityHeadroom = 16;

  // Reset all variable tensors to the default value.
  // If a variable tensor doesn't have a buffer, reset it to zero.
  // TODO(b/115961645): Implement - If a variable tensor has a buffer, reset it
  // to the value of the buffer.
  // WARNING: This is an experimental API and subject to change.
  TfLiteStatus ResetVariableTensors();

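  // Example (an illustrative sketch, not part of this header): between
  // independent input sequences, stateful models can clear their variable
  // tensors with:
  //
  //   subgraph->ResetVariableTensors();
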
  void SetProfiler(profiling::Profiler* profiler) {
    profiler_ = profiler;
    context_->profiler = profiler;
  }

  profiling::Profiler* GetProfiler() { return profiler_; }

  // Returns a pointer to the vector of subgraphs.
  // WARNING: This is an experimental API and subject to change.
  std::vector<std::unique_ptr<Subgraph>>* GetSubgraphs() { return subgraphs_; }

  // True if all tensors in the graph have static sizes after calling
  // `AllocateTensors`.
  // Before `AllocateTensors` is called, this will always return true.
  bool HasDynamicTensors() { return has_dynamic_tensors_; }

 private:
  // Prevent 'context_' from accessing functions that are only available to
  // delegated kernels.
  void SwitchToKernelContext();

  // Add delegate-only functions to 'context_'.
  void SwitchToDelegateContext();

  // Give 'op_reg' a chance to initialize itself using the contents of
  // 'buffer'.
  void* OpInit(const TfLiteRegistration& op_reg, const char* buffer,
               size_t length) {
    if (op_reg.init == nullptr) return nullptr;
    return op_reg.init(context_, buffer, length);
  }

  // Let 'op_reg' release any memory it might have allocated via 'OpInit'.
  void OpFree(const TfLiteRegistration& op_reg, void* buffer) {
    if (op_reg.free == nullptr) return;
    if (buffer) {
      op_reg.free(context_, buffer);
    }
  }

  // Prepare the given 'node' for execution.
  TfLiteStatus OpPrepare(const TfLiteRegistration& op_reg, TfLiteNode* node) {
    if (op_reg.prepare == nullptr) return kTfLiteOk;
    return op_reg.prepare(context_, node);
  }

  // Invoke the operator represented by 'node'.
  TfLiteStatus OpInvoke(const TfLiteRegistration& op_reg, TfLiteNode* node) {
    if (op_reg.invoke == nullptr) return kTfLiteError;
    return op_reg.invoke(context_, node);
  }

  // Call OpPrepare() for as many ops as possible, allocating memory for their
  // tensors. If an op containing dynamic tensors is found, preparation will be
  // postponed until this function is called again. This allows the interpreter
  // to wait until Invoke() to resolve the sizes of dynamic tensors.
  TfLiteStatus PrepareOpsAndTensors();

  // Call OpPrepare() for all ops starting at 'first_node'. Stop when a
  // dynamic tensor is found or all ops have been prepared. Fill
  // 'last_node_prepared' with the id of the op containing dynamic tensors, or
  // the last one in the graph.
  TfLiteStatus PrepareOpsStartingAt(int first_execution_plan_index,
                                    int* last_execution_plan_index_prepared);

  // Tensors needed by the interpreter. Use `AddTensors` to add more blank
  // tensor entries. Note, `tensors_.data()` needs to be synchronized to the
  // `context_` whenever this std::vector is reallocated. Currently this
  // only happens in `AddTensors()`.
  std::vector<TfLiteTensor> tensors_;

  // Check if an array of tensor indices are valid with respect to the Tensor
  // array.
  // NOTE: this changes consistent_ to be false if indices are out of bounds.
  TfLiteStatus CheckTensorIndices(const char* label, const int* indices,
                                  int length);

  // Compute the number of bytes required to represent a tensor with dimensions
  // specified by the array dims (of length dims_size). Returns the status code
  // and bytes.
  TfLiteStatus BytesRequired(TfLiteType type, const int* dims, size_t dims_size,
                             size_t* bytes);

  // Implementation of a tensor resize request. If the given tensor is of
  // type kTfLiteDynamic it will also be allocated new memory.
  TfLiteStatus ResizeTensorImpl(TfLiteTensor* tensor, TfLiteIntArray* new_size);

  // Report a detailed error string (will be printed to stderr).
  // TODO(aselle): allow user of class to provide alternative destinations.
  void ReportErrorImpl(const char* format, va_list args);

  // Entry point for C node plugin API to request that a tensor be resized.
  static TfLiteStatus ResizeTensor(TfLiteContext* context, TfLiteTensor* tensor,
                                   TfLiteIntArray* new_size);
  // Entry point for C node plugin API to report an error.
  static void ReportErrorC(TfLiteContext* context, const char* format, ...);

  // Entry point for C node plugin API to add new tensors.
  static TfLiteStatus AddTensors(TfLiteContext* context, int tensors_to_add,
                                 int* first_new_tensor_index);

  // WARNING: This is an experimental API and subject to change.
  // Entry point for C API ReplaceNodeSubsetsWithDelegateKernels.
  static TfLiteStatus ReplaceNodeSubsetsWithDelegateKernels(
      TfLiteContext* context, TfLiteRegistration registration,
      const TfLiteIntArray* nodes_to_replace, TfLiteDelegate* delegate);

  // Update the execution graph to replace some of the nodes with stub
  // nodes. Specifically, any node index that has `nodes[index] == 1` will be
  // slated for replacement with a delegate kernel specified by registration.
  // Ownership of 'nodes_to_replace' and 'delegate' remains with the caller.
  // WARNING: This is an experimental interface that is subject to change.
  TfLiteStatus ReplaceNodeSubsetsWithDelegateKernels(
      TfLiteRegistration registration, const TfLiteIntArray* nodes_to_replace,
      TfLiteDelegate* delegate);

  // WARNING: This is an experimental interface that is subject to change.
  // Gets the internal pointer to a TensorFlow Lite node by node_index.
  TfLiteStatus GetNodeAndRegistration(int node_index, TfLiteNode** node,
                                      TfLiteRegistration** registration);

  // WARNING: This is an experimental interface that is subject to change.
  // Entry point for C node plugin API to get a node by index.
  static TfLiteStatus GetNodeAndRegistration(struct TfLiteContext*,
                                             int node_index, TfLiteNode** node,
                                             TfLiteRegistration** registration);

  // WARNING: This is an experimental interface that is subject to change.
  // Gets a TfLiteIntArray* representing the execution plan. The interpreter
  // owns this memory and it is only guaranteed to exist during the invocation
  // of the delegate prepare.
  TfLiteStatus GetExecutionPlan(TfLiteIntArray** execution_plan);

  // WARNING: This is an experimental interface that is subject to change.
  // Entry point for C node plugin API to get the execution plan.
  static TfLiteStatus GetExecutionPlan(struct TfLiteContext* context,
                                       TfLiteIntArray** execution_plan);

  // Retrieve an existing external context by type.
  TfLiteExternalContext* GetExternalContext(TfLiteExternalContextType type);
  static TfLiteExternalContext* GetExternalContext(
      struct TfLiteContext* context, TfLiteExternalContextType type);

  // Set the value of an external context.
  static void SetExternalContext(struct TfLiteContext* context,
                                 TfLiteExternalContextType type,
                                 TfLiteExternalContext* ctx);

  // Allow a delegate to look at the graph and modify it to handle parts of
  // the graph itself. After this is called, the graph may contain new nodes
  // that replace one or more nodes.
  // NOTE: If tensors were allocated prior to delegate application, they will
  // be reallocated if the graph was modified (i.e., the caller does *not* need
  // to explicitly call |AllocateTensors()| again). If tensors were unallocated,
  // they will remain unallocated after delegate application.
  // WARNING: This is an experimental API and subject to change.
  TfLiteStatus ModifyGraphWithDelegate(TfLiteDelegate* delegate);

  // Ensures that `tensors_` has at least `kTensorsCapacityHeadroom` extra
  // capacity. Calling this function may invalidate existing pointers to
  // tensors. After calling this function, adding `kTensorsCapacityHeadroom`
  // more tensors won't invalidate the pointers to existing tensors.
  void EnsureTensorsVectorCapacity() {
    const size_t required_capacity = tensors_.size() + kTensorsCapacityHeadroom;
    if (required_capacity > tensors_.capacity()) {
      tensors_.reserve(required_capacity);
      context_->tensors = tensors_.data();
    }
  }

  // The state of the Interpreter.
  enum State {
    // The interpreter isn't ready to be invoked.
    // `AllocateTensors` needs to be called to enter an invokable state.
    kStateUninvokable = 0,
    // The interpreter is ready to be invoked.
    kStateInvokable,
    // The interpreter is ready to be invoked, and the graph can't be further
    // modified. The interpreter will enter this state when calling
    // `ModifyGraphWithDelegate` with `allow_dynamic_tensors=false`.
    kStateInvokableAndImmutable,
  };
  State state_ = kStateUninvokable;

  // A pure C data structure used to communicate with the pure C plugin
  // interface. To avoid copying tensor metadata, this is also the definitive
  // structure to store tensors.
  // TODO(b/119495520): Get rid of owned_context_ and just make context_ an
  // instance.
  TfLiteContext owned_context_;
  TfLiteContext* context_;

  // Node inputs/outputs are stored in TfLiteNode, and TfLiteRegistration
  // stores function pointers to the actual implementation.
  std::vector<std::pair<TfLiteNode, TfLiteRegistration>>
      nodes_and_registration_;

  // Whether the model is consistent. That is to say, whether the inputs and
  // outputs of every node and the global inputs and outputs are valid indexes
  // into the tensor array.
  bool consistent_ = true;

  // Array of indices representing the tensors that are inputs to the
  // interpreter.
  std::vector<int> inputs_;

  // Array of indices representing the tensors that are outputs of the
  // interpreter.
  std::vector<int> outputs_;

  // Array of indices representing the tensors that are variable tensors.
  std::vector<int> variables_;

  // The error reporter delegate that tflite will forward errors to.
  ErrorReporter* error_reporter_;

  // Index of the next node to prepare.
  // During Invoke(), the Interpreter will allocate input tensors first, which
  // are known to be fixed size. Then it will allocate outputs from nodes as
  // far as possible. When there is a node that produces a dynamically sized
  // tensor, the Interpreter will stop allocating tensors, record the id of
  // the next node to allocate, and execute that node to generate the output
  // tensor before continuing to allocate successors. This process repeats
  // until all nodes are executed.
  // NOTE: this relies on the nodes being in topological order.
  int next_execution_plan_index_to_prepare_;

  // WARNING: This is an experimental interface that is subject to change.
  // This is a list of node indices (to index into nodes_and_registration).
  // This represents a valid topological sort (dependency-ordered) execution
  // plan. In particular, it is valid for this ordering to contain only a
  // subset of the node indices.
  std::vector<int> execution_plan_;

  // In the future, we'd like a TfLiteIntArray compatible representation.
  // TODO(aselle): replace execution_plan_ with this.
  std::unique_ptr<TfLiteIntArray, TfLiteIntArrayDeleter> plan_cache_;

  // Whether to delegate to NN API.
  std::unique_ptr<NNAPIDelegate> nnapi_delegate_;

  std::unique_ptr<MemoryPlanner> memory_planner_;

  // Tracking bit for whether a tensor was resized in the course of an op
  // invocation. This is a useful hint to ensure that dynamic tensor outputs
  // trigger downstream reallocation after op invocation.
  bool tensor_resized_since_op_invoke_ = false;

  // External contexts (kTfLiteMaxExternalContexts).
  TfLiteExternalContext** external_contexts_;

  // Profiler for this interpreter instance.
  profiling::Profiler* profiler_ = nullptr;

  // A pointer to a vector of subgraphs. The vector is owned by the
  // interpreter.
  std::vector<std::unique_ptr<Subgraph>>* subgraphs_ = nullptr;

  // True if all tensors in the graph have static sizes after calling
  // `PrepareOpsStartingAt` (which is called by the `AllocateTensors` public
  // function).
  // The value is invalid before `PrepareOpsStartingAt` is called.
  bool has_dynamic_tensors_ = true;

  // Reference to a cancellation function that can cancel a request in the
  // middle of a call to Invoke(). When this function returns true, Invoke()
  // returns kTfLiteError.
  bool (*check_cancelled_func_)(void*) = nullptr;

  // Reference to data used by the cancellation function in
  // `check_cancelled_func_`.
  void* cancellation_data_ = nullptr;
};

}  // namespace tflite
#endif  // TENSORFLOW_LITE_CORE_SUBGRAPH_H_