• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 
3 Licensed under the Apache License, Version 2.0 (the "License");
4 you may not use this file except in compliance with the License.
5 You may obtain a copy of the License at
6 
7     http://www.apache.org/licenses/LICENSE-2.0
8 
9 Unless required by applicable law or agreed to in writing, software
10 distributed under the License is distributed on an "AS IS" BASIS,
11 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 See the License for the specific language governing permissions and
13 limitations under the License.
14 ==============================================================================*/
15 
16 #ifndef TENSORFLOW_LITE_DELEGATES_GPU_GL_API_H_
17 #define TENSORFLOW_LITE_DELEGATES_GPU_GL_API_H_
18 
19 #include <cstdint>
20 #include <functional>
21 #include <memory>
22 #include <unordered_set>
23 #include <vector>
24 
25 #include "tensorflow/lite/delegates/gpu/common/model.h"
26 #include "tensorflow/lite/delegates/gpu/common/status.h"
27 #include "tensorflow/lite/delegates/gpu/gl/command_queue.h"
28 #include "tensorflow/lite/delegates/gpu/gl/compiler_options.h"
29 #include "tensorflow/lite/delegates/gpu/gl/node_shader.h"
30 #include "tensorflow/lite/delegates/gpu/gl/object_manager.h"
31 #include "tensorflow/lite/delegates/gpu/gl/runtime_options.h"
32 #include "tensorflow/lite/delegates/gpu/gl/stats.h"
33 #include "tensorflow/lite/delegates/gpu/gl/workgroups/calculator.h"
34 
35 namespace tflite {
36 namespace gpu {
37 namespace gl {
38 
39 class InferenceContext;
40 
// Represents a model that was prepared for execution. It is stored in a format
// most suitable for execution and optionally may include pre-generated or
// pre-compiled GPU shaders or whatever is needed for efficient execution.
class CompiledModel {
 public:
  virtual ~CompiledModel() = default;

  // Returns statistics collected while this model was being compiled
  // (see CompilerStats in stats.h).
  virtual CompilerStats stats() const = 0;

  // Creates new inference context. Result can outlive @this.
  //
  // @param options      runtime tuning knobs for the created context.
  // @param objects      externally managed GPU objects (inputs/outputs) the
  //                     context will bind to; not owned.
  // @param command_queue queue used to submit GPU work; not owned.
  // @param inference_context receives the newly created context on success.
  //
  // NewRun call as well as subsequent calls to InferenceContext methods should
  // be done from the same EGL context.
  virtual absl::Status NewRun(
      const RuntimeOptions& options, const ObjectManager* objects,
      CommandQueue* command_queue,
      std::unique_ptr<InferenceContext>* inference_context) const = 0;

#ifndef TFLITE_GPU_BINARY_RELEASE
  // Serializes compiled model to a string. The result can be restored later
  // with ReadSerializedModel().
  // @return OK status if serialization finished successfully.
  virtual absl::Status Serialize(
      std::vector<uint8_t>* serialized_compiled_model) const = 0;
#endif  // TFLITE_GPU_BINARY_RELEASE
};
66 
// Turns the given model into "compiled" form that is suitable for inference.
//
// @param options              compiler options controlling shader generation.
// @param model                the graph to compile.
// @param tflite_graph_io      tensor indices that are graph inputs/outputs in
//                             the original TFLite graph.
// @param node_shader          produces a shader for each graph node.
// @param workgroup_calculator chooses GPU workgroup sizes for generated
//                             shaders.
// @param compiled_model       receives the compiled model on success.
absl::Status Compile(const CompilationOptions& options,
                     const GraphFloat32& model,
                     const std::unordered_set<int>& tflite_graph_io,  // NOLINT
                     const NodeShader& node_shader,
                     const WorkgroupsCalculator& workgroup_calculator,
                     std::unique_ptr<CompiledModel>* compiled_model);
74 
#ifndef TFLITE_GPU_BINARY_RELEASE
// Reads serialized representation previously created with
// CompiledModel::Serialize call.
//
// @param serialized_model bytes produced by CompiledModel::Serialize.
// @param compiled_model   receives the reconstructed model on success.
absl::Status ReadSerializedModel(
    const std::vector<uint8_t>& serialized_model,
    std::unique_ptr<CompiledModel>* compiled_model);
#endif  // TFLITE_GPU_BINARY_RELEASE
82 
// Encapsulates everything needed for one or more inference executions done
// sequentially.
//
// Thread-safe.
class InferenceContext {
 public:
  virtual ~InferenceContext() = default;

  // Returns statistics collected at runtime (see RuntimeStats in stats.h).
  virtual RuntimeStats stats() const = 0;

  // Executes inference.
  virtual absl::Status Execute() = 0;

  // Asks the context to reset itself for another round of inference. Note
  // that this does not affect inputs or outputs — they are not cleared, so it
  // is possible to re-use them.
  // It is an error to call Reset while a previous run is still in progress.
  virtual absl::Status Reset() = 0;
};
102 
103 }  // namespace gl
104 }  // namespace gpu
105 }  // namespace tflite
106 
107 #endif  // TENSORFLOW_LITE_DELEGATES_GPU_GL_API_H_
108