/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// automatically generated by the FlatBuffers compiler, do not modify


#ifndef FLATBUFFERS_GENERATED_SERIALIZATION_TFLITE_GPU_CL_DATA_H_
#define FLATBUFFERS_GENERATED_SERIALIZATION_TFLITE_GPU_CL_DATA_H_

#include "flatbuffers/flatbuffers.h"

#include "tensorflow/lite/delegates/gpu/common/task/serialization_base_generated.h"

namespace tflite {
namespace gpu {
namespace cl {
namespace data {

struct TensorDescWithId;
struct TensorDescWithIdBuilder;

struct CLNode;
struct CLNodeBuilder;

struct PairOfValueIds;
struct PairOfValueIdsBuilder;

struct BinaryProgram;
struct BinaryProgramBuilder;

struct InferenceContext;
struct InferenceContextBuilder;

struct TensorDescWithId FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef TensorDescWithIdBuilder Builder;
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_DESC = 4,
    VT_ID = 6
  };
  const tflite::gpu::data::TensorDescriptor *desc() const {
    return GetPointer<const tflite::gpu::data::TensorDescriptor *>(VT_DESC);
  }
  int32_t id() const {
    return GetField<int32_t>(VT_ID, 0);
  }
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyOffset(verifier, VT_DESC) &&
           verifier.VerifyTable(desc()) &&
           VerifyField<int32_t>(verifier, VT_ID) &&
           verifier.EndTable();
  }
};

struct TensorDescWithIdBuilder {
  typedef TensorDescWithId Table;
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  void add_desc(
      flatbuffers::Offset<tflite::gpu::data::TensorDescriptor> desc) {
    fbb_.AddOffset(TensorDescWithId::VT_DESC, desc);
  }
  void add_id(int32_t id) {
    fbb_.AddElement<int32_t>(TensorDescWithId::VT_ID, id, 0);
  }
  explicit TensorDescWithIdBuilder(flatbuffers::FlatBufferBuilder &_fbb)
      : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  flatbuffers::Offset<TensorDescWithId> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<TensorDescWithId>(end);
    return o;
  }
};

inline flatbuffers::Offset<TensorDescWithId> CreateTensorDescWithId(
    flatbuffers::FlatBufferBuilder &_fbb,
    flatbuffers::Offset<tflite::gpu::data::TensorDescriptor> desc = 0,
    int32_t id = 0) {
  TensorDescWithIdBuilder builder_(_fbb);
  builder_.add_id(id);
  builder_.add_desc(desc);
  return builder_.Finish();
}
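
// Usage sketch (illustrative, not emitted by flatc): pairing a tensor
// descriptor with a value id. `BuildTensorDescriptor` is a hypothetical
// helper standing in for whatever code serializes the nested
// tflite::gpu::data::TensorDescriptor table; nested tables must be finished
// on the builder before a TensorDescWithIdBuilder is started.
//
//   flatbuffers::FlatBufferBuilder fbb;
//   auto desc = BuildTensorDescriptor(fbb);  // hypothetical helper
//   auto tensor_with_id = CreateTensorDescWithId(fbb, desc, /*id=*/7);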

struct CLNode FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef CLNodeBuilder Builder;
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_GPU_OP = 4,
    VT_FINGERPRINT = 6,
    VT_INPUT_IDS = 8,
    VT_OUTPUT_IDS = 10,
    VT_NAME = 12
  };
  const tflite::gpu::data::GPUOperation *gpu_op() const {
    return GetPointer<const tflite::gpu::data::GPUOperation *>(VT_GPU_OP);
  }
  uint64_t fingerprint() const {
    return GetField<uint64_t>(VT_FINGERPRINT, 0);
  }
  const flatbuffers::Vector<int32_t> *input_ids() const {
    return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_INPUT_IDS);
  }
  const flatbuffers::Vector<int32_t> *output_ids() const {
    return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_OUTPUT_IDS);
  }
  const flatbuffers::String *name() const {
    return GetPointer<const flatbuffers::String *>(VT_NAME);
  }
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyOffset(verifier, VT_GPU_OP) &&
           verifier.VerifyTable(gpu_op()) &&
           VerifyField<uint64_t>(verifier, VT_FINGERPRINT) &&
           VerifyOffset(verifier, VT_INPUT_IDS) &&
           verifier.VerifyVector(input_ids()) &&
           VerifyOffset(verifier, VT_OUTPUT_IDS) &&
           verifier.VerifyVector(output_ids()) &&
           VerifyOffset(verifier, VT_NAME) &&
           verifier.VerifyString(name()) &&
           verifier.EndTable();
  }
};

struct CLNodeBuilder {
  typedef CLNode Table;
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  void add_gpu_op(
      flatbuffers::Offset<tflite::gpu::data::GPUOperation> gpu_op) {
    fbb_.AddOffset(CLNode::VT_GPU_OP, gpu_op);
  }
  void add_fingerprint(uint64_t fingerprint) {
    fbb_.AddElement<uint64_t>(CLNode::VT_FINGERPRINT, fingerprint, 0);
  }
  void add_input_ids(
      flatbuffers::Offset<flatbuffers::Vector<int32_t>> input_ids) {
    fbb_.AddOffset(CLNode::VT_INPUT_IDS, input_ids);
  }
  void add_output_ids(
      flatbuffers::Offset<flatbuffers::Vector<int32_t>> output_ids) {
    fbb_.AddOffset(CLNode::VT_OUTPUT_IDS, output_ids);
  }
  void add_name(flatbuffers::Offset<flatbuffers::String> name) {
    fbb_.AddOffset(CLNode::VT_NAME, name);
  }
  explicit CLNodeBuilder(flatbuffers::FlatBufferBuilder &_fbb)
      : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  flatbuffers::Offset<CLNode> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<CLNode>(end);
    return o;
  }
};

inline flatbuffers::Offset<CLNode> CreateCLNode(
    flatbuffers::FlatBufferBuilder &_fbb,
    flatbuffers::Offset<tflite::gpu::data::GPUOperation> gpu_op = 0,
    uint64_t fingerprint = 0,
    flatbuffers::Offset<flatbuffers::Vector<int32_t>> input_ids = 0,
    flatbuffers::Offset<flatbuffers::Vector<int32_t>> output_ids = 0,
    flatbuffers::Offset<flatbuffers::String> name = 0) {
  CLNodeBuilder builder_(_fbb);
  builder_.add_fingerprint(fingerprint);
  builder_.add_name(name);
  builder_.add_output_ids(output_ids);
  builder_.add_input_ids(input_ids);
  builder_.add_gpu_op(gpu_op);
  return builder_.Finish();
}

inline flatbuffers::Offset<CLNode> CreateCLNodeDirect(
    flatbuffers::FlatBufferBuilder &_fbb,
    flatbuffers::Offset<tflite::gpu::data::GPUOperation> gpu_op = 0,
    uint64_t fingerprint = 0,
    const std::vector<int32_t> *input_ids = nullptr,
    const std::vector<int32_t> *output_ids = nullptr,
    const char *name = nullptr) {
  auto input_ids__ = input_ids ? _fbb.CreateVector<int32_t>(*input_ids) : 0;
  auto output_ids__ = output_ids ? _fbb.CreateVector<int32_t>(*output_ids) : 0;
  auto name__ = name ? _fbb.CreateString(name) : 0;
  return tflite::gpu::cl::data::CreateCLNode(
      _fbb, gpu_op, fingerprint, input_ids__, output_ids__, name__);
}
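
// Usage sketch (illustrative, not emitted by flatc): the `Direct` variant
// accepts std::vector/C-string arguments and serializes the vectors and the
// string itself before starting the CLNode table. `gpu_op` is assumed to be
// an offset already built via the serialization_base schema.
//
//   std::vector<int32_t> inputs = {0, 1};
//   std::vector<int32_t> outputs = {2};
//   auto node = CreateCLNodeDirect(fbb, gpu_op, /*fingerprint=*/0x1234u,
//                                  &inputs, &outputs, "conv2d");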

struct PairOfValueIds FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef PairOfValueIdsBuilder Builder;
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_FIRST = 4,
    VT_SECOND = 6
  };
  int32_t first() const {
    return GetField<int32_t>(VT_FIRST, 0);
  }
  int32_t second() const {
    return GetField<int32_t>(VT_SECOND, 0);
  }
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<int32_t>(verifier, VT_FIRST) &&
           VerifyField<int32_t>(verifier, VT_SECOND) &&
           verifier.EndTable();
  }
};

struct PairOfValueIdsBuilder {
  typedef PairOfValueIds Table;
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  void add_first(int32_t first) {
    fbb_.AddElement<int32_t>(PairOfValueIds::VT_FIRST, first, 0);
  }
  void add_second(int32_t second) {
    fbb_.AddElement<int32_t>(PairOfValueIds::VT_SECOND, second, 0);
  }
  explicit PairOfValueIdsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
      : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  flatbuffers::Offset<PairOfValueIds> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<PairOfValueIds>(end);
    return o;
  }
};

inline flatbuffers::Offset<PairOfValueIds> CreatePairOfValueIds(
    flatbuffers::FlatBufferBuilder &_fbb,
    int32_t first = 0,
    int32_t second = 0) {
  PairOfValueIdsBuilder builder_(_fbb);
  builder_.add_second(second);
  builder_.add_first(first);
  return builder_.Finish();
}
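
// Usage sketch (illustrative, not emitted by flatc): a scalar-only table.
// Fields left at their default of 0 are omitted from the buffer entirely
// and reconstructed from the default on read.
//
//   auto pair = CreatePairOfValueIds(fbb, /*first=*/3, /*second=*/12);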

struct BinaryProgram FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef BinaryProgramBuilder Builder;
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_FINGERPRINT = 4,
    VT_BINARY = 6
  };
  uint64_t fingerprint() const {
    return GetField<uint64_t>(VT_FINGERPRINT, 0);
  }
  const flatbuffers::Vector<uint8_t> *binary() const {
    return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_BINARY);
  }
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<uint64_t>(verifier, VT_FINGERPRINT) &&
           VerifyOffset(verifier, VT_BINARY) &&
           verifier.VerifyVector(binary()) &&
           verifier.EndTable();
  }
};

struct BinaryProgramBuilder {
  typedef BinaryProgram Table;
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  void add_fingerprint(uint64_t fingerprint) {
    fbb_.AddElement<uint64_t>(BinaryProgram::VT_FINGERPRINT, fingerprint, 0);
  }
  void add_binary(flatbuffers::Offset<flatbuffers::Vector<uint8_t>> binary) {
    fbb_.AddOffset(BinaryProgram::VT_BINARY, binary);
  }
  explicit BinaryProgramBuilder(flatbuffers::FlatBufferBuilder &_fbb)
      : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  flatbuffers::Offset<BinaryProgram> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<BinaryProgram>(end);
    return o;
  }
};

inline flatbuffers::Offset<BinaryProgram> CreateBinaryProgram(
    flatbuffers::FlatBufferBuilder &_fbb,
    uint64_t fingerprint = 0,
    flatbuffers::Offset<flatbuffers::Vector<uint8_t>> binary = 0) {
  BinaryProgramBuilder builder_(_fbb);
  builder_.add_fingerprint(fingerprint);
  builder_.add_binary(binary);
  return builder_.Finish();
}

inline flatbuffers::Offset<BinaryProgram> CreateBinaryProgramDirect(
    flatbuffers::FlatBufferBuilder &_fbb,
    uint64_t fingerprint = 0,
    const std::vector<uint8_t> *binary = nullptr) {
  auto binary__ = binary ? _fbb.CreateVector<uint8_t>(*binary) : 0;
  return tflite::gpu::cl::data::CreateBinaryProgram(_fbb, fingerprint,
                                                    binary__);
}
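
// Usage sketch (illustrative, not emitted by flatc): caching a compiled
// OpenCL program keyed by its fingerprint. `GetProgramBinary` is a
// hypothetical stand-in for whatever code extracts the device binary.
//
//   std::vector<uint8_t> blob = GetProgramBinary();  // hypothetical helper
//   auto program =
//       CreateBinaryProgramDirect(fbb, /*fingerprint=*/0xCAFEu, &blob);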

struct InferenceContext FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef InferenceContextBuilder Builder;
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_DRIVER_VERSION = 4,
    VT_BINARY_PROGRAMS = 6,
    VT_NEED_FLUSH = 8,
    VT_FLUSH_PERIODICALLY = 10,
    VT_FLUSH_PERIOD = 12,
    VT_NEED_MANUAL_RELEASE = 14,
    VT_PRECISION = 16,
    VT_STORAGE_TYPE = 18,
    VT_NODES = 20,
    VT_TENSORS = 22,
    VT_CONST_TENSORS = 24,
    VT_INPUT_IDS = 26,
    VT_VARIABLE_IDS_AND_REFS = 28,
    VT_OUTPUT_IDS = 30,
    VT_INPUT_REFS = 32,
    VT_OUTPUT_REFS = 34
  };
  const flatbuffers::String *driver_version() const {
    return GetPointer<const flatbuffers::String *>(VT_DRIVER_VERSION);
  }
  const flatbuffers::Vector<
      flatbuffers::Offset<tflite::gpu::cl::data::BinaryProgram>>
      *binary_programs() const {
    return GetPointer<const flatbuffers::Vector<
        flatbuffers::Offset<tflite::gpu::cl::data::BinaryProgram>> *>(
        VT_BINARY_PROGRAMS);
  }
  bool need_flush() const {
    return GetField<uint8_t>(VT_NEED_FLUSH, 0) != 0;
  }
  bool flush_periodically() const {
    return GetField<uint8_t>(VT_FLUSH_PERIODICALLY, 0) != 0;
  }
  int32_t flush_period() const {
    return GetField<int32_t>(VT_FLUSH_PERIOD, 0);
  }
  bool need_manual_release() const {
    return GetField<uint8_t>(VT_NEED_MANUAL_RELEASE, 0) != 0;
  }
  tflite::gpu::data::CalculationsPrecision precision() const {
    return static_cast<tflite::gpu::data::CalculationsPrecision>(
        GetField<int8_t>(VT_PRECISION, 0));
  }
  tflite::gpu::data::TensorStorageType storage_type() const {
    return static_cast<tflite::gpu::data::TensorStorageType>(
        GetField<int8_t>(VT_STORAGE_TYPE, 0));
  }
  const flatbuffers::Vector<flatbuffers::Offset<tflite::gpu::cl::data::CLNode>>
      *nodes() const {
    return GetPointer<const flatbuffers::Vector<
        flatbuffers::Offset<tflite::gpu::cl::data::CLNode>> *>(VT_NODES);
  }
  const flatbuffers::Vector<
      flatbuffers::Offset<tflite::gpu::cl::data::TensorDescWithId>>
      *tensors() const {
    return GetPointer<const flatbuffers::Vector<
        flatbuffers::Offset<tflite::gpu::cl::data::TensorDescWithId>> *>(
        VT_TENSORS);
  }
  const flatbuffers::Vector<
      flatbuffers::Offset<tflite::gpu::cl::data::TensorDescWithId>>
      *const_tensors() const {
    return GetPointer<const flatbuffers::Vector<
        flatbuffers::Offset<tflite::gpu::cl::data::TensorDescWithId>> *>(
        VT_CONST_TENSORS);
  }
  const flatbuffers::Vector<int32_t> *input_ids() const {
    return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_INPUT_IDS);
  }
  const flatbuffers::Vector<
      flatbuffers::Offset<tflite::gpu::cl::data::PairOfValueIds>>
      *variable_ids_and_refs() const {
    return GetPointer<const flatbuffers::Vector<
        flatbuffers::Offset<tflite::gpu::cl::data::PairOfValueIds>> *>(
        VT_VARIABLE_IDS_AND_REFS);
  }
  const flatbuffers::Vector<int32_t> *output_ids() const {
    return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_OUTPUT_IDS);
  }
  const flatbuffers::Vector<int64_t> *input_refs() const {
    return GetPointer<const flatbuffers::Vector<int64_t> *>(VT_INPUT_REFS);
  }
  const flatbuffers::Vector<int64_t> *output_refs() const {
    return GetPointer<const flatbuffers::Vector<int64_t> *>(VT_OUTPUT_REFS);
  }
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyOffset(verifier, VT_DRIVER_VERSION) &&
           verifier.VerifyString(driver_version()) &&
           VerifyOffset(verifier, VT_BINARY_PROGRAMS) &&
           verifier.VerifyVector(binary_programs()) &&
           verifier.VerifyVectorOfTables(binary_programs()) &&
           VerifyField<uint8_t>(verifier, VT_NEED_FLUSH) &&
           VerifyField<uint8_t>(verifier, VT_FLUSH_PERIODICALLY) &&
           VerifyField<int32_t>(verifier, VT_FLUSH_PERIOD) &&
           VerifyField<uint8_t>(verifier, VT_NEED_MANUAL_RELEASE) &&
           VerifyField<int8_t>(verifier, VT_PRECISION) &&
           VerifyField<int8_t>(verifier, VT_STORAGE_TYPE) &&
           VerifyOffset(verifier, VT_NODES) &&
           verifier.VerifyVector(nodes()) &&
           verifier.VerifyVectorOfTables(nodes()) &&
           VerifyOffset(verifier, VT_TENSORS) &&
           verifier.VerifyVector(tensors()) &&
           verifier.VerifyVectorOfTables(tensors()) &&
           VerifyOffset(verifier, VT_CONST_TENSORS) &&
           verifier.VerifyVector(const_tensors()) &&
           verifier.VerifyVectorOfTables(const_tensors()) &&
           VerifyOffset(verifier, VT_INPUT_IDS) &&
           verifier.VerifyVector(input_ids()) &&
           VerifyOffset(verifier, VT_VARIABLE_IDS_AND_REFS) &&
           verifier.VerifyVector(variable_ids_and_refs()) &&
           verifier.VerifyVectorOfTables(variable_ids_and_refs()) &&
           VerifyOffset(verifier, VT_OUTPUT_IDS) &&
           verifier.VerifyVector(output_ids()) &&
           VerifyOffset(verifier, VT_INPUT_REFS) &&
           verifier.VerifyVector(input_refs()) &&
           VerifyOffset(verifier, VT_OUTPUT_REFS) &&
           verifier.VerifyVector(output_refs()) &&
           verifier.EndTable();
  }
};

struct InferenceContextBuilder {
  typedef InferenceContext Table;
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  void add_driver_version(
      flatbuffers::Offset<flatbuffers::String> driver_version) {
    fbb_.AddOffset(InferenceContext::VT_DRIVER_VERSION, driver_version);
  }
  void add_binary_programs(
      flatbuffers::Offset<flatbuffers::Vector<
          flatbuffers::Offset<tflite::gpu::cl::data::BinaryProgram>>>
          binary_programs) {
    fbb_.AddOffset(InferenceContext::VT_BINARY_PROGRAMS, binary_programs);
  }
  void add_need_flush(bool need_flush) {
    fbb_.AddElement<uint8_t>(InferenceContext::VT_NEED_FLUSH,
                             static_cast<uint8_t>(need_flush), 0);
  }
  void add_flush_periodically(bool flush_periodically) {
    fbb_.AddElement<uint8_t>(InferenceContext::VT_FLUSH_PERIODICALLY,
                             static_cast<uint8_t>(flush_periodically), 0);
  }
  void add_flush_period(int32_t flush_period) {
    fbb_.AddElement<int32_t>(InferenceContext::VT_FLUSH_PERIOD, flush_period,
                             0);
  }
  void add_need_manual_release(bool need_manual_release) {
    fbb_.AddElement<uint8_t>(InferenceContext::VT_NEED_MANUAL_RELEASE,
                             static_cast<uint8_t>(need_manual_release), 0);
  }
  void add_precision(tflite::gpu::data::CalculationsPrecision precision) {
    fbb_.AddElement<int8_t>(InferenceContext::VT_PRECISION,
                            static_cast<int8_t>(precision), 0);
  }
  void add_storage_type(tflite::gpu::data::TensorStorageType storage_type) {
    fbb_.AddElement<int8_t>(InferenceContext::VT_STORAGE_TYPE,
                            static_cast<int8_t>(storage_type), 0);
  }
  void add_nodes(
      flatbuffers::Offset<flatbuffers::Vector<
          flatbuffers::Offset<tflite::gpu::cl::data::CLNode>>> nodes) {
    fbb_.AddOffset(InferenceContext::VT_NODES, nodes);
  }
  void add_tensors(
      flatbuffers::Offset<flatbuffers::Vector<
          flatbuffers::Offset<tflite::gpu::cl::data::TensorDescWithId>>>
          tensors) {
    fbb_.AddOffset(InferenceContext::VT_TENSORS, tensors);
  }
  void add_const_tensors(
      flatbuffers::Offset<flatbuffers::Vector<
          flatbuffers::Offset<tflite::gpu::cl::data::TensorDescWithId>>>
          const_tensors) {
    fbb_.AddOffset(InferenceContext::VT_CONST_TENSORS, const_tensors);
  }
  void add_input_ids(
      flatbuffers::Offset<flatbuffers::Vector<int32_t>> input_ids) {
    fbb_.AddOffset(InferenceContext::VT_INPUT_IDS, input_ids);
  }
  void add_variable_ids_and_refs(
      flatbuffers::Offset<flatbuffers::Vector<
          flatbuffers::Offset<tflite::gpu::cl::data::PairOfValueIds>>>
          variable_ids_and_refs) {
    fbb_.AddOffset(InferenceContext::VT_VARIABLE_IDS_AND_REFS,
                   variable_ids_and_refs);
  }
  void add_output_ids(
      flatbuffers::Offset<flatbuffers::Vector<int32_t>> output_ids) {
    fbb_.AddOffset(InferenceContext::VT_OUTPUT_IDS, output_ids);
  }
  void add_input_refs(
      flatbuffers::Offset<flatbuffers::Vector<int64_t>> input_refs) {
    fbb_.AddOffset(InferenceContext::VT_INPUT_REFS, input_refs);
  }
  void add_output_refs(
      flatbuffers::Offset<flatbuffers::Vector<int64_t>> output_refs) {
    fbb_.AddOffset(InferenceContext::VT_OUTPUT_REFS, output_refs);
  }
  explicit InferenceContextBuilder(flatbuffers::FlatBufferBuilder &_fbb)
      : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  flatbuffers::Offset<InferenceContext> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<InferenceContext>(end);
    return o;
  }
};

inline flatbuffers::Offset<InferenceContext> CreateInferenceContext(
    flatbuffers::FlatBufferBuilder &_fbb,
    flatbuffers::Offset<flatbuffers::String> driver_version = 0,
    flatbuffers::Offset<flatbuffers::Vector<
        flatbuffers::Offset<tflite::gpu::cl::data::BinaryProgram>>>
        binary_programs = 0,
    bool need_flush = false, bool flush_periodically = false,
    int32_t flush_period = 0, bool need_manual_release = false,
    tflite::gpu::data::CalculationsPrecision precision =
        tflite::gpu::data::CalculationsPrecision::F32,
    tflite::gpu::data::TensorStorageType storage_type =
        tflite::gpu::data::TensorStorageType::UNKNOWN,
    flatbuffers::Offset<
        flatbuffers::Vector<flatbuffers::Offset<tflite::gpu::cl::data::CLNode>>>
        nodes = 0,
    flatbuffers::Offset<flatbuffers::Vector<
        flatbuffers::Offset<tflite::gpu::cl::data::TensorDescWithId>>>
        tensors = 0,
    flatbuffers::Offset<flatbuffers::Vector<
        flatbuffers::Offset<tflite::gpu::cl::data::TensorDescWithId>>>
        const_tensors = 0,
    flatbuffers::Offset<flatbuffers::Vector<int32_t>> input_ids = 0,
    flatbuffers::Offset<flatbuffers::Vector<
        flatbuffers::Offset<tflite::gpu::cl::data::PairOfValueIds>>>
        variable_ids_and_refs = 0,
    flatbuffers::Offset<flatbuffers::Vector<int32_t>> output_ids = 0,
    flatbuffers::Offset<flatbuffers::Vector<int64_t>> input_refs = 0,
    flatbuffers::Offset<flatbuffers::Vector<int64_t>> output_refs = 0) {
  InferenceContextBuilder builder_(_fbb);
  builder_.add_output_refs(output_refs);
  builder_.add_input_refs(input_refs);
  builder_.add_output_ids(output_ids);
  builder_.add_variable_ids_and_refs(variable_ids_and_refs);
  builder_.add_input_ids(input_ids);
  builder_.add_const_tensors(const_tensors);
  builder_.add_tensors(tensors);
  builder_.add_nodes(nodes);
  builder_.add_flush_period(flush_period);
  builder_.add_binary_programs(binary_programs);
  builder_.add_driver_version(driver_version);
  builder_.add_storage_type(storage_type);
  builder_.add_precision(precision);
  builder_.add_need_manual_release(need_manual_release);
  builder_.add_flush_periodically(flush_periodically);
  builder_.add_need_flush(need_flush);
  return builder_.Finish();
}

inline flatbuffers::Offset<InferenceContext> CreateInferenceContextDirect(
    flatbuffers::FlatBufferBuilder &_fbb,
    const char *driver_version = nullptr,
    const std::vector<flatbuffers::Offset<tflite::gpu::cl::data::BinaryProgram>>
        *binary_programs = nullptr,
    bool need_flush = false, bool flush_periodically = false,
    int32_t flush_period = 0, bool need_manual_release = false,
    tflite::gpu::data::CalculationsPrecision precision =
        tflite::gpu::data::CalculationsPrecision::F32,
    tflite::gpu::data::TensorStorageType storage_type =
        tflite::gpu::data::TensorStorageType::UNKNOWN,
    const std::vector<flatbuffers::Offset<tflite::gpu::cl::data::CLNode>>
        *nodes = nullptr,
    const std::vector<
        flatbuffers::Offset<tflite::gpu::cl::data::TensorDescWithId>>
        *tensors = nullptr,
    const std::vector<
        flatbuffers::Offset<tflite::gpu::cl::data::TensorDescWithId>>
        *const_tensors = nullptr,
    const std::vector<int32_t> *input_ids = nullptr,
    const std::vector<
        flatbuffers::Offset<tflite::gpu::cl::data::PairOfValueIds>>
        *variable_ids_and_refs = nullptr,
    const std::vector<int32_t> *output_ids = nullptr,
    const std::vector<int64_t> *input_refs = nullptr,
    const std::vector<int64_t> *output_refs = nullptr) {
  auto driver_version__ =
      driver_version ? _fbb.CreateString(driver_version) : 0;
  auto binary_programs__ =
      binary_programs
          ? _fbb.CreateVector<
                flatbuffers::Offset<tflite::gpu::cl::data::BinaryProgram>>(
                *binary_programs)
          : 0;
  auto nodes__ =
      nodes ? _fbb.CreateVector<
                  flatbuffers::Offset<tflite::gpu::cl::data::CLNode>>(*nodes)
            : 0;
  auto tensors__ =
      tensors
          ? _fbb.CreateVector<
                flatbuffers::Offset<tflite::gpu::cl::data::TensorDescWithId>>(
                *tensors)
          : 0;
  auto const_tensors__ =
      const_tensors
          ? _fbb.CreateVector<
                flatbuffers::Offset<tflite::gpu::cl::data::TensorDescWithId>>(
                *const_tensors)
          : 0;
  auto input_ids__ = input_ids ? _fbb.CreateVector<int32_t>(*input_ids) : 0;
  auto variable_ids_and_refs__ =
      variable_ids_and_refs
          ? _fbb.CreateVector<
                flatbuffers::Offset<tflite::gpu::cl::data::PairOfValueIds>>(
                *variable_ids_and_refs)
          : 0;
  auto output_ids__ = output_ids ? _fbb.CreateVector<int32_t>(*output_ids) : 0;
  auto input_refs__ = input_refs ? _fbb.CreateVector<int64_t>(*input_refs) : 0;
  auto output_refs__ =
      output_refs ? _fbb.CreateVector<int64_t>(*output_refs) : 0;
  return tflite::gpu::cl::data::CreateInferenceContext(
      _fbb, driver_version__, binary_programs__, need_flush, flush_periodically,
      flush_period, need_manual_release, precision, storage_type, nodes__,
      tensors__, const_tensors__, input_ids__, variable_ids_and_refs__,
      output_ids__, input_refs__, output_refs__);
}

inline const tflite::gpu::cl::data::InferenceContext *GetInferenceContext(
    const void *buf) {
  return flatbuffers::GetRoot<tflite::gpu::cl::data::InferenceContext>(buf);
}

inline const tflite::gpu::cl::data::InferenceContext *
GetSizePrefixedInferenceContext(const void *buf) {
  return flatbuffers::GetSizePrefixedRoot<
      tflite::gpu::cl::data::InferenceContext>(buf);
}

inline bool VerifyInferenceContextBuffer(flatbuffers::Verifier &verifier) {
  return verifier.VerifyBuffer<tflite::gpu::cl::data::InferenceContext>(
      nullptr);
}

inline bool VerifySizePrefixedInferenceContextBuffer(
    flatbuffers::Verifier &verifier) {
  return verifier.VerifySizePrefixedBuffer<
      tflite::gpu::cl::data::InferenceContext>(nullptr);
}

inline void FinishInferenceContextBuffer(
    flatbuffers::FlatBufferBuilder &fbb,
    flatbuffers::Offset<tflite::gpu::cl::data::InferenceContext> root) {
  fbb.Finish(root);
}

inline void FinishSizePrefixedInferenceContextBuffer(
    flatbuffers::FlatBufferBuilder &fbb,
    flatbuffers::Offset<tflite::gpu::cl::data::InferenceContext> root) {
  fbb.FinishSizePrefixed(root);
}
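
// Round-trip sketch (illustrative, not emitted by flatc), assuming
// `context_offset` was produced by CreateInferenceContext[Direct] on the
// same builder:
//
//   FinishInferenceContextBuffer(fbb, context_offset);
//   flatbuffers::Verifier verifier(fbb.GetBufferPointer(), fbb.GetSize());
//   if (VerifyInferenceContextBuffer(verifier)) {
//     const auto *ctx = GetInferenceContext(fbb.GetBufferPointer());
//     // Vector/string accessors return nullptr when a field is absent.
//     if (ctx->nodes() != nullptr) {
//       for (const auto *node : *ctx->nodes()) { /* replay node */ }
//     }
//   }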

}  // namespace data
}  // namespace cl
}  // namespace gpu
}  // namespace tflite

#endif  // FLATBUFFERS_GENERATED_SERIALIZATION_TFLITE_GPU_CL_DATA_H_