/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// automatically generated by the FlatBuffers compiler, do not modify


#ifndef FLATBUFFERS_GENERATED_SERIALIZATION_TFLITE_GPU_CL_DATA_H_
#define FLATBUFFERS_GENERATED_SERIALIZATION_TFLITE_GPU_CL_DATA_H_

#include "flatbuffers/flatbuffers.h"

#include "tensorflow/lite/delegates/gpu/common/task/serialization_base_generated.h"

namespace tflite {
namespace gpu {
namespace cl {
namespace data {

struct TensorDescWithId;
struct TensorDescWithIdBuilder;

struct CLNode;
struct CLNodeBuilder;

struct PairOfValueIds;
struct PairOfValueIdsBuilder;

struct InferenceContext;
struct InferenceContextBuilder;

struct TensorDescWithId FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef TensorDescWithIdBuilder Builder;
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_DESC = 4,
    VT_ID = 6
  };
  const tflite::gpu::data::TensorDescriptor *desc() const {
    return GetPointer<const tflite::gpu::data::TensorDescriptor *>(VT_DESC);
  }
  int32_t id() const {
    return GetField<int32_t>(VT_ID, 0);
  }
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyOffset(verifier, VT_DESC) &&
           verifier.VerifyTable(desc()) &&
           VerifyField<int32_t>(verifier, VT_ID) &&
           verifier.EndTable();
  }
};

struct TensorDescWithIdBuilder {
  typedef TensorDescWithId Table;
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  void add_desc(flatbuffers::Offset<tflite::gpu::data::TensorDescriptor> desc) {
    fbb_.AddOffset(TensorDescWithId::VT_DESC, desc);
  }
  void add_id(int32_t id) {
    fbb_.AddElement<int32_t>(TensorDescWithId::VT_ID, id, 0);
  }
  explicit TensorDescWithIdBuilder(flatbuffers::FlatBufferBuilder &_fbb)
      : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  flatbuffers::Offset<TensorDescWithId> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<TensorDescWithId>(end);
    return o;
  }
};

inline flatbuffers::Offset<TensorDescWithId> CreateTensorDescWithId(
    flatbuffers::FlatBufferBuilder &_fbb,
    flatbuffers::Offset<tflite::gpu::data::TensorDescriptor> desc = 0,
    int32_t id = 0) {
  TensorDescWithIdBuilder builder_(_fbb);
  builder_.add_id(id);
  builder_.add_desc(desc);
  return builder_.Finish();
}
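
// Illustrative usage sketch (editor's comment, not part of the generated
// output). It shows how a TensorDescWithId table is typically assembled;
// `BuildDesc` is a hypothetical helper standing in for serializing a
// tflite::gpu::data::TensorDescriptor via the companion
// serialization_base schema:
//
//   flatbuffers::FlatBufferBuilder fbb;
//   flatbuffers::Offset<tflite::gpu::data::TensorDescriptor> desc =
//       BuildDesc(fbb);  // assumed helper, not defined in this header
//   auto tensor = tflite::gpu::cl::data::CreateTensorDescWithId(
//       fbb, desc, /*id=*/42);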

struct CLNode FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef CLNodeBuilder Builder;
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_GPU_OP = 4,
    VT_INPUT_IDS = 6,
    VT_OUTPUT_IDS = 8,
    VT_NAME = 10
  };
  const tflite::gpu::data::GPUOperation *gpu_op() const {
    return GetPointer<const tflite::gpu::data::GPUOperation *>(VT_GPU_OP);
  }
  const flatbuffers::Vector<int32_t> *input_ids() const {
    return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_INPUT_IDS);
  }
  const flatbuffers::Vector<int32_t> *output_ids() const {
    return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_OUTPUT_IDS);
  }
  const flatbuffers::String *name() const {
    return GetPointer<const flatbuffers::String *>(VT_NAME);
  }
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyOffset(verifier, VT_GPU_OP) &&
           verifier.VerifyTable(gpu_op()) &&
           VerifyOffset(verifier, VT_INPUT_IDS) &&
           verifier.VerifyVector(input_ids()) &&
           VerifyOffset(verifier, VT_OUTPUT_IDS) &&
           verifier.VerifyVector(output_ids()) &&
           VerifyOffset(verifier, VT_NAME) &&
           verifier.VerifyString(name()) &&
           verifier.EndTable();
  }
};

struct CLNodeBuilder {
  typedef CLNode Table;
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  void add_gpu_op(flatbuffers::Offset<tflite::gpu::data::GPUOperation> gpu_op) {
    fbb_.AddOffset(CLNode::VT_GPU_OP, gpu_op);
  }
  void add_input_ids(flatbuffers::Offset<flatbuffers::Vector<int32_t>> input_ids) {
    fbb_.AddOffset(CLNode::VT_INPUT_IDS, input_ids);
  }
  void add_output_ids(flatbuffers::Offset<flatbuffers::Vector<int32_t>> output_ids) {
    fbb_.AddOffset(CLNode::VT_OUTPUT_IDS, output_ids);
  }
  void add_name(flatbuffers::Offset<flatbuffers::String> name) {
    fbb_.AddOffset(CLNode::VT_NAME, name);
  }
  explicit CLNodeBuilder(flatbuffers::FlatBufferBuilder &_fbb)
      : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  flatbuffers::Offset<CLNode> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<CLNode>(end);
    return o;
  }
};

inline flatbuffers::Offset<CLNode> CreateCLNode(
    flatbuffers::FlatBufferBuilder &_fbb,
    flatbuffers::Offset<tflite::gpu::data::GPUOperation> gpu_op = 0,
    flatbuffers::Offset<flatbuffers::Vector<int32_t>> input_ids = 0,
    flatbuffers::Offset<flatbuffers::Vector<int32_t>> output_ids = 0,
    flatbuffers::Offset<flatbuffers::String> name = 0) {
  CLNodeBuilder builder_(_fbb);
  builder_.add_name(name);
  builder_.add_output_ids(output_ids);
  builder_.add_input_ids(input_ids);
  builder_.add_gpu_op(gpu_op);
  return builder_.Finish();
}

inline flatbuffers::Offset<CLNode> CreateCLNodeDirect(
    flatbuffers::FlatBufferBuilder &_fbb,
    flatbuffers::Offset<tflite::gpu::data::GPUOperation> gpu_op = 0,
    const std::vector<int32_t> *input_ids = nullptr,
    const std::vector<int32_t> *output_ids = nullptr,
    const char *name = nullptr) {
  auto input_ids__ = input_ids ? _fbb.CreateVector<int32_t>(*input_ids) : 0;
  auto output_ids__ = output_ids ? _fbb.CreateVector<int32_t>(*output_ids) : 0;
  auto name__ = name ? _fbb.CreateString(name) : 0;
  return tflite::gpu::cl::data::CreateCLNode(
      _fbb,
      gpu_op,
      input_ids__,
      output_ids__,
      name__);
}
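
// Illustrative usage sketch (editor's comment). CreateCLNodeDirect wraps raw
// std::vector/const char* arguments into FlatBuffers vectors and strings
// before delegating to CreateCLNode; `op` is a hypothetical, already
// serialized tflite::gpu::data::GPUOperation offset in the same builder:
//
//   std::vector<int32_t> inputs = {0, 1};
//   std::vector<int32_t> outputs = {2};
//   auto node = tflite::gpu::cl::data::CreateCLNodeDirect(
//       fbb, op, &inputs, &outputs, "conv2d");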

struct PairOfValueIds FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef PairOfValueIdsBuilder Builder;
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_FIRST = 4,
    VT_SECOND = 6
  };
  int32_t first() const {
    return GetField<int32_t>(VT_FIRST, 0);
  }
  int32_t second() const {
    return GetField<int32_t>(VT_SECOND, 0);
  }
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<int32_t>(verifier, VT_FIRST) &&
           VerifyField<int32_t>(verifier, VT_SECOND) &&
           verifier.EndTable();
  }
};

struct PairOfValueIdsBuilder {
  typedef PairOfValueIds Table;
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  void add_first(int32_t first) {
    fbb_.AddElement<int32_t>(PairOfValueIds::VT_FIRST, first, 0);
  }
  void add_second(int32_t second) {
    fbb_.AddElement<int32_t>(PairOfValueIds::VT_SECOND, second, 0);
  }
  explicit PairOfValueIdsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
      : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  flatbuffers::Offset<PairOfValueIds> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<PairOfValueIds>(end);
    return o;
  }
};

inline flatbuffers::Offset<PairOfValueIds> CreatePairOfValueIds(
    flatbuffers::FlatBufferBuilder &_fbb,
    int32_t first = 0,
    int32_t second = 0) {
  PairOfValueIdsBuilder builder_(_fbb);
  builder_.add_second(second);
  builder_.add_first(first);
  return builder_.Finish();
}
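
// Note (editor's comment): the generated Create* helpers call add_* with
// larger fields first and, within a size class, in descending vtable-offset
// order (hence add_second before add_first above); this only affects
// in-buffer layout, not the accessors. A minimal sketch:
//
//   auto pair = tflite::gpu::cl::data::CreatePairOfValueIds(
//       fbb, /*first=*/3, /*second=*/7);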

struct InferenceContext FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef InferenceContextBuilder Builder;
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_NEED_FLUSH = 4,
    VT_FLUSH_PERIODICALLY = 6,
    VT_FLUSH_PERIOD = 8,
    VT_NEED_MANUAL_RELEASE = 10,
    VT_PRECISION = 12,
    VT_STORAGE_TYPE = 14,
    VT_NODES = 16,
    VT_TENSORS = 18,
    VT_CONST_TENSORS = 20,
    VT_INPUT_IDS = 22,
    VT_VARIABLE_IDS_AND_REFS = 24,
    VT_OUTPUT_IDS = 26,
    VT_INPUT_REFS = 28,
    VT_OUTPUT_REFS = 30
  };
  bool need_flush() const {
    return GetField<uint8_t>(VT_NEED_FLUSH, 0) != 0;
  }
  bool flush_periodically() const {
    return GetField<uint8_t>(VT_FLUSH_PERIODICALLY, 0) != 0;
  }
  int32_t flush_period() const {
    return GetField<int32_t>(VT_FLUSH_PERIOD, 0);
  }
  bool need_manual_release() const {
    return GetField<uint8_t>(VT_NEED_MANUAL_RELEASE, 0) != 0;
  }
  tflite::gpu::data::CalculationsPrecision precision() const {
    return static_cast<tflite::gpu::data::CalculationsPrecision>(
        GetField<int8_t>(VT_PRECISION, 0));
  }
  tflite::gpu::data::TensorStorageType storage_type() const {
    return static_cast<tflite::gpu::data::TensorStorageType>(
        GetField<int8_t>(VT_STORAGE_TYPE, 0));
  }
  const flatbuffers::Vector<flatbuffers::Offset<tflite::gpu::cl::data::CLNode>> *nodes() const {
    return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<tflite::gpu::cl::data::CLNode>> *>(VT_NODES);
  }
  const flatbuffers::Vector<flatbuffers::Offset<tflite::gpu::cl::data::TensorDescWithId>> *tensors() const {
    return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<tflite::gpu::cl::data::TensorDescWithId>> *>(VT_TENSORS);
  }
  const flatbuffers::Vector<flatbuffers::Offset<tflite::gpu::cl::data::TensorDescWithId>> *const_tensors() const {
    return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<tflite::gpu::cl::data::TensorDescWithId>> *>(VT_CONST_TENSORS);
  }
  const flatbuffers::Vector<int32_t> *input_ids() const {
    return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_INPUT_IDS);
  }
  const flatbuffers::Vector<flatbuffers::Offset<tflite::gpu::cl::data::PairOfValueIds>> *variable_ids_and_refs() const {
    return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<tflite::gpu::cl::data::PairOfValueIds>> *>(VT_VARIABLE_IDS_AND_REFS);
  }
  const flatbuffers::Vector<int32_t> *output_ids() const {
    return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_OUTPUT_IDS);
  }
  const flatbuffers::Vector<int64_t> *input_refs() const {
    return GetPointer<const flatbuffers::Vector<int64_t> *>(VT_INPUT_REFS);
  }
  const flatbuffers::Vector<int64_t> *output_refs() const {
    return GetPointer<const flatbuffers::Vector<int64_t> *>(VT_OUTPUT_REFS);
  }
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<uint8_t>(verifier, VT_NEED_FLUSH) &&
           VerifyField<uint8_t>(verifier, VT_FLUSH_PERIODICALLY) &&
           VerifyField<int32_t>(verifier, VT_FLUSH_PERIOD) &&
           VerifyField<uint8_t>(verifier, VT_NEED_MANUAL_RELEASE) &&
           VerifyField<int8_t>(verifier, VT_PRECISION) &&
           VerifyField<int8_t>(verifier, VT_STORAGE_TYPE) &&
           VerifyOffset(verifier, VT_NODES) &&
           verifier.VerifyVector(nodes()) &&
           verifier.VerifyVectorOfTables(nodes()) &&
           VerifyOffset(verifier, VT_TENSORS) &&
           verifier.VerifyVector(tensors()) &&
           verifier.VerifyVectorOfTables(tensors()) &&
           VerifyOffset(verifier, VT_CONST_TENSORS) &&
           verifier.VerifyVector(const_tensors()) &&
           verifier.VerifyVectorOfTables(const_tensors()) &&
           VerifyOffset(verifier, VT_INPUT_IDS) &&
           verifier.VerifyVector(input_ids()) &&
           VerifyOffset(verifier, VT_VARIABLE_IDS_AND_REFS) &&
           verifier.VerifyVector(variable_ids_and_refs()) &&
           verifier.VerifyVectorOfTables(variable_ids_and_refs()) &&
           VerifyOffset(verifier, VT_OUTPUT_IDS) &&
           verifier.VerifyVector(output_ids()) &&
           VerifyOffset(verifier, VT_INPUT_REFS) &&
           verifier.VerifyVector(input_refs()) &&
           VerifyOffset(verifier, VT_OUTPUT_REFS) &&
           verifier.VerifyVector(output_refs()) &&
           verifier.EndTable();
  }
};

struct InferenceContextBuilder {
  typedef InferenceContext Table;
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  void add_need_flush(bool need_flush) {
    fbb_.AddElement<uint8_t>(InferenceContext::VT_NEED_FLUSH, static_cast<uint8_t>(need_flush), 0);
  }
  void add_flush_periodically(bool flush_periodically) {
    fbb_.AddElement<uint8_t>(InferenceContext::VT_FLUSH_PERIODICALLY, static_cast<uint8_t>(flush_periodically), 0);
  }
  void add_flush_period(int32_t flush_period) {
    fbb_.AddElement<int32_t>(InferenceContext::VT_FLUSH_PERIOD, flush_period, 0);
  }
  void add_need_manual_release(bool need_manual_release) {
    fbb_.AddElement<uint8_t>(InferenceContext::VT_NEED_MANUAL_RELEASE, static_cast<uint8_t>(need_manual_release), 0);
  }
  void add_precision(tflite::gpu::data::CalculationsPrecision precision) {
    fbb_.AddElement<int8_t>(InferenceContext::VT_PRECISION, static_cast<int8_t>(precision), 0);
  }
  void add_storage_type(tflite::gpu::data::TensorStorageType storage_type) {
    fbb_.AddElement<int8_t>(InferenceContext::VT_STORAGE_TYPE, static_cast<int8_t>(storage_type), 0);
  }
  void add_nodes(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<tflite::gpu::cl::data::CLNode>>> nodes) {
    fbb_.AddOffset(InferenceContext::VT_NODES, nodes);
  }
  void add_tensors(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<tflite::gpu::cl::data::TensorDescWithId>>> tensors) {
    fbb_.AddOffset(InferenceContext::VT_TENSORS, tensors);
  }
  void add_const_tensors(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<tflite::gpu::cl::data::TensorDescWithId>>> const_tensors) {
    fbb_.AddOffset(InferenceContext::VT_CONST_TENSORS, const_tensors);
  }
  void add_input_ids(flatbuffers::Offset<flatbuffers::Vector<int32_t>> input_ids) {
    fbb_.AddOffset(InferenceContext::VT_INPUT_IDS, input_ids);
  }
  void add_variable_ids_and_refs(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<tflite::gpu::cl::data::PairOfValueIds>>> variable_ids_and_refs) {
    fbb_.AddOffset(InferenceContext::VT_VARIABLE_IDS_AND_REFS, variable_ids_and_refs);
  }
  void add_output_ids(flatbuffers::Offset<flatbuffers::Vector<int32_t>> output_ids) {
    fbb_.AddOffset(InferenceContext::VT_OUTPUT_IDS, output_ids);
  }
  void add_input_refs(flatbuffers::Offset<flatbuffers::Vector<int64_t>> input_refs) {
    fbb_.AddOffset(InferenceContext::VT_INPUT_REFS, input_refs);
  }
  void add_output_refs(flatbuffers::Offset<flatbuffers::Vector<int64_t>> output_refs) {
    fbb_.AddOffset(InferenceContext::VT_OUTPUT_REFS, output_refs);
  }
  explicit InferenceContextBuilder(flatbuffers::FlatBufferBuilder &_fbb)
      : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  flatbuffers::Offset<InferenceContext> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<InferenceContext>(end);
    return o;
  }
};

inline flatbuffers::Offset<InferenceContext> CreateInferenceContext(
    flatbuffers::FlatBufferBuilder &_fbb,
    bool need_flush = false,
    bool flush_periodically = false,
    int32_t flush_period = 0,
    bool need_manual_release = false,
    tflite::gpu::data::CalculationsPrecision precision = tflite::gpu::data::CalculationsPrecision::F32,
    tflite::gpu::data::TensorStorageType storage_type = tflite::gpu::data::TensorStorageType::UNKNOWN,
    flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<tflite::gpu::cl::data::CLNode>>> nodes = 0,
    flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<tflite::gpu::cl::data::TensorDescWithId>>> tensors = 0,
    flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<tflite::gpu::cl::data::TensorDescWithId>>> const_tensors = 0,
    flatbuffers::Offset<flatbuffers::Vector<int32_t>> input_ids = 0,
    flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<tflite::gpu::cl::data::PairOfValueIds>>> variable_ids_and_refs = 0,
    flatbuffers::Offset<flatbuffers::Vector<int32_t>> output_ids = 0,
    flatbuffers::Offset<flatbuffers::Vector<int64_t>> input_refs = 0,
    flatbuffers::Offset<flatbuffers::Vector<int64_t>> output_refs = 0) {
  InferenceContextBuilder builder_(_fbb);
  builder_.add_output_refs(output_refs);
  builder_.add_input_refs(input_refs);
  builder_.add_output_ids(output_ids);
  builder_.add_variable_ids_and_refs(variable_ids_and_refs);
  builder_.add_input_ids(input_ids);
  builder_.add_const_tensors(const_tensors);
  builder_.add_tensors(tensors);
  builder_.add_nodes(nodes);
  builder_.add_flush_period(flush_period);
  builder_.add_storage_type(storage_type);
  builder_.add_precision(precision);
  builder_.add_need_manual_release(need_manual_release);
  builder_.add_flush_periodically(flush_periodically);
  builder_.add_need_flush(need_flush);
  return builder_.Finish();
}

inline flatbuffers::Offset<InferenceContext> CreateInferenceContextDirect(
    flatbuffers::FlatBufferBuilder &_fbb,
    bool need_flush = false,
    bool flush_periodically = false,
    int32_t flush_period = 0,
    bool need_manual_release = false,
    tflite::gpu::data::CalculationsPrecision precision = tflite::gpu::data::CalculationsPrecision::F32,
    tflite::gpu::data::TensorStorageType storage_type = tflite::gpu::data::TensorStorageType::UNKNOWN,
    const std::vector<flatbuffers::Offset<tflite::gpu::cl::data::CLNode>> *nodes = nullptr,
    const std::vector<flatbuffers::Offset<tflite::gpu::cl::data::TensorDescWithId>> *tensors = nullptr,
    const std::vector<flatbuffers::Offset<tflite::gpu::cl::data::TensorDescWithId>> *const_tensors = nullptr,
    const std::vector<int32_t> *input_ids = nullptr,
    const std::vector<flatbuffers::Offset<tflite::gpu::cl::data::PairOfValueIds>> *variable_ids_and_refs = nullptr,
    const std::vector<int32_t> *output_ids = nullptr,
    const std::vector<int64_t> *input_refs = nullptr,
    const std::vector<int64_t> *output_refs = nullptr) {
  auto nodes__ = nodes ? _fbb.CreateVector<flatbuffers::Offset<tflite::gpu::cl::data::CLNode>>(*nodes) : 0;
  auto tensors__ = tensors ? _fbb.CreateVector<flatbuffers::Offset<tflite::gpu::cl::data::TensorDescWithId>>(*tensors) : 0;
  auto const_tensors__ = const_tensors ? _fbb.CreateVector<flatbuffers::Offset<tflite::gpu::cl::data::TensorDescWithId>>(*const_tensors) : 0;
  auto input_ids__ = input_ids ? _fbb.CreateVector<int32_t>(*input_ids) : 0;
  auto variable_ids_and_refs__ = variable_ids_and_refs ? _fbb.CreateVector<flatbuffers::Offset<tflite::gpu::cl::data::PairOfValueIds>>(*variable_ids_and_refs) : 0;
  auto output_ids__ = output_ids ? _fbb.CreateVector<int32_t>(*output_ids) : 0;
  auto input_refs__ = input_refs ? _fbb.CreateVector<int64_t>(*input_refs) : 0;
  auto output_refs__ = output_refs ? _fbb.CreateVector<int64_t>(*output_refs) : 0;
  return tflite::gpu::cl::data::CreateInferenceContext(
      _fbb, need_flush, flush_periodically, flush_period, need_manual_release,
      precision, storage_type, nodes__, tensors__, const_tensors__, input_ids__,
      variable_ids_and_refs__, output_ids__, input_refs__, output_refs__);
}
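
// Illustrative usage sketch (editor's comment). The *Direct variant first
// serializes the raw inputs into FlatBuffers vectors, so the per-element
// offsets (here `node` and `tensor` from the sketches above) must already
// live in the same FlatBufferBuilder. The enum values below are the defaults
// named in this header; a real serializer would pass the model's actual
// precision and storage type:
//
//   std::vector<flatbuffers::Offset<tflite::gpu::cl::data::CLNode>>
//       nodes = {node};
//   std::vector<flatbuffers::Offset<tflite::gpu::cl::data::TensorDescWithId>>
//       tensors = {tensor};
//   std::vector<int32_t> input_ids = {0};
//   std::vector<int32_t> output_ids = {2};
//   auto root = tflite::gpu::cl::data::CreateInferenceContextDirect(
//       fbb, /*need_flush=*/true, /*flush_periodically=*/false,
//       /*flush_period=*/0, /*need_manual_release=*/false,
//       tflite::gpu::data::CalculationsPrecision::F32,
//       tflite::gpu::data::TensorStorageType::UNKNOWN,
//       &nodes, &tensors, /*const_tensors=*/nullptr, &input_ids,
//       /*variable_ids_and_refs=*/nullptr, &output_ids);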

inline const tflite::gpu::cl::data::InferenceContext *GetInferenceContext(const void *buf) {
  return flatbuffers::GetRoot<tflite::gpu::cl::data::InferenceContext>(buf);
}

inline const tflite::gpu::cl::data::InferenceContext *GetSizePrefixedInferenceContext(const void *buf) {
  return flatbuffers::GetSizePrefixedRoot<tflite::gpu::cl::data::InferenceContext>(buf);
}

inline bool VerifyInferenceContextBuffer(
    flatbuffers::Verifier &verifier) {
  return verifier.VerifyBuffer<tflite::gpu::cl::data::InferenceContext>(nullptr);
}

inline bool VerifySizePrefixedInferenceContextBuffer(
    flatbuffers::Verifier &verifier) {
  return verifier.VerifySizePrefixedBuffer<tflite::gpu::cl::data::InferenceContext>(nullptr);
}

inline void FinishInferenceContextBuffer(
    flatbuffers::FlatBufferBuilder &fbb,
    flatbuffers::Offset<tflite::gpu::cl::data::InferenceContext> root) {
  fbb.Finish(root);
}

inline void FinishSizePrefixedInferenceContextBuffer(
    flatbuffers::FlatBufferBuilder &fbb,
    flatbuffers::Offset<tflite::gpu::cl::data::InferenceContext> root) {
  fbb.FinishSizePrefixed(root);
}
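
// Illustrative round trip (editor's comment): finish the buffer, verify it,
// and read the root table back. All functions below are defined in this
// header; only `fbb` and `root` carry over from the sketch above:
//
//   tflite::gpu::cl::data::FinishInferenceContextBuffer(fbb, root);
//   flatbuffers::Verifier verifier(fbb.GetBufferPointer(), fbb.GetSize());
//   if (tflite::gpu::cl::data::VerifyInferenceContextBuffer(verifier)) {
//     const auto *context =
//         tflite::gpu::cl::data::GetInferenceContext(fbb.GetBufferPointer());
//     if (context->nodes()) { /* iterate the deserialized CLNodes */ }
//   }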

}  // namespace data
}  // namespace cl
}  // namespace gpu
}  // namespace tflite

#endif  // FLATBUFFERS_GENERATED_SERIALIZATION_TFLITE_GPU_CL_DATA_H_