/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/lite/delegates/gpu/cl/serialization.h"

#include <cstdint>
#include <string>
#include <utility>
#include <vector>

#include "tensorflow/lite/delegates/gpu/cl/gpu_object.h"
#include "tensorflow/lite/delegates/gpu/cl/inference_context.h"
#include "tensorflow/lite/delegates/gpu/cl/serialization_generated.h"
#include "tensorflow/lite/delegates/gpu/common/model.h"
#include "tensorflow/lite/delegates/gpu/common/precision.h"
#include "tensorflow/lite/delegates/gpu/common/task/arguments.h"
#include "tensorflow/lite/delegates/gpu/common/task/buffer_desc.h"
#include "tensorflow/lite/delegates/gpu/common/task/gpu_object_desc.h"
#include "tensorflow/lite/delegates/gpu/common/task/gpu_operation.h"
#include "tensorflow/lite/delegates/gpu/common/task/tensor_desc.h"
#include "tensorflow/lite/delegates/gpu/common/task/tensor_linear_desc.h"
#include "tensorflow/lite/delegates/gpu/common/task/texture2d_desc.h"
namespace tflite {
namespace gpu {

namespace {
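// Converters between the runtime enums in tflite::gpu and their FlatBuffer
// counterparts in tflite::gpu::data: ToFB maps to the serialized form,
// ToEnum maps back.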
data::AccessType ToFB(AccessType type) {
  switch (type) {
    case AccessType::READ:
      return data::AccessType::READ;
    case AccessType::WRITE:
      return data::AccessType::WRITE;
    case AccessType::READ_WRITE:
      return data::AccessType::READ_WRITE;
    default:
      return data::AccessType::READ_WRITE;
  }
}

data::DataType ToFB(DataType type) {
  switch (type) {
    case DataType::FLOAT16:
      return data::DataType::FLOAT16;
    case DataType::FLOAT32:
      return data::DataType::FLOAT32;
    case DataType::FLOAT64:
      return data::DataType::FLOAT64;
    case DataType::UINT8:
      return data::DataType::UINT8;
    case DataType::INT8:
      return data::DataType::INT8;
    case DataType::UINT16:
      return data::DataType::UINT16;
    case DataType::INT16:
      return data::DataType::INT16;
    case DataType::UINT32:
      return data::DataType::UINT32;
    case DataType::INT32:
      return data::DataType::INT32;
    case DataType::UINT64:
      return data::DataType::UINT64;
    case DataType::INT64:
      return data::DataType::INT64;
    case DataType::UNKNOWN:
      return data::DataType::UNKNOWN;
  }
}

data::MemoryType ToFB(MemoryType type) {
  switch (type) {
    case MemoryType::CONSTANT:
      return data::MemoryType::CONSTANT;
    case MemoryType::GLOBAL:
      return data::MemoryType::GLOBAL;
    case MemoryType::LOCAL:
      return data::MemoryType::LOCAL;
  }
}

data::LinearStorageType ToFB(LinearStorageType type) {
  switch (type) {
    case LinearStorageType::BUFFER:
      return data::LinearStorageType::BUFFER;
    case LinearStorageType::TEXTURE_2D:
      return data::LinearStorageType::TEXTURE_2D;
  }
}

data::TensorStorageType ToFB(TensorStorageType type) {
  switch (type) {
    case TensorStorageType::BUFFER:
      return data::TensorStorageType::BUFFER;
    case TensorStorageType::IMAGE_BUFFER:
      return data::TensorStorageType::IMAGE_BUFFER;
    case TensorStorageType::TEXTURE_2D:
      return data::TensorStorageType::TEXTURE_2D;
    case TensorStorageType::TEXTURE_ARRAY:
      return data::TensorStorageType::TEXTURE_ARRAY;
    case TensorStorageType::TEXTURE_3D:
      return data::TensorStorageType::TEXTURE_3D;
    case TensorStorageType::SINGLE_TEXTURE_2D:
      return data::TensorStorageType::SINGLE_TEXTURE_2D;
    case TensorStorageType::UNKNOWN:
      return data::TensorStorageType::UNKNOWN;
  }
}

data::Layout ToFB(Layout type) {
  switch (type) {
    case Layout::HWC:
      return data::Layout::HWC;
    case Layout::BHWC:
      return data::Layout::BHWC;
    case Layout::HWDC:
      return data::Layout::HWDC;
    case Layout::BHWDC:
      return data::Layout::BHWDC;
    default:
      return data::Layout::UNKNOWN;
  }
}

DataType ToEnum(data::DataType type) {
  switch (type) {
    case data::DataType::FLOAT16:
      return DataType::FLOAT16;
    case data::DataType::FLOAT32:
      return DataType::FLOAT32;
    case data::DataType::FLOAT64:
      return DataType::FLOAT64;
    case data::DataType::UINT8:
      return DataType::UINT8;
    case data::DataType::INT8:
      return DataType::INT8;
    case data::DataType::UINT16:
      return DataType::UINT16;
    case data::DataType::INT16:
      return DataType::INT16;
    case data::DataType::UINT32:
      return DataType::UINT32;
    case data::DataType::INT32:
      return DataType::INT32;
    case data::DataType::UINT64:
      return DataType::UINT64;
    case data::DataType::INT64:
      return DataType::INT64;
    case data::DataType::UNKNOWN:
      return DataType::UNKNOWN;
  }
}

AccessType ToEnum(data::AccessType type) {
  switch (type) {
    case data::AccessType::READ:
      return AccessType::READ;
    case data::AccessType::WRITE:
      return AccessType::WRITE;
    case data::AccessType::READ_WRITE:
      return AccessType::READ_WRITE;
  }
}

MemoryType ToEnum(data::MemoryType type) {
  switch (type) {
    case data::MemoryType::CONSTANT:
      return MemoryType::CONSTANT;
    case data::MemoryType::GLOBAL:
      return MemoryType::GLOBAL;
    case data::MemoryType::LOCAL:
      return MemoryType::LOCAL;
  }
}

LinearStorageType ToEnum(data::LinearStorageType type) {
  switch (type) {
    case data::LinearStorageType::BUFFER:
      return LinearStorageType::BUFFER;
    case data::LinearStorageType::TEXTURE_2D:
      return LinearStorageType::TEXTURE_2D;
  }
}

TensorStorageType ToEnum(data::TensorStorageType type) {
  switch (type) {
    case data::TensorStorageType::BUFFER:
      return TensorStorageType::BUFFER;
    case data::TensorStorageType::IMAGE_BUFFER:
      return TensorStorageType::IMAGE_BUFFER;
    case data::TensorStorageType::TEXTURE_2D:
      return TensorStorageType::TEXTURE_2D;
    case data::TensorStorageType::TEXTURE_ARRAY:
      return TensorStorageType::TEXTURE_ARRAY;
    case data::TensorStorageType::TEXTURE_3D:
      return TensorStorageType::TEXTURE_3D;
    case data::TensorStorageType::SINGLE_TEXTURE_2D:
      return TensorStorageType::SINGLE_TEXTURE_2D;
    case data::TensorStorageType::UNKNOWN:
      return TensorStorageType::UNKNOWN;
  }
}

Layout ToEnum(data::Layout type) {
  switch (type) {
    case data::Layout::HWC:
      return Layout::HWC;
    case data::Layout::BHWC:
      return Layout::BHWC;
    case data::Layout::HWDC:
      return Layout::HWDC;
    case data::Layout::BHWDC:
      return Layout::BHWDC;
    default:
      return Layout::UNKNOWN;
  }
}

data::CalculationsPrecision ToFB(CalculationsPrecision type) {
  switch (type) {
    case CalculationsPrecision::F32:
      return data::CalculationsPrecision::F32;
    case CalculationsPrecision::F32_F16:
      return data::CalculationsPrecision::F32_F16;
    case CalculationsPrecision::F16:
      return data::CalculationsPrecision::F16;
  }
}

data::TensorToGrid ToFB(TensorToGrid type) {
  switch (type) {
    case TensorToGrid::kCustom:
      return data::TensorToGrid::CUSTOM;
    case TensorToGrid::kWBToX_HDToY_SToZ:
      return data::TensorToGrid::WB_TO_X_HD_TO_Y_S_TO_Z;
    case TensorToGrid::kWBToX_HDToY_ZIs1:
      return data::TensorToGrid::WB_TO_X_HD_TO_Y_Z_IS_1;
    case TensorToGrid::kWBToX_HToY_DToZ:
      return data::TensorToGrid::WB_TO_X_H_TO_Y_D_TO_Z;
    case TensorToGrid::kBToX_YIs1_ZIs1:
      return data::TensorToGrid::B_TO_X_Y_IS_1_Z_IS_1;
  }
}

data::CompilerOptions ToFB(CompilerOptions type) {
  switch (type) {
    case CompilerOptions::kAdrenoFullSimd:
      return data::CompilerOptions::ADRENO_FULL_SIMD_LINE;
    case CompilerOptions::kAdrenoMoreWaves:
      return data::CompilerOptions::ADRENO_MORE_WAVES;
    case CompilerOptions::kClPowervrFp16:
      return data::CompilerOptions::POWERVR_FP16;
    case CompilerOptions::kClDisableOptimizations:
      return data::CompilerOptions::CL_OPT_DISABLE;
    case CompilerOptions::kCl20:
      return data::CompilerOptions::CL_2_0;
    case CompilerOptions::kCl30:
      return data::CompilerOptions::CL_3_0;
  }
}

CalculationsPrecision ToEnum(data::CalculationsPrecision type) {
  switch (type) {
    case data::CalculationsPrecision::F32:
      return CalculationsPrecision::F32;
    case data::CalculationsPrecision::F32_F16:
      return CalculationsPrecision::F32_F16;
    case data::CalculationsPrecision::F16:
      return CalculationsPrecision::F16;
  }
}

TensorToGrid ToEnum(data::TensorToGrid type) {
  switch (type) {
    case data::TensorToGrid::CUSTOM:
      return TensorToGrid::kCustom;
    case data::TensorToGrid::WB_TO_X_HD_TO_Y_S_TO_Z:
      return TensorToGrid::kWBToX_HDToY_SToZ;
    case data::TensorToGrid::WB_TO_X_HD_TO_Y_Z_IS_1:
      return TensorToGrid::kWBToX_HDToY_ZIs1;
    case data::TensorToGrid::WB_TO_X_H_TO_Y_D_TO_Z:
      return TensorToGrid::kWBToX_HToY_DToZ;
    case data::TensorToGrid::B_TO_X_Y_IS_1_Z_IS_1:
      return TensorToGrid::kBToX_YIs1_ZIs1;
  }
}

CompilerOptions ToEnum(data::CompilerOptions type) {
  switch (type) {
    case data::CompilerOptions::ADRENO_FULL_SIMD_LINE:
      return CompilerOptions::kAdrenoFullSimd;
    case data::CompilerOptions::ADRENO_MORE_WAVES:
      return CompilerOptions::kAdrenoMoreWaves;
    case data::CompilerOptions::POWERVR_FP16:
      return CompilerOptions::kClPowervrFp16;
    case data::CompilerOptions::CL_OPT_DISABLE:
      return CompilerOptions::kClDisableOptimizations;
    case data::CompilerOptions::CL_2_0:
      return CompilerOptions::kCl20;
    case data::CompilerOptions::CL_3_0:
      return CompilerOptions::kCl30;
  }
}

}  // namespace

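// FlatBuffer Encode/Decode helpers for int2/int3 and for the GPU object
// descriptors: the GPUObjectDescriptor base plus the concrete buffer,
// 2D texture, linear tensor, and tensor descriptor types.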
flatbuffers::Offset<data::Int2> Encode(
    const int2& v, flatbuffers::FlatBufferBuilder* builder) {
  data::Int2Builder int2_builder(*builder);
  int2_builder.add_x(v.x);
  int2_builder.add_y(v.y);
  return int2_builder.Finish();
}

flatbuffers::Offset<data::Int3> Encode(
    const int3& v, flatbuffers::FlatBufferBuilder* builder) {
  data::Int3Builder int3_builder(*builder);
  int3_builder.add_x(v.x);
  int3_builder.add_y(v.y);
  int3_builder.add_z(v.z);
  return int3_builder.Finish();
}

flatbuffers::Offset<data::GPUObjectDescriptor> Encode(
    const GPUObjectDescriptor& desc, flatbuffers::FlatBufferBuilder* builder) {
  std::vector<flatbuffers::Offset<data::StateVariable>> state_vars_fb;
  for (auto& v0 : desc.state_vars_) {
    auto key_fb = builder->CreateString(v0.first);
    auto value_fb = builder->CreateString(v0.second);
    data::StateVariableBuilder state_builder(*builder);
    state_builder.add_key(key_fb);
    state_builder.add_value(value_fb);
    state_vars_fb.push_back(state_builder.Finish());
  }
  auto state_vars_fb_vec = builder->CreateVector(state_vars_fb);
  data::GPUObjectDescriptorBuilder obj_builder(*builder);
  obj_builder.add_state_vars(state_vars_fb_vec);
  obj_builder.add_access_type(ToFB(desc.access_type_));
  return obj_builder.Finish();
}

void Decode(const data::GPUObjectDescriptor* fb_obj, GPUObjectDescriptor* obj) {
  obj->access_type_ = ToEnum(fb_obj->access_type());
  for (auto state_fb : *fb_obj->state_vars()) {
    std::string key(state_fb->key()->c_str(), state_fb->key()->size());
    std::string value(state_fb->value()->c_str(), state_fb->value()->size());
    obj->state_vars_[key] = value;
  }
}

flatbuffers::Offset<data::BufferDescriptor> Encode(
    const BufferDescriptor& desc, flatbuffers::FlatBufferBuilder* builder) {
  auto obj_fb =
      Encode(*static_cast<const GPUObjectDescriptor*>(&desc), builder);

  std::vector<flatbuffers::Offset<flatbuffers::String>> attributes_fb;
  for (auto& attr : desc.attributes) {
    attributes_fb.push_back(builder->CreateString(attr));
  }
  auto attributes_fb_vec = builder->CreateVector(attributes_fb);
  auto data_fb = builder->CreateVector(desc.data);
  data::BufferDescriptorBuilder buf_builder(*builder);
  buf_builder.add_base_obj(obj_fb);
  buf_builder.add_element_type(ToFB(desc.element_type));
  buf_builder.add_element_size(desc.element_size);
  buf_builder.add_memory_type(ToFB(desc.memory_type));
  buf_builder.add_attributes(attributes_fb_vec);
  buf_builder.add_size(desc.size);
  buf_builder.add_data(data_fb);
  return buf_builder.Finish();
}

void Decode(const data::BufferDescriptor* fb_desc, BufferDescriptor* desc) {
  Decode(fb_desc->base_obj(), desc);
  desc->element_type = ToEnum(fb_desc->element_type());
  desc->element_size = fb_desc->element_size();
  desc->memory_type = ToEnum(fb_desc->memory_type());
  for (auto attr_fb : *fb_desc->attributes()) {
    std::string attr(attr_fb->c_str(), attr_fb->size());
    desc->attributes.push_back(attr);
  }
  desc->size = fb_desc->size();
  desc->data =
      std::vector<uint8_t>(fb_desc->data()->data(),
                           fb_desc->data()->data() + fb_desc->data()->size());
}

flatbuffers::Offset<data::Texture2DDescriptor> Encode(
    const Texture2DDescriptor& desc, flatbuffers::FlatBufferBuilder* builder) {
  auto obj_fb =
      Encode(*static_cast<const GPUObjectDescriptor*>(&desc), builder);

  auto data_fb = builder->CreateVector(desc.data);
  auto size_fb = Encode(desc.size, builder);
  data::Texture2DDescriptorBuilder tex_builder(*builder);
  tex_builder.add_base_obj(obj_fb);
  tex_builder.add_element_type(ToFB(desc.element_type));
  tex_builder.add_normalized(desc.normalized);
  tex_builder.add_normalized_type(ToFB(desc.normalized_type));
  tex_builder.add_size(size_fb);
  tex_builder.add_data(data_fb);
  return tex_builder.Finish();
}

void Decode(const data::Texture2DDescriptor* fb_desc,
            Texture2DDescriptor* desc) {
  Decode(fb_desc->base_obj(), desc);
  desc->element_type = ToEnum(fb_desc->element_type());
  desc->normalized = fb_desc->normalized();
  desc->normalized_type = ToEnum(fb_desc->normalized_type());
  desc->size.x = fb_desc->size()->x();
  desc->size.y = fb_desc->size()->y();
  desc->data =
      std::vector<uint8_t>(fb_desc->data()->data(),
                           fb_desc->data()->data() + fb_desc->data()->size());
}

flatbuffers::Offset<data::TensorLinearDescriptor> Encode(
    const TensorLinearDescriptor& desc,
    flatbuffers::FlatBufferBuilder* builder) {
  auto obj_fb =
      Encode(*static_cast<const GPUObjectDescriptor*>(&desc), builder);

  auto data_fb = builder->CreateVector(desc.data);
  data::TensorLinearDescriptorBuilder tensor_builder(*builder);
  tensor_builder.add_base_obj(obj_fb);
  tensor_builder.add_element_type(ToFB(desc.element_type));
  tensor_builder.add_storage_type(ToFB(desc.storage_type));
  tensor_builder.add_memory_type(ToFB(desc.memory_type));
  tensor_builder.add_size(desc.size);
  tensor_builder.add_data(data_fb);
  return tensor_builder.Finish();
}

void Decode(const data::TensorLinearDescriptor* fb_desc,
            TensorLinearDescriptor* desc) {
  Decode(fb_desc->base_obj(), desc);
  desc->element_type = ToEnum(fb_desc->element_type());
  desc->storage_type = ToEnum(fb_desc->storage_type());
  desc->memory_type = ToEnum(fb_desc->memory_type());
  desc->size = fb_desc->size();
  desc->data =
      std::vector<uint8_t>(fb_desc->data()->data(),
                           fb_desc->data()->data() + fb_desc->data()->size());
}

flatbuffers::Offset<data::TensorDescriptor> Encode(
    const TensorDescriptor& desc, flatbuffers::FlatBufferBuilder* builder) {
  auto obj_fb =
      Encode(*static_cast<const GPUObjectDescriptor*>(&desc), builder);

  data::BHWDCBuilder shape_builder(*builder);
  shape_builder.add_b(desc.shape.b);
  shape_builder.add_h(desc.shape.h);
  shape_builder.add_w(desc.shape.w);
  shape_builder.add_d(desc.shape.d);
  shape_builder.add_c(desc.shape.c);
  auto shape_fb = shape_builder.Finish();

  auto data_fb = builder->CreateVector(desc.data);
  data::TensorDescriptorBuilder tensor_builder(*builder);
  tensor_builder.add_base_obj(obj_fb);
  tensor_builder.add_data_type(ToFB(desc.data_type));
  tensor_builder.add_storage_type(ToFB(desc.storage_type));
  tensor_builder.add_layout(ToFB(desc.layout));
  tensor_builder.add_shape(shape_fb);
  tensor_builder.add_data(data_fb);
  return tensor_builder.Finish();
}

void Decode(const data::TensorDescriptor* fb_desc, TensorDescriptor* desc) {
  Decode(fb_desc->base_obj(), desc);
  desc->data_type = ToEnum(fb_desc->data_type());
  desc->storage_type = ToEnum(fb_desc->storage_type());
  desc->layout = ToEnum(fb_desc->layout());
  desc->shape.b = fb_desc->shape()->b();
  desc->shape.h = fb_desc->shape()->h();
  desc->shape.w = fb_desc->shape()->w();
  desc->shape.d = fb_desc->shape()->d();
  desc->shape.c = fb_desc->shape()->c();
  desc->data =
      std::vector<uint8_t>(fb_desc->data()->data(),
                           fb_desc->data()->data() + fb_desc->data()->size());
}

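// Rebuilds an Arguments instance from its FlatBuffer form: named
// int/float/half values, owned descriptor objects, and descriptor references
// (re-added with the access type recorded in each descriptor).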
absl::Status Decode(const data::Arguments* fb_args, Arguments* args) {
  args->int_values_.clear();
  for (auto int_values_fb : *fb_args->int_values()) {
    Arguments::IntValue value;
    value.value = int_values_fb->value();
    value.active = int_values_fb->active();
    std::string name(int_values_fb->name()->c_str(),
                     int_values_fb->name()->size());
    args->int_values_[name] = value;
  }

  args->float_values_.clear();
  for (auto float_values_fb : *fb_args->float_values()) {
    Arguments::FloatValue value;
    value.value = float_values_fb->value();
    value.active = float_values_fb->active();
    std::string name(float_values_fb->name()->c_str(),
                     float_values_fb->name()->size());
    args->float_values_[name] = value;
  }

  args->half_values_.clear();
  for (auto half_values_fb : *fb_args->half_values()) {
    Arguments::HalfValue value;
    value.value = half_values_fb->value();
    value.active = half_values_fb->active();
    std::string name(half_values_fb->name()->c_str(),
                     half_values_fb->name()->size());
    args->half_values_[name] = value;
  }

  for (auto buffer_pair_fb : *fb_args->buffer_objects()) {
    std::string key(buffer_pair_fb->key()->c_str(),
                    buffer_pair_fb->key()->size());
    BufferDescriptor desc;
    Decode(buffer_pair_fb->value(), &desc);
    args->AddObject(key, absl::make_unique<BufferDescriptor>(std::move(desc)));
  }

  for (auto texture_pair_fb : *fb_args->texture2d_objects()) {
    std::string key(texture_pair_fb->key()->c_str(),
                    texture_pair_fb->key()->size());
    Texture2DDescriptor desc;
    Decode(texture_pair_fb->value(), &desc);
    args->AddObject(key,
                    absl::make_unique<Texture2DDescriptor>(std::move(desc)));
  }

  for (auto tensor_pair_fb : *fb_args->tensor_linear_objects()) {
    std::string key(tensor_pair_fb->key()->c_str(),
                    tensor_pair_fb->key()->size());
    TensorLinearDescriptor desc;
    Decode(tensor_pair_fb->value(), &desc);
    args->AddObject(key,
                    absl::make_unique<TensorLinearDescriptor>(std::move(desc)));
  }

  for (auto tensor_pair_fb : *fb_args->tensor_objects()) {
    std::string key(tensor_pair_fb->key()->c_str(),
                    tensor_pair_fb->key()->size());
    TensorDescriptor desc;
    Decode(tensor_pair_fb->value(), &desc);
    args->AddObject(key, absl::make_unique<TensorDescriptor>(std::move(desc)));
  }

  for (auto buffer_pair_fb : *fb_args->buffer_refs()) {
    std::string key(buffer_pair_fb->key()->c_str(),
                    buffer_pair_fb->key()->size());
    BufferDescriptor desc;
    Decode(buffer_pair_fb->value(), &desc);
    auto access_type = desc.GetAccess();
    args->AddObjectRef(key, access_type,
                       absl::make_unique<BufferDescriptor>(std::move(desc)));
  }

  for (auto texture_pair_fb : *fb_args->texture2d_refs()) {
    std::string key(texture_pair_fb->key()->c_str(),
                    texture_pair_fb->key()->size());
    Texture2DDescriptor desc;
    Decode(texture_pair_fb->value(), &desc);
    auto access_type = desc.GetAccess();
    args->AddObjectRef(key, access_type,
                       absl::make_unique<Texture2DDescriptor>(std::move(desc)));
  }

  for (auto tensor_pair_fb : *fb_args->tensor_linear_refs()) {
    std::string key(tensor_pair_fb->key()->c_str(),
                    tensor_pair_fb->key()->size());
    TensorLinearDescriptor desc;
    Decode(tensor_pair_fb->value(), &desc);
    auto access_type = desc.GetAccess();
    args->AddObjectRef(
        key, access_type,
        absl::make_unique<TensorLinearDescriptor>(std::move(desc)));
  }

  for (auto tensor_pair_fb : *fb_args->tensor_refs()) {
    std::string key(tensor_pair_fb->key()->c_str(),
                    tensor_pair_fb->key()->size());
    TensorDescriptor desc;
    Decode(tensor_pair_fb->value(), &desc);
    auto access_type = desc.GetAccess();
    args->AddObjectRef(key, access_type,
                       absl::make_unique<TensorDescriptor>(std::move(desc)));
  }
  return absl::OkStatus();
}

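// Serializes an Arguments instance. The objects_ and object_refs_ maps are
// scanned once per concrete descriptor type; dynamic_cast selects the entries
// that belong in each per-type FlatBuffer table.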
flatbuffers::Offset<data::Arguments> Encode(
    const Arguments& args, flatbuffers::FlatBufferBuilder* builder) {
  std::vector<flatbuffers::Offset<data::IntValue>> int_values_fb;
  for (auto& value : args.int_values_) {
    auto name_fb = builder->CreateString(value.first);
    data::IntValueBuilder value_builder(*builder);
    value_builder.add_name(name_fb);
    value_builder.add_value(value.second.value);
    value_builder.add_active(value.second.active);
    int_values_fb.push_back(value_builder.Finish());
  }

  std::vector<flatbuffers::Offset<data::FloatValue>> float_values_fb;
  for (auto& value : args.float_values_) {
    auto name_fb = builder->CreateString(value.first);
    data::FloatValueBuilder value_builder(*builder);
    value_builder.add_name(name_fb);
    value_builder.add_value(value.second.value);
    value_builder.add_active(value.second.active);
    float_values_fb.push_back(value_builder.Finish());
  }

  std::vector<flatbuffers::Offset<data::HalfValue>> half_values_fb;
  for (auto& value : args.half_values_) {
    auto name_fb = builder->CreateString(value.first);
    data::HalfValueBuilder value_builder(*builder);
    value_builder.add_name(name_fb);
    value_builder.add_value(value.second.value);
    value_builder.add_active(value.second.active);
    half_values_fb.push_back(value_builder.Finish());
  }

  std::vector<flatbuffers::Offset<data::BufferDescriptorMapValue>>
      buffer_objs_fb;
  for (auto& value : args.objects_) {
    const auto* buffer_desc =
        dynamic_cast<const BufferDescriptor*>(value.second.get());
    if (!buffer_desc) continue;
    auto desc_fb = Encode(*buffer_desc, builder);
    auto key_fb = builder->CreateString(value.first);
    data::BufferDescriptorMapValueBuilder buf_map_builder(*builder);
    buf_map_builder.add_key(key_fb);
    buf_map_builder.add_value(desc_fb);
    buffer_objs_fb.push_back(buf_map_builder.Finish());
  }
  std::vector<flatbuffers::Offset<data::Texture2DDescriptorMapValue>>
      texture2d_objs_fb;
  for (auto& value : args.objects_) {
    const auto* texture_desc =
        dynamic_cast<const Texture2DDescriptor*>(value.second.get());
    if (!texture_desc) continue;
    auto desc_fb = Encode(*texture_desc, builder);
    auto key_fb = builder->CreateString(value.first);
    data::Texture2DDescriptorMapValueBuilder tex_map_builder(*builder);
    tex_map_builder.add_key(key_fb);
    tex_map_builder.add_value(desc_fb);
    texture2d_objs_fb.push_back(tex_map_builder.Finish());
  }
  std::vector<flatbuffers::Offset<data::TensorLinearDescriptorMapValue>>
      tensor_linear_objs_fb;
  for (auto& value : args.objects_) {
    const auto* tensor_desc =
        dynamic_cast<const TensorLinearDescriptor*>(value.second.get());
    if (!tensor_desc) continue;
    auto desc_fb = Encode(*tensor_desc, builder);
    auto key_fb = builder->CreateString(value.first);
    data::TensorLinearDescriptorMapValueBuilder ten_map_builder(*builder);
    ten_map_builder.add_key(key_fb);
    ten_map_builder.add_value(desc_fb);
    tensor_linear_objs_fb.push_back(ten_map_builder.Finish());
  }
  std::vector<flatbuffers::Offset<data::TensorDescriptorMapValue>>
      tensor_objs_fb;
  for (auto& value : args.objects_) {
    const auto* tensor_desc =
        dynamic_cast<const TensorDescriptor*>(value.second.get());
    if (!tensor_desc) continue;
    auto desc_fb = Encode(*tensor_desc, builder);
    auto key_fb = builder->CreateString(value.first);
    data::TensorDescriptorMapValueBuilder ten_map_builder(*builder);
    ten_map_builder.add_key(key_fb);
    ten_map_builder.add_value(desc_fb);
    tensor_objs_fb.push_back(ten_map_builder.Finish());
  }

  std::vector<flatbuffers::Offset<data::BufferDescriptorMapValue>>
      buffer_refs_fb;
  for (auto& value : args.object_refs_) {
    const auto* buffer_desc =
        dynamic_cast<const BufferDescriptor*>(value.second.get());
    if (!buffer_desc) continue;
    auto desc_fb = Encode(*buffer_desc, builder);
    auto key_fb = builder->CreateString(value.first);
    data::BufferDescriptorMapValueBuilder buf_map_builder(*builder);
    buf_map_builder.add_key(key_fb);
    buf_map_builder.add_value(desc_fb);
    buffer_refs_fb.push_back(buf_map_builder.Finish());
  }
  std::vector<flatbuffers::Offset<data::Texture2DDescriptorMapValue>>
      texture2d_refs_fb;
  for (auto& value : args.object_refs_) {
    const auto* texture_desc =
        dynamic_cast<const Texture2DDescriptor*>(value.second.get());
    if (!texture_desc) continue;
    auto desc_fb = Encode(*texture_desc, builder);
    auto key_fb = builder->CreateString(value.first);
    data::Texture2DDescriptorMapValueBuilder tex_map_builder(*builder);
    tex_map_builder.add_key(key_fb);
    tex_map_builder.add_value(desc_fb);
    texture2d_refs_fb.push_back(tex_map_builder.Finish());
  }
  std::vector<flatbuffers::Offset<data::TensorLinearDescriptorMapValue>>
      tensor_linear_refs_fb;
  for (auto& value : args.object_refs_) {
    const auto* tensor_desc =
        dynamic_cast<const TensorLinearDescriptor*>(value.second.get());
    if (!tensor_desc) continue;
    auto desc_fb = Encode(*tensor_desc, builder);
    auto key_fb = builder->CreateString(value.first);
    data::TensorLinearDescriptorMapValueBuilder ten_map_builder(*builder);
    ten_map_builder.add_key(key_fb);
    ten_map_builder.add_value(desc_fb);
    tensor_linear_refs_fb.push_back(ten_map_builder.Finish());
  }
  std::vector<flatbuffers::Offset<data::TensorDescriptorMapValue>>
      tensor_refs_fb;
  for (auto& value : args.object_refs_) {
    const auto* tensor_desc =
        dynamic_cast<const TensorDescriptor*>(value.second.get());
    if (!tensor_desc) continue;
    auto desc_fb = Encode(*tensor_desc, builder);
    auto key_fb = builder->CreateString(value.first);
    data::TensorDescriptorMapValueBuilder ten_map_builder(*builder);
    ten_map_builder.add_key(key_fb);
    ten_map_builder.add_value(desc_fb);
    tensor_refs_fb.push_back(ten_map_builder.Finish());
  }

  auto int_values_fb_vec = builder->CreateVector(int_values_fb);
  auto float_values_fb_vec = builder->CreateVector(float_values_fb);
  auto half_values_fb_vec = builder->CreateVector(half_values_fb);
  auto buffer_objs_fb_vec = builder->CreateVector(buffer_objs_fb);
  auto texture2d_objs_fb_vec = builder->CreateVector(texture2d_objs_fb);
  auto tensor_linear_objs_fb_vec = builder->CreateVector(tensor_linear_objs_fb);
  auto tensor_objs_fb_vec = builder->CreateVector(tensor_objs_fb);
  auto buffer_refs_fb_vec = builder->CreateVector(buffer_refs_fb);
  auto texture2d_refs_fb_vec = builder->CreateVector(texture2d_refs_fb);
  auto tensor_linear_refs_fb_vec = builder->CreateVector(tensor_linear_refs_fb);
  auto tensor_refs_fb_vec = builder->CreateVector(tensor_refs_fb);
  data::ArgumentsBuilder arguments_builder(*builder);
  arguments_builder.add_int_values(int_values_fb_vec);
  arguments_builder.add_float_values(float_values_fb_vec);
  arguments_builder.add_half_values(half_values_fb_vec);
  arguments_builder.add_buffer_objects(buffer_objs_fb_vec);
  arguments_builder.add_texture2d_objects(texture2d_objs_fb_vec);
  arguments_builder.add_tensor_linear_objects(tensor_linear_objs_fb_vec);
  arguments_builder.add_tensor_objects(tensor_objs_fb_vec);
  arguments_builder.add_buffer_refs(buffer_refs_fb_vec);
  arguments_builder.add_texture2d_refs(texture2d_refs_fb_vec);
  arguments_builder.add_tensor_linear_refs(tensor_linear_refs_fb_vec);
  arguments_builder.add_tensor_refs(tensor_refs_fb_vec);
  return arguments_builder.Finish();
}

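// OperationDef round-trip: calculation precision plus the source and
// destination tensor descriptors.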
flatbuffers::Offset<data::OperationDef> Encode(
    const OperationDef& def, flatbuffers::FlatBufferBuilder* builder) {
  std::vector<flatbuffers::Offset<tflite::gpu::data::TensorDescriptor>>
      src_tensors_fb;
  for (auto& desc : def.src_tensors) {
    auto desc_fb = Encode(desc, builder);
    src_tensors_fb.push_back(desc_fb);
  }

  std::vector<flatbuffers::Offset<tflite::gpu::data::TensorDescriptor>>
      dst_tensors_fb;
  for (auto& desc : def.dst_tensors) {
    auto desc_fb = Encode(desc, builder);
    dst_tensors_fb.push_back(desc_fb);
  }

  auto src_tensors_fb_vec = builder->CreateVector(src_tensors_fb);
  auto dst_tensors_fb_vec = builder->CreateVector(dst_tensors_fb);

  data::OperationDefBuilder def_builder(*builder);
  def_builder.add_precision(ToFB(def.precision));
  def_builder.add_src_tensors(src_tensors_fb_vec);
  def_builder.add_dst_tensors(dst_tensors_fb_vec);
  return def_builder.Finish();
}

void Decode(const data::OperationDef* fb_def, OperationDef* def) {
  for (auto src_fb : *fb_def->src_tensors()) {
    TensorDescriptor desc;
    Decode(src_fb, &desc);
    def->src_tensors.push_back(std::move(desc));
  }
  for (auto dst_fb : *fb_def->dst_tensors()) {
    TensorDescriptor desc;
    Decode(dst_fb, &desc);
    def->dst_tensors.push_back(std::move(desc));
  }
  def->precision = ToEnum(fb_def->precision());
}

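// GPUOperation round-trip: arguments, kernel code, work group and grid
// configuration, compiler options, tensor names, and linking metadata.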
absl::Status Decode(const data::GPUOperation* fb_op, GPUOperation* op) {
  RETURN_IF_ERROR(Decode(fb_op->arguments(), &op->args_));
  op->code_ = std::string(fb_op->code()->c_str(), fb_op->code()->size());
  op->work_group_size_.x = fb_op->work_group_size()->x();
  op->work_group_size_.y = fb_op->work_group_size()->y();
  op->work_group_size_.z = fb_op->work_group_size()->z();
  for (auto option_fb : *fb_op->compiler_options()) {
    op->compiler_options_.push_back(ToEnum(option_fb->option()));
  }
  op->tensor_to_grid_ = ToEnum(fb_op->tensor_to_grid());
  op->elementwise_ = fb_op->elementwise();
  op->linkable_ = fb_op->linkable();
  op->check_src_channels_size_ = fb_op->check_src_channels_size();
  Decode(fb_op->definition(), &op->definition_);
  op->grid_dimension_ = fb_op->grid_dimension();
  op->work_group_launch_order_.x = fb_op->work_group_launch_order()->x();
  op->work_group_launch_order_.y = fb_op->work_group_launch_order()->y();
  op->work_group_launch_order_.z = fb_op->work_group_launch_order()->z();
  op->grid_size_.x = fb_op->grid_size()->x();
  op->grid_size_.y = fb_op->grid_size()->y();
  op->grid_size_.z = fb_op->grid_size()->z();
  for (auto name_fb : *fb_op->src_tensors_names()) {
    std::string name(name_fb->c_str(), name_fb->size());
    op->src_tensors_names_.push_back(std::move(name));
  }
  for (auto name_fb : *fb_op->dst_tensors_names()) {
    std::string name(name_fb->c_str(), name_fb->size());
    op->dst_tensors_names_.push_back(std::move(name));
  }
  op->work_groups_count_.x = fb_op->work_groups_count()->x();
  op->work_groups_count_.y = fb_op->work_groups_count()->y();
  op->work_groups_count_.z = fb_op->work_groups_count()->z();
  op->linkable_count_ = fb_op->linkable_count();
  op->elementwise_code_ = std::string(fb_op->elementwise_code()->c_str(),
                                      fb_op->elementwise_code()->size());
  return absl::OkStatus();
}

flatbuffers::Offset<data::GPUOperation> Encode(
    const GPUOperation& op, flatbuffers::FlatBufferBuilder* builder) {
  auto args_fb = Encode(op.args_, builder);
  auto code_fb = builder->CreateString(op.code_);
  auto work_group_size_fb = Encode(op.work_group_size_, builder);
  std::vector<flatbuffers::Offset<data::CompilerOption>> compiler_options_fb;
  for (int i = 0; i < op.compiler_options_.size(); ++i) {
    data::CompilerOptionBuilder option_builder(*builder);
    option_builder.add_option(ToFB(op.compiler_options_[i]));
    compiler_options_fb.push_back(option_builder.Finish());
  }
  auto compiler_options_fb_vec = builder->CreateVector(compiler_options_fb);

  auto def_fb = Encode(op.definition_, builder);
  auto work_group_launch_order_fb =
      Encode(op.work_group_launch_order_, builder);
  auto grid_size_fb = Encode(op.grid_size_, builder);
  auto work_groups_count_fb = Encode(op.work_groups_count_, builder);

  std::vector<flatbuffers::Offset<flatbuffers::String>> src_names_fb;
  for (auto& name : op.src_tensors_names_) {
    src_names_fb.push_back(builder->CreateString(name));
  }
  auto src_names_fb_vec = builder->CreateVector(src_names_fb);

  std::vector<flatbuffers::Offset<flatbuffers::String>> dst_names_fb;
  for (auto& name : op.dst_tensors_names_) {
    dst_names_fb.push_back(builder->CreateString(name));
  }
  auto dst_names_fb_vec = builder->CreateVector(dst_names_fb);

  auto elementwise_code_fb = builder->CreateString(op.elementwise_code_);

  data::GPUOperationBuilder op_builder(*builder);
  op_builder.add_arguments(args_fb);
  op_builder.add_code(code_fb);
  op_builder.add_work_group_size(work_group_size_fb);
  op_builder.add_compiler_options(compiler_options_fb_vec);
  op_builder.add_tensor_to_grid(ToFB(op.tensor_to_grid_));
  op_builder.add_elementwise(op.elementwise_);
  op_builder.add_linkable(op.linkable_);
  op_builder.add_check_src_channels_size(op.check_src_channels_size_);
  op_builder.add_definition(def_fb);
  op_builder.add_grid_dimension(op.grid_dimension_);
  op_builder.add_work_group_launch_order(work_group_launch_order_fb);
  op_builder.add_grid_size(grid_size_fb);
  op_builder.add_src_tensors_names(src_names_fb_vec);
  op_builder.add_dst_tensors_names(dst_names_fb_vec);
  op_builder.add_work_groups_count(work_groups_count_fb);
  op_builder.add_linkable_count(op.linkable_count_);
  op_builder.add_elementwise_code(elementwise_code_fb);
  return op_builder.Finish();
}

namespace cl {

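// OpenCL-specific serialization: tensor descriptors paired with ValueIds,
// individual CLNodes, and the full InferenceContext.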
flatbuffers::Offset<data::TensorDescWithId> Encode(
    const TensorDescriptor& desc, const ValueId& id,
    flatbuffers::FlatBufferBuilder* builder) {
  auto desc_fb = Encode(desc, builder);
  data::TensorDescWithIdBuilder desc_builder(*builder);
  desc_builder.add_desc(desc_fb);
  desc_builder.add_id(id);
  return desc_builder.Finish();
}

void Decode(const data::TensorDescWithId* fb_desc, TensorDescriptor* desc,
            ValueId* id) {
  Decode(fb_desc->desc(), desc);
  *id = fb_desc->id();
}

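// A CLNode is stored as its GPUOperation plus input/output tensor ids and its
// name.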
flatbuffers::Offset<data::CLNode> Encode(
    const CLNode& node, flatbuffers::FlatBufferBuilder* builder) {
  auto op_fb = Encode(node.cl_operation.GetGpuOperation(), builder);
  std::vector<int32_t> in_ids(node.inputs.size());
  for (int i = 0; i < in_ids.size(); ++i) {
    in_ids[i] = node.inputs[i];
  }
  std::vector<int32_t> out_ids(node.outputs.size());
  for (int i = 0; i < out_ids.size(); ++i) {
    out_ids[i] = node.outputs[i];
  }
  auto in_ids_fb = builder->CreateVector(in_ids);
  auto out_ids_fb = builder->CreateVector(out_ids);
  auto name_fb = builder->CreateString(node.name);
  data::CLNodeBuilder node_builder(*builder);
  node_builder.add_gpu_op(op_fb);
  node_builder.add_input_ids(in_ids_fb);
  node_builder.add_output_ids(out_ids_fb);
  node_builder.add_name(name_fb);
  return node_builder.Finish();
}

absl::Status Decode(const data::CLNode* fb_node, CLNode* node) {
  GPUOperation op;
  RETURN_IF_ERROR(Decode(fb_node->gpu_op(), &op));
  node->cl_operation.Init(absl::make_unique<GPUOperation>(std::move(op)));
  for (auto in_fb : *fb_node->input_ids()) {
    node->inputs.push_back(in_fb);
  }
  for (auto out_fb : *fb_node->output_ids()) {
    node->outputs.push_back(out_fb);
  }
  node->name = std::string(fb_node->name()->c_str(), fb_node->name()->size());

  return absl::OkStatus();
}

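// Serializes the whole InferenceContext: flush settings, precision and
// storage type, all CLNodes, tensor descriptors (regular and constant),
// graph input/output ids and refs, and variable id/ref pairs.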
flatbuffers::Offset<data::InferenceContext> Encode(
    const InferenceContext& inference,
    flatbuffers::FlatBufferBuilder* builder) {
  std::vector<int32_t> in_ids(inference.input_ids_.size());
  for (int i = 0; i < in_ids.size(); ++i) {
    in_ids[i] = inference.input_ids_[i];
  }
  std::vector<int32_t> out_ids(inference.output_ids_.size());
  for (int i = 0; i < out_ids.size(); ++i) {
    out_ids[i] = inference.output_ids_[i];
  }
  auto in_ids_fb = builder->CreateVector(in_ids);
  auto out_ids_fb = builder->CreateVector(out_ids);

  auto in_refs_fb = builder->CreateVector(inference.in_refs_);
  auto out_refs_fb = builder->CreateVector(inference.out_refs_);

  std::vector<flatbuffers::Offset<data::CLNode>> nodes_fb;
  for (int i = 0; i < inference.nodes_.size(); ++i) {
    auto node_fb = Encode(inference.nodes_[i], builder);
    nodes_fb.push_back(node_fb);
  }
  auto nodes_fb_vec = builder->CreateVector(nodes_fb);

  std::vector<flatbuffers::Offset<data::TensorDescWithId>> tensors_fb;
  auto tensors = inference.tensor_reserver_.GetTensorDescs();
  for (const auto& tensor : tensors) {
    auto tensor_fb = Encode(tensor.second, tensor.first, builder);
    tensors_fb.push_back(tensor_fb);
  }
  auto tensors_fb_vec = builder->CreateVector(tensors_fb);

  std::vector<flatbuffers::Offset<data::TensorDescWithId>> const_tensors_fb;
  for (const auto& tensor : inference.const_tensors_descs_) {
    auto tensor_fb = Encode(tensor.second, tensor.first, builder);
    const_tensors_fb.push_back(tensor_fb);
  }
  auto const_tensors_fb_vec = builder->CreateVector(const_tensors_fb);

  std::vector<flatbuffers::Offset<data::PairOfValueIds>>
      variable_ids_and_refs_fb;
  for (auto& pair : inference.variable_ids_and_refs_) {
    data::PairOfValueIdsBuilder pair_builder(*builder);
    pair_builder.add_first(pair.first);
    pair_builder.add_second(pair.second);
    variable_ids_and_refs_fb.push_back(pair_builder.Finish());
  }
  auto variable_ids_and_refs_fb_vec =
      builder->CreateVector(variable_ids_and_refs_fb);

  data::InferenceContextBuilder inf_builder(*builder);
  inf_builder.add_need_flush(inference.need_flush_);
  inf_builder.add_flush_periodically(inference.flush_periodically_);
  inf_builder.add_flush_period(inference.flush_period_);
  inf_builder.add_need_manual_release(inference.need_manual_release_);
  inf_builder.add_precision(ToFB(inference.precision_));
  inf_builder.add_storage_type(tflite::gpu::ToFB(inference.storage_type_));
  inf_builder.add_nodes(nodes_fb_vec);
  inf_builder.add_tensors(tensors_fb_vec);
  inf_builder.add_const_tensors(const_tensors_fb_vec);
  inf_builder.add_input_ids(in_ids_fb);
  inf_builder.add_output_ids(out_ids_fb);
  inf_builder.add_variable_ids_and_refs(variable_ids_and_refs_fb_vec);
  inf_builder.add_input_refs(in_refs_fb);
  inf_builder.add_output_refs(out_refs_fb);
  return inf_builder.Finish();
}

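// Restores an InferenceContext from its serialized form; the inverse of the
// Encode above.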
absl::Status Decode(const data::InferenceContext* fb_inference,
                    InferenceContext* inference) {
  inference->need_flush_ = fb_inference->need_flush();
  inference->flush_periodically_ = fb_inference->flush_periodically();
  inference->flush_period_ = fb_inference->flush_period();
  inference->need_manual_release_ = fb_inference->need_manual_release();
  inference->precision_ = ToEnum(fb_inference->precision());
  inference->storage_type_ = tflite::gpu::ToEnum(fb_inference->storage_type());

  inference->nodes_.resize(fb_inference->nodes()->size());
  int counter = 0;
  for (auto node_fb : *fb_inference->nodes()) {
    RETURN_IF_ERROR(Decode(node_fb, &inference->nodes_[counter]));
    counter++;
  }

  std::vector<std::pair<ValueId, TensorDescriptor>> tensors;
  for (const auto& tensor_fb : *fb_inference->tensors()) {
    TensorDescriptor desc;
    Decode(tensor_fb->desc(), &desc);
    tensors.push_back({tensor_fb->id(), std::move(desc)});
  }
  inference->tensor_reserver_.Add(tensors);
  for (const auto& tensor_fb : *fb_inference->const_tensors()) {
    TensorDescriptor desc;
    Decode(tensor_fb->desc(), &desc);
    inference->const_tensors_descs_[tensor_fb->id()] = std::move(desc);
  }
  for (auto in_fb : *fb_inference->input_ids()) {
    inference->input_ids_.push_back(in_fb);
  }
  for (auto out_fb : *fb_inference->output_ids()) {
    inference->output_ids_.push_back(out_fb);
  }

  for (auto variable_id : *fb_inference->variable_ids_and_refs()) {
    inference->variable_ids_and_refs_[variable_id->first()] =
        variable_id->second();
  }

  for (auto in_fb : *fb_inference->input_refs()) {
    inference->in_refs_.push_back(in_fb);
  }
  for (auto out_fb : *fb_inference->output_refs()) {
    inference->out_refs_.push_back(out_fb);
  }
  return absl::OkStatus();
}

}  // namespace cl
}  // namespace gpu
}  // namespace tflite