/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/lite/delegates/gpu/cl/serialization.h"

#include <cstdint>
#include <set>
#include <string>
#include <utility>

#include "tensorflow/lite/delegates/gpu/cl/gpu_object.h"
#include "tensorflow/lite/delegates/gpu/cl/inference_context.h"
#include "tensorflow/lite/delegates/gpu/cl/serialization_generated.h"
#include "tensorflow/lite/delegates/gpu/common/model.h"
#include "tensorflow/lite/delegates/gpu/common/precision.h"
#include "tensorflow/lite/delegates/gpu/common/task/arguments.h"
#include "tensorflow/lite/delegates/gpu/common/task/buffer_desc.h"
#include "tensorflow/lite/delegates/gpu/common/task/gpu_object_desc.h"
#include "tensorflow/lite/delegates/gpu/common/task/gpu_operation.h"
#include "tensorflow/lite/delegates/gpu/common/task/tensor_desc.h"
#include "tensorflow/lite/delegates/gpu/common/task/tensor_linear_desc.h"
#include "tensorflow/lite/delegates/gpu/common/task/texture2d_desc.h"

namespace tflite {
namespace gpu {

namespace {
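// Conversion helpers between the runtime enums used by the GPU delegate and
// the FlatBuffers enums generated from the serialization schema: ToFB() maps
// runtime -> FlatBuffers, ToEnum() maps FlatBuffers -> runtime.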
data::AccessType ToFB(AccessType type) {
  switch (type) {
    case AccessType::READ:
      return data::AccessType::READ;
    case AccessType::WRITE:
      return data::AccessType::WRITE;
    case AccessType::READ_WRITE:
      return data::AccessType::READ_WRITE;
    default:
      return data::AccessType::READ_WRITE;
  }
}

data::DataType ToFB(DataType type) {
  switch (type) {
    case DataType::FLOAT16:
      return data::DataType::FLOAT16;
    case DataType::FLOAT32:
      return data::DataType::FLOAT32;
    case DataType::FLOAT64:
      return data::DataType::FLOAT64;
    case DataType::UINT8:
      return data::DataType::UINT8;
    case DataType::INT8:
      return data::DataType::INT8;
    case DataType::UINT16:
      return data::DataType::UINT16;
    case DataType::INT16:
      return data::DataType::INT16;
    case DataType::UINT32:
      return data::DataType::UINT32;
    case DataType::INT32:
      return data::DataType::INT32;
    case DataType::UINT64:
      return data::DataType::UINT64;
    case DataType::INT64:
      return data::DataType::INT64;
    case DataType::UNKNOWN:
      return data::DataType::UNKNOWN;
  }
}

data::MemoryType ToFB(MemoryType type) {
  switch (type) {
    case MemoryType::CONSTANT:
      return data::MemoryType::CONSTANT;
    case MemoryType::GLOBAL:
      return data::MemoryType::GLOBAL;
    case MemoryType::LOCAL:
      return data::MemoryType::LOCAL;
  }
}

data::LinearStorageType ToFB(LinearStorageType type) {
  switch (type) {
    case LinearStorageType::BUFFER:
      return data::LinearStorageType::BUFFER;
    case LinearStorageType::TEXTURE_2D:
      return data::LinearStorageType::TEXTURE_2D;
  }
}

data::TensorStorageType ToFB(TensorStorageType type) {
  switch (type) {
    case TensorStorageType::BUFFER:
      return data::TensorStorageType::BUFFER;
    case TensorStorageType::IMAGE_BUFFER:
      return data::TensorStorageType::IMAGE_BUFFER;
    case TensorStorageType::TEXTURE_2D:
      return data::TensorStorageType::TEXTURE_2D;
    case TensorStorageType::TEXTURE_ARRAY:
      return data::TensorStorageType::TEXTURE_ARRAY;
    case TensorStorageType::TEXTURE_3D:
      return data::TensorStorageType::TEXTURE_3D;
    case TensorStorageType::SINGLE_TEXTURE_2D:
      return data::TensorStorageType::SINGLE_TEXTURE_2D;
    case TensorStorageType::UNKNOWN:
      return data::TensorStorageType::UNKNOWN;
  }
}

data::Layout ToFB(Layout type) {
  switch (type) {
    case Layout::HWC:
      return data::Layout::HWC;
    case Layout::BHWC:
      return data::Layout::BHWC;
    case Layout::HWDC:
      return data::Layout::HWDC;
    case Layout::BHWDC:
      return data::Layout::BHWDC;
    default:
      return data::Layout::UNKNOWN;
  }
}

DataType ToEnum(data::DataType type) {
  switch (type) {
    case data::DataType::FLOAT16:
      return DataType::FLOAT16;
    case data::DataType::FLOAT32:
      return DataType::FLOAT32;
    case data::DataType::FLOAT64:
      return DataType::FLOAT64;
    case data::DataType::UINT8:
      return DataType::UINT8;
    case data::DataType::INT8:
      return DataType::INT8;
    case data::DataType::UINT16:
      return DataType::UINT16;
    case data::DataType::INT16:
      return DataType::INT16;
    case data::DataType::UINT32:
      return DataType::UINT32;
    case data::DataType::INT32:
      return DataType::INT32;
    case data::DataType::UINT64:
      return DataType::UINT64;
    case data::DataType::INT64:
      return DataType::INT64;
    case data::DataType::UNKNOWN:
      return DataType::UNKNOWN;
  }
}

AccessType ToEnum(data::AccessType type) {
  switch (type) {
    case data::AccessType::READ:
      return AccessType::READ;
    case data::AccessType::WRITE:
      return AccessType::WRITE;
    case data::AccessType::READ_WRITE:
      return AccessType::READ_WRITE;
  }
}

MemoryType ToEnum(data::MemoryType type) {
  switch (type) {
    case data::MemoryType::CONSTANT:
      return MemoryType::CONSTANT;
    case data::MemoryType::GLOBAL:
      return MemoryType::GLOBAL;
    case data::MemoryType::LOCAL:
      return MemoryType::LOCAL;
  }
}

LinearStorageType ToEnum(data::LinearStorageType type) {
  switch (type) {
    case data::LinearStorageType::BUFFER:
      return LinearStorageType::BUFFER;
    case data::LinearStorageType::TEXTURE_2D:
      return LinearStorageType::TEXTURE_2D;
  }
}

TensorStorageType ToEnum(data::TensorStorageType type) {
  switch (type) {
    case data::TensorStorageType::BUFFER:
      return TensorStorageType::BUFFER;
    case data::TensorStorageType::IMAGE_BUFFER:
      return TensorStorageType::IMAGE_BUFFER;
    case data::TensorStorageType::TEXTURE_2D:
      return TensorStorageType::TEXTURE_2D;
    case data::TensorStorageType::TEXTURE_ARRAY:
      return TensorStorageType::TEXTURE_ARRAY;
    case data::TensorStorageType::TEXTURE_3D:
      return TensorStorageType::TEXTURE_3D;
    case data::TensorStorageType::SINGLE_TEXTURE_2D:
      return TensorStorageType::SINGLE_TEXTURE_2D;
    case data::TensorStorageType::UNKNOWN:
      return TensorStorageType::UNKNOWN;
  }
}

Layout ToEnum(data::Layout type) {
  switch (type) {
    case data::Layout::HWC:
      return Layout::HWC;
    case data::Layout::BHWC:
      return Layout::BHWC;
    case data::Layout::HWDC:
      return Layout::HWDC;
    case data::Layout::BHWDC:
      return Layout::BHWDC;
    default:
      return Layout::UNKNOWN;
  }
}

data::CalculationsPrecision ToFB(CalculationsPrecision type) {
  switch (type) {
    case CalculationsPrecision::F32:
      return data::CalculationsPrecision::F32;
    case CalculationsPrecision::F32_F16:
      return data::CalculationsPrecision::F32_F16;
    case CalculationsPrecision::F16:
      return data::CalculationsPrecision::F16;
  }
}

data::TensorToGrid ToFB(TensorToGrid type) {
  switch (type) {
    case TensorToGrid::kCustom:
      return data::TensorToGrid::CUSTOM;
    case TensorToGrid::kWBToX_HDToY_SToZ:
      return data::TensorToGrid::WB_TO_X_HD_TO_Y_S_TO_Z;
    case TensorToGrid::kWBToX_HDToY_ZIs1:
      return data::TensorToGrid::WB_TO_X_HD_TO_Y_Z_IS_1;
    case TensorToGrid::kWBToX_HToY_DToZ:
      return data::TensorToGrid::WB_TO_X_H_TO_Y_D_TO_Z;
    case TensorToGrid::kBToX_YIs1_ZIs1:
      return data::TensorToGrid::B_TO_X_Y_IS_1_Z_IS_1;
  }
}

data::CompilerOptions ToFB(CompilerOptions type) {
  switch (type) {
    case CompilerOptions::kAdrenoFullSimd:
      return data::CompilerOptions::ADRENO_FULL_SIMD_LINE;
    case CompilerOptions::kAdrenoMoreWaves:
      return data::CompilerOptions::ADRENO_MORE_WAVES;
    case CompilerOptions::kClFastRelaxedMath:
      return data::CompilerOptions::CL_FAST_RELAXED_MATH;
    case CompilerOptions::kClDisableOptimizations:
      return data::CompilerOptions::CL_OPT_DISABLE;
    case CompilerOptions::kCl20:
      return data::CompilerOptions::CL_2_0;
    case CompilerOptions::kCl30:
      return data::CompilerOptions::CL_3_0;
  }
}

CalculationsPrecision ToEnum(data::CalculationsPrecision type) {
  switch (type) {
    case data::CalculationsPrecision::F32:
      return CalculationsPrecision::F32;
    case data::CalculationsPrecision::F32_F16:
      return CalculationsPrecision::F32_F16;
    case data::CalculationsPrecision::F16:
      return CalculationsPrecision::F16;
  }
}

TensorToGrid ToEnum(data::TensorToGrid type) {
  switch (type) {
    case data::TensorToGrid::CUSTOM:
      return TensorToGrid::kCustom;
    case data::TensorToGrid::WB_TO_X_HD_TO_Y_S_TO_Z:
      return TensorToGrid::kWBToX_HDToY_SToZ;
    case data::TensorToGrid::WB_TO_X_HD_TO_Y_Z_IS_1:
      return TensorToGrid::kWBToX_HDToY_ZIs1;
    case data::TensorToGrid::WB_TO_X_H_TO_Y_D_TO_Z:
      return TensorToGrid::kWBToX_HToY_DToZ;
    case data::TensorToGrid::B_TO_X_Y_IS_1_Z_IS_1:
      return TensorToGrid::kBToX_YIs1_ZIs1;
  }
}

CompilerOptions ToEnum(data::CompilerOptions type) {
  switch (type) {
    case data::CompilerOptions::ADRENO_FULL_SIMD_LINE:
      return CompilerOptions::kAdrenoFullSimd;
    case data::CompilerOptions::ADRENO_MORE_WAVES:
      return CompilerOptions::kAdrenoMoreWaves;
    case data::CompilerOptions::CL_FAST_RELAXED_MATH:
      return CompilerOptions::kClFastRelaxedMath;
    case data::CompilerOptions::CL_OPT_DISABLE:
      return CompilerOptions::kClDisableOptimizations;
    case data::CompilerOptions::CL_2_0:
      return CompilerOptions::kCl20;
    case data::CompilerOptions::CL_3_0:
      return CompilerOptions::kCl30;
  }
}

}  // namespace

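// The Encode()/Decode() overloads below translate the runtime descriptor and
// argument types to and from their FlatBuffers table representations
// (data::*) declared in serialization_generated.h.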
flatbuffers::Offset<data::Int2> Encode(
    const int2& v, flatbuffers::FlatBufferBuilder* builder) {
  data::Int2Builder int2_builder(*builder);
  int2_builder.add_x(v.x);
  int2_builder.add_y(v.y);
  return int2_builder.Finish();
}

flatbuffers::Offset<data::Int3> Encode(
    const int3& v, flatbuffers::FlatBufferBuilder* builder) {
  data::Int3Builder int3_builder(*builder);
  int3_builder.add_x(v.x);
  int3_builder.add_y(v.y);
  int3_builder.add_z(v.z);
  return int3_builder.Finish();
}

flatbuffers::Offset<data::GPUObjectDescriptor> Encode(
    const GPUObjectDescriptor& desc, flatbuffers::FlatBufferBuilder* builder) {
  std::vector<flatbuffers::Offset<data::StateVariable>> state_vars_fb;
  for (auto& v0 : desc.state_vars_) {
    auto key_fb = builder->CreateString(v0.first);
    auto value_fb = builder->CreateString(v0.second);
    data::StateVariableBuilder state_builder(*builder);
    state_builder.add_key(key_fb);
    state_builder.add_value(value_fb);
    state_vars_fb.push_back(state_builder.Finish());
  }
  auto state_vars_fb_vec = builder->CreateVector(state_vars_fb);
  data::GPUObjectDescriptorBuilder obj_builder(*builder);
  obj_builder.add_state_vars(state_vars_fb_vec);
  obj_builder.add_access_type(ToFB(desc.access_type_));
  return obj_builder.Finish();
}

void Decode(const data::GPUObjectDescriptor* fb_obj, GPUObjectDescriptor* obj) {
  obj->access_type_ = ToEnum(fb_obj->access_type());
  for (auto state_fb : *fb_obj->state_vars()) {
    std::string key(state_fb->key()->c_str(), state_fb->key()->size());
    std::string value(state_fb->value()->c_str(), state_fb->value()->size());
    obj->state_vars_[key] = value;
  }
}

flatbuffers::Offset<data::BufferDescriptor> Encode(
    const BufferDescriptor& desc, flatbuffers::FlatBufferBuilder* builder) {
  auto obj_fb =
      Encode(*static_cast<const GPUObjectDescriptor*>(&desc), builder);

  std::vector<flatbuffers::Offset<flatbuffers::String>> attributes_fb;
  for (auto& attr : desc.attributes) {
    attributes_fb.push_back(builder->CreateString(attr));
  }
  auto attributes_fb_vec = builder->CreateVector(attributes_fb);
  auto data_fb = builder->CreateVector(desc.data);
  data::BufferDescriptorBuilder buf_builder(*builder);
  buf_builder.add_base_obj(obj_fb);
  buf_builder.add_element_type(ToFB(desc.element_type));
  buf_builder.add_element_size(desc.element_size);
  buf_builder.add_memory_type(ToFB(desc.memory_type));
  buf_builder.add_attributes(attributes_fb_vec);
  buf_builder.add_size(desc.size);
  buf_builder.add_data(data_fb);
  return buf_builder.Finish();
}

void Decode(const data::BufferDescriptor* fb_desc, BufferDescriptor* desc) {
  Decode(fb_desc->base_obj(), desc);
  desc->element_type = ToEnum(fb_desc->element_type());
  desc->element_size = fb_desc->element_size();
  desc->memory_type = ToEnum(fb_desc->memory_type());
  for (auto attr_fb : *fb_desc->attributes()) {
    std::string attr(attr_fb->c_str(), attr_fb->size());
    desc->attributes.push_back(attr);
  }
  desc->size = fb_desc->size();
  desc->data =
      std::vector<uint8_t>(fb_desc->data()->data(),
                           fb_desc->data()->data() + fb_desc->data()->size());
}

flatbuffers::Offset<data::Texture2DDescriptor> Encode(
    const Texture2DDescriptor& desc, flatbuffers::FlatBufferBuilder* builder) {
  auto obj_fb =
      Encode(*static_cast<const GPUObjectDescriptor*>(&desc), builder);

  auto data_fb = builder->CreateVector(desc.data);
  auto size_fb = Encode(desc.size, builder);
  data::Texture2DDescriptorBuilder tex_builder(*builder);
  tex_builder.add_base_obj(obj_fb);
  tex_builder.add_element_type(ToFB(desc.element_type));
  tex_builder.add_normalized(desc.normalized);
  tex_builder.add_normalized_type(ToFB(desc.normalized_type));
  tex_builder.add_size(size_fb);
  tex_builder.add_data(data_fb);
  return tex_builder.Finish();
}

void Decode(const data::Texture2DDescriptor* fb_desc,
            Texture2DDescriptor* desc) {
  Decode(fb_desc->base_obj(), desc);
  desc->element_type = ToEnum(fb_desc->element_type());
  desc->normalized = fb_desc->normalized();
  desc->normalized_type = ToEnum(fb_desc->normalized_type());
  desc->size.x = fb_desc->size()->x();
  desc->size.y = fb_desc->size()->y();
  desc->data =
      std::vector<uint8_t>(fb_desc->data()->data(),
                           fb_desc->data()->data() + fb_desc->data()->size());
}

flatbuffers::Offset<data::TensorLinearDescriptor> Encode(
    const TensorLinearDescriptor& desc,
    flatbuffers::FlatBufferBuilder* builder) {
  auto obj_fb =
      Encode(*static_cast<const GPUObjectDescriptor*>(&desc), builder);

  auto data_fb = builder->CreateVector(desc.data);
  data::TensorLinearDescriptorBuilder tensor_builder(*builder);
  tensor_builder.add_base_obj(obj_fb);
  tensor_builder.add_element_type(ToFB(desc.element_type));
  tensor_builder.add_storage_type(ToFB(desc.storage_type));
  tensor_builder.add_memory_type(ToFB(desc.memory_type));
  tensor_builder.add_size(desc.size);
  tensor_builder.add_data(data_fb);
  return tensor_builder.Finish();
}

void Decode(const data::TensorLinearDescriptor* fb_desc,
            TensorLinearDescriptor* desc) {
  Decode(fb_desc->base_obj(), desc);
  desc->element_type = ToEnum(fb_desc->element_type());
  desc->storage_type = ToEnum(fb_desc->storage_type());
  desc->memory_type = ToEnum(fb_desc->memory_type());
  desc->size = fb_desc->size();
  desc->data =
      std::vector<uint8_t>(fb_desc->data()->data(),
                           fb_desc->data()->data() + fb_desc->data()->size());
}

flatbuffers::Offset<data::TensorDescriptor> Encode(
    const TensorDescriptor& desc, flatbuffers::FlatBufferBuilder* builder) {
  auto obj_fb =
      Encode(*static_cast<const GPUObjectDescriptor*>(&desc), builder);

  data::BHWDCBuilder shape_builder(*builder);
  shape_builder.add_b(desc.shape.b);
  shape_builder.add_h(desc.shape.h);
  shape_builder.add_w(desc.shape.w);
  shape_builder.add_d(desc.shape.d);
  shape_builder.add_c(desc.shape.c);
  auto shape_fb = shape_builder.Finish();

  auto data_fb = builder->CreateVector(desc.data);
  data::TensorDescriptorBuilder tensor_builder(*builder);
  tensor_builder.add_base_obj(obj_fb);
  tensor_builder.add_data_type(ToFB(desc.data_type));
  tensor_builder.add_storage_type(ToFB(desc.storage_type));
  tensor_builder.add_layout(ToFB(desc.layout));
  tensor_builder.add_shape(shape_fb);
  tensor_builder.add_data(data_fb);
  return tensor_builder.Finish();
}

void Decode(const data::TensorDescriptor* fb_desc, TensorDescriptor* desc) {
  Decode(fb_desc->base_obj(), desc);
  desc->data_type = ToEnum(fb_desc->data_type());
  desc->storage_type = ToEnum(fb_desc->storage_type());
  desc->layout = ToEnum(fb_desc->layout());
  desc->shape.b = fb_desc->shape()->b();
  desc->shape.h = fb_desc->shape()->h();
  desc->shape.w = fb_desc->shape()->w();
  desc->shape.d = fb_desc->shape()->d();
  desc->shape.c = fb_desc->shape()->c();
  desc->data =
      std::vector<uint8_t>(fb_desc->data()->data(),
                           fb_desc->data()->data() + fb_desc->data()->size());
}

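// Rebuilds an Arguments instance from its FlatBuffers form: scalar values
// (int/float/half) are restored into the corresponding maps, and each
// serialized descriptor is re-registered via AddObject()/AddObjectRef().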
absl::Status Decode(const data::Arguments* fb_args, Arguments* args) {
  args->int_values_.clear();
  for (auto int_values_fb : *fb_args->int_values()) {
    Arguments::IntValue value;
    value.value = int_values_fb->value();
    value.active = int_values_fb->active();
    std::string name(int_values_fb->name()->c_str(),
                     int_values_fb->name()->size());
    args->int_values_[name] = value;
  }

  args->float_values_.clear();
  for (auto float_values_fb : *fb_args->float_values()) {
    Arguments::FloatValue value;
    value.value = float_values_fb->value();
    value.active = float_values_fb->active();
    std::string name(float_values_fb->name()->c_str(),
                     float_values_fb->name()->size());
    args->float_values_[name] = value;
  }

  args->half_values_.clear();
  for (auto half_values_fb : *fb_args->half_values()) {
    Arguments::HalfValue value;
    value.value = half_values_fb->value();
    value.active = half_values_fb->active();
    std::string name(half_values_fb->name()->c_str(),
                     half_values_fb->name()->size());
    args->half_values_[name] = value;
  }

  for (auto buffer_pair_fb : *fb_args->buffer_objects()) {
    std::string key(buffer_pair_fb->key()->c_str(),
                    buffer_pair_fb->key()->size());
    BufferDescriptor desc;
    Decode(buffer_pair_fb->value(), &desc);
    args->AddObject(key, absl::make_unique<BufferDescriptor>(std::move(desc)));
  }

  for (auto texture_pair_fb : *fb_args->texture2d_objects()) {
    std::string key(texture_pair_fb->key()->c_str(),
                    texture_pair_fb->key()->size());
    Texture2DDescriptor desc;
    Decode(texture_pair_fb->value(), &desc);
    args->AddObject(key,
                    absl::make_unique<Texture2DDescriptor>(std::move(desc)));
  }

  for (auto tensor_pair_fb : *fb_args->tensor_linear_objects()) {
    std::string key(tensor_pair_fb->key()->c_str(),
                    tensor_pair_fb->key()->size());
    TensorLinearDescriptor desc;
    Decode(tensor_pair_fb->value(), &desc);
    args->AddObject(key,
                    absl::make_unique<TensorLinearDescriptor>(std::move(desc)));
  }

  for (auto tensor_pair_fb : *fb_args->tensor_objects()) {
    std::string key(tensor_pair_fb->key()->c_str(),
                    tensor_pair_fb->key()->size());
    TensorDescriptor desc;
    Decode(tensor_pair_fb->value(), &desc);
    args->AddObject(key, absl::make_unique<TensorDescriptor>(std::move(desc)));
  }

  for (auto buffer_pair_fb : *fb_args->buffer_refs()) {
    std::string key(buffer_pair_fb->key()->c_str(),
                    buffer_pair_fb->key()->size());
    BufferDescriptor desc;
    Decode(buffer_pair_fb->value(), &desc);
    auto access_type = desc.GetAccess();
    args->AddObjectRef(key, access_type,
                       absl::make_unique<BufferDescriptor>(std::move(desc)));
  }

  for (auto texture_pair_fb : *fb_args->texture2d_refs()) {
    std::string key(texture_pair_fb->key()->c_str(),
                    texture_pair_fb->key()->size());
    Texture2DDescriptor desc;
    Decode(texture_pair_fb->value(), &desc);
    auto access_type = desc.GetAccess();
    args->AddObjectRef(key, access_type,
                       absl::make_unique<Texture2DDescriptor>(std::move(desc)));
  }

  for (auto tensor_pair_fb : *fb_args->tensor_linear_refs()) {
    std::string key(tensor_pair_fb->key()->c_str(),
                    tensor_pair_fb->key()->size());
    TensorLinearDescriptor desc;
    Decode(tensor_pair_fb->value(), &desc);
    auto access_type = desc.GetAccess();
    args->AddObjectRef(
        key, access_type,
        absl::make_unique<TensorLinearDescriptor>(std::move(desc)));
  }

  for (auto tensor_pair_fb : *fb_args->tensor_refs()) {
    std::string key(tensor_pair_fb->key()->c_str(),
                    tensor_pair_fb->key()->size());
    TensorDescriptor desc;
    Decode(tensor_pair_fb->value(), &desc);
    auto access_type = desc.GetAccess();
    args->AddObjectRef(key, access_type,
                       absl::make_unique<TensorDescriptor>(std::move(desc)));
  }
  return absl::OkStatus();
}

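// Serializes an Arguments instance. Objects and object references are stored
// in per-type vectors; dynamic_cast is used to sort each descriptor into the
// matching FlatBuffers map (buffer, texture2d, tensor_linear, tensor).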
flatbuffers::Offset<data::Arguments> Encode(
    const Arguments& args, flatbuffers::FlatBufferBuilder* builder) {
  std::vector<flatbuffers::Offset<data::IntValue>> int_values_fb;
  for (auto& value : args.int_values_) {
    auto name_fb = builder->CreateString(value.first);
    data::IntValueBuilder value_builder(*builder);
    value_builder.add_name(name_fb);
    value_builder.add_value(value.second.value);
    value_builder.add_active(value.second.active);
    int_values_fb.push_back(value_builder.Finish());
  }

  std::vector<flatbuffers::Offset<data::FloatValue>> float_values_fb;
  for (auto& value : args.float_values_) {
    auto name_fb = builder->CreateString(value.first);
    data::FloatValueBuilder value_builder(*builder);
    value_builder.add_name(name_fb);
    value_builder.add_value(value.second.value);
    value_builder.add_active(value.second.active);
    float_values_fb.push_back(value_builder.Finish());
  }

  std::vector<flatbuffers::Offset<data::HalfValue>> half_values_fb;
  for (auto& value : args.half_values_) {
    auto name_fb = builder->CreateString(value.first);
    data::HalfValueBuilder value_builder(*builder);
    value_builder.add_name(name_fb);
    value_builder.add_value(value.second.value);
    value_builder.add_active(value.second.active);
    half_values_fb.push_back(value_builder.Finish());
  }

  std::vector<flatbuffers::Offset<data::BufferDescriptorMapValue>>
      buffer_objs_fb;
  for (auto& value : args.objects_) {
    const auto* buffer_desc =
        dynamic_cast<const BufferDescriptor*>(value.second.get());
    if (!buffer_desc) continue;
    auto desc_fb = Encode(*buffer_desc, builder);
    auto key_fb = builder->CreateString(value.first);
    data::BufferDescriptorMapValueBuilder buf_map_builder(*builder);
    buf_map_builder.add_key(key_fb);
    buf_map_builder.add_value(desc_fb);
    buffer_objs_fb.push_back(buf_map_builder.Finish());
  }
  std::vector<flatbuffers::Offset<data::Texture2DDescriptorMapValue>>
      texture2d_objs_fb;
  for (auto& value : args.objects_) {
    const auto* texture_desc =
        dynamic_cast<const Texture2DDescriptor*>(value.second.get());
    if (!texture_desc) continue;
    auto desc_fb = Encode(*texture_desc, builder);
    auto key_fb = builder->CreateString(value.first);
    data::Texture2DDescriptorMapValueBuilder tex_map_builder(*builder);
    tex_map_builder.add_key(key_fb);
    tex_map_builder.add_value(desc_fb);
    texture2d_objs_fb.push_back(tex_map_builder.Finish());
  }
  std::vector<flatbuffers::Offset<data::TensorLinearDescriptorMapValue>>
      tensor_linear_objs_fb;
  for (auto& value : args.objects_) {
    const auto* tensor_desc =
        dynamic_cast<const TensorLinearDescriptor*>(value.second.get());
    if (!tensor_desc) continue;
    auto desc_fb = Encode(*tensor_desc, builder);
    auto key_fb = builder->CreateString(value.first);
    data::TensorLinearDescriptorMapValueBuilder ten_map_builder(*builder);
    ten_map_builder.add_key(key_fb);
    ten_map_builder.add_value(desc_fb);
    tensor_linear_objs_fb.push_back(ten_map_builder.Finish());
  }
  std::vector<flatbuffers::Offset<data::TensorDescriptorMapValue>>
      tensor_objs_fb;
  for (auto& value : args.objects_) {
    const auto* tensor_desc =
        dynamic_cast<const TensorDescriptor*>(value.second.get());
    if (!tensor_desc) continue;
    auto desc_fb = Encode(*tensor_desc, builder);
    auto key_fb = builder->CreateString(value.first);
    data::TensorDescriptorMapValueBuilder ten_map_builder(*builder);
    ten_map_builder.add_key(key_fb);
    ten_map_builder.add_value(desc_fb);
    tensor_objs_fb.push_back(ten_map_builder.Finish());
  }

  std::vector<flatbuffers::Offset<data::BufferDescriptorMapValue>>
      buffer_refs_fb;
  for (auto& value : args.object_refs_) {
    const auto* buffer_desc =
        dynamic_cast<const BufferDescriptor*>(value.second.get());
    if (!buffer_desc) continue;
    auto desc_fb = Encode(*buffer_desc, builder);
    auto key_fb = builder->CreateString(value.first);
    data::BufferDescriptorMapValueBuilder buf_map_builder(*builder);
    buf_map_builder.add_key(key_fb);
    buf_map_builder.add_value(desc_fb);
    buffer_refs_fb.push_back(buf_map_builder.Finish());
  }
  std::vector<flatbuffers::Offset<data::Texture2DDescriptorMapValue>>
      texture2d_refs_fb;
  for (auto& value : args.object_refs_) {
    const auto* texture_desc =
        dynamic_cast<const Texture2DDescriptor*>(value.second.get());
    if (!texture_desc) continue;
    auto desc_fb = Encode(*texture_desc, builder);
    auto key_fb = builder->CreateString(value.first);
    data::Texture2DDescriptorMapValueBuilder tex_map_builder(*builder);
    tex_map_builder.add_key(key_fb);
    tex_map_builder.add_value(desc_fb);
    texture2d_refs_fb.push_back(tex_map_builder.Finish());
  }
  std::vector<flatbuffers::Offset<data::TensorLinearDescriptorMapValue>>
      tensor_linear_refs_fb;
  for (auto& value : args.object_refs_) {
    const auto* tensor_desc =
        dynamic_cast<const TensorLinearDescriptor*>(value.second.get());
    if (!tensor_desc) continue;
    auto desc_fb = Encode(*tensor_desc, builder);
    auto key_fb = builder->CreateString(value.first);
    data::TensorLinearDescriptorMapValueBuilder ten_map_builder(*builder);
    ten_map_builder.add_key(key_fb);
    ten_map_builder.add_value(desc_fb);
    tensor_linear_refs_fb.push_back(ten_map_builder.Finish());
  }
  std::vector<flatbuffers::Offset<data::TensorDescriptorMapValue>>
      tensor_refs_fb;
  for (auto& value : args.object_refs_) {
    const auto* tensor_desc =
        dynamic_cast<const TensorDescriptor*>(value.second.get());
    if (!tensor_desc) continue;
    auto desc_fb = Encode(*tensor_desc, builder);
    auto key_fb = builder->CreateString(value.first);
    data::TensorDescriptorMapValueBuilder ten_map_builder(*builder);
    ten_map_builder.add_key(key_fb);
    ten_map_builder.add_value(desc_fb);
    tensor_refs_fb.push_back(ten_map_builder.Finish());
  }

  auto int_values_fb_vec = builder->CreateVector(int_values_fb);
  auto float_values_fb_vec = builder->CreateVector(float_values_fb);
  auto half_values_fb_vec = builder->CreateVector(half_values_fb);
  auto buffer_objs_fb_vec = builder->CreateVector(buffer_objs_fb);
  auto texture2d_objs_fb_vec = builder->CreateVector(texture2d_objs_fb);
  auto tensor_linear_objs_fb_vec = builder->CreateVector(tensor_linear_objs_fb);
  auto tensor_objs_fb_vec = builder->CreateVector(tensor_objs_fb);
  auto buffer_refs_fb_vec = builder->CreateVector(buffer_refs_fb);
  auto texture2d_refs_fb_vec = builder->CreateVector(texture2d_refs_fb);
  auto tensor_linear_refs_fb_vec = builder->CreateVector(tensor_linear_refs_fb);
  auto tensor_refs_fb_vec = builder->CreateVector(tensor_refs_fb);
  data::ArgumentsBuilder arguments_builder(*builder);
  arguments_builder.add_int_values(int_values_fb_vec);
  arguments_builder.add_float_values(float_values_fb_vec);
  arguments_builder.add_half_values(half_values_fb_vec);
  arguments_builder.add_buffer_objects(buffer_objs_fb_vec);
  arguments_builder.add_texture2d_objects(texture2d_objs_fb_vec);
  arguments_builder.add_tensor_linear_objects(tensor_linear_objs_fb_vec);
  arguments_builder.add_tensor_objects(tensor_objs_fb_vec);
  arguments_builder.add_buffer_refs(buffer_refs_fb_vec);
  arguments_builder.add_texture2d_refs(texture2d_refs_fb_vec);
  arguments_builder.add_tensor_linear_refs(tensor_linear_refs_fb_vec);
  arguments_builder.add_tensor_refs(tensor_refs_fb_vec);
  return arguments_builder.Finish();
}

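// An OperationDef is serialized as its calculation precision plus the source
// and destination tensor descriptors.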
flatbuffers::Offset<data::OperationDef> Encode(
    const OperationDef& def, flatbuffers::FlatBufferBuilder* builder) {
  std::vector<flatbuffers::Offset<tflite::gpu::data::TensorDescriptor>>
      src_tensors_fb;
  for (auto& desc : def.src_tensors) {
    auto desc_fb = Encode(desc, builder);
    src_tensors_fb.push_back(desc_fb);
  }

  std::vector<flatbuffers::Offset<tflite::gpu::data::TensorDescriptor>>
      dst_tensors_fb;
  for (auto& desc : def.dst_tensors) {
    auto desc_fb = Encode(desc, builder);
    dst_tensors_fb.push_back(desc_fb);
  }

  auto src_tensors_fb_vec = builder->CreateVector(src_tensors_fb);
  auto dst_tensors_fb_vec = builder->CreateVector(dst_tensors_fb);

  data::OperationDefBuilder def_builder(*builder);
  def_builder.add_precision(ToFB(def.precision));
  def_builder.add_src_tensors(src_tensors_fb_vec);
  def_builder.add_dst_tensors(dst_tensors_fb_vec);
  return def_builder.Finish();
}

void Decode(const data::OperationDef* fb_def, OperationDef* def) {
  for (auto src_fb : *fb_def->src_tensors()) {
    TensorDescriptor desc;
    Decode(src_fb, &desc);
    def->src_tensors.push_back(std::move(desc));
  }
  for (auto dst_fb : *fb_def->dst_tensors()) {
    TensorDescriptor desc;
    Decode(dst_fb, &desc);
    def->dst_tensors.push_back(std::move(desc));
  }
  def->precision = ToEnum(fb_def->precision());
}

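// Restores a GPUOperation: its arguments, work-group configuration, grid
// parameters, operation definition, and source/destination tensor names.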
absl::Status Decode(const data::GPUOperation* fb_op, GPUOperation* op) {
  RETURN_IF_ERROR(Decode(fb_op->arguments(), &op->args_));
  op->work_group_size_.x = fb_op->work_group_size()->x();
  op->work_group_size_.y = fb_op->work_group_size()->y();
  op->work_group_size_.z = fb_op->work_group_size()->z();
  op->tensor_to_grid_ = ToEnum(fb_op->tensor_to_grid());
  op->elementwise_ = fb_op->elementwise();
  op->linkable_ = fb_op->linkable();
  op->check_src_channels_size_ = fb_op->check_src_channels_size();
  Decode(fb_op->definition(), &op->definition_);
  op->grid_dimension_ = fb_op->grid_dimension();
  op->work_group_launch_order_.x = fb_op->work_group_launch_order()->x();
  op->work_group_launch_order_.y = fb_op->work_group_launch_order()->y();
  op->work_group_launch_order_.z = fb_op->work_group_launch_order()->z();
  op->grid_size_.x = fb_op->grid_size()->x();
  op->grid_size_.y = fb_op->grid_size()->y();
  op->grid_size_.z = fb_op->grid_size()->z();
  for (auto name_fb : *fb_op->src_tensors_names()) {
    std::string name(name_fb->c_str(), name_fb->size());
    op->src_tensors_names_.push_back(std::move(name));
  }
  for (auto name_fb : *fb_op->dst_tensors_names()) {
    std::string name(name_fb->c_str(), name_fb->size());
    op->dst_tensors_names_.push_back(std::move(name));
  }
  op->work_groups_count_.x = fb_op->work_groups_count()->x();
  op->work_groups_count_.y = fb_op->work_groups_count()->y();
  op->work_groups_count_.z = fb_op->work_groups_count()->z();
  op->linkable_count_ = fb_op->linkable_count();
  return absl::OkStatus();
}

flatbuffers::Offset<data::GPUOperation> Encode(
    const GPUOperation& op, flatbuffers::FlatBufferBuilder* builder) {
  auto args_fb = Encode(op.args_, builder);
  auto work_group_size_fb = Encode(op.work_group_size_, builder);

  auto def_fb = Encode(op.definition_, builder);
  auto work_group_launch_order_fb =
      Encode(op.work_group_launch_order_, builder);
  auto grid_size_fb = Encode(op.grid_size_, builder);
  auto work_groups_count_fb = Encode(op.work_groups_count_, builder);

  std::vector<flatbuffers::Offset<flatbuffers::String>> src_names_fb;
  for (auto& name : op.src_tensors_names_) {
    src_names_fb.push_back(builder->CreateString(name));
  }
  auto src_names_fb_vec = builder->CreateVector(src_names_fb);

  std::vector<flatbuffers::Offset<flatbuffers::String>> dst_names_fb;
  for (auto& name : op.dst_tensors_names_) {
    dst_names_fb.push_back(builder->CreateString(name));
  }
  auto dst_names_fb_vec = builder->CreateVector(dst_names_fb);

  data::GPUOperationBuilder op_builder(*builder);
  op_builder.add_arguments(args_fb);
  op_builder.add_work_group_size(work_group_size_fb);
  op_builder.add_tensor_to_grid(ToFB(op.tensor_to_grid_));
  op_builder.add_elementwise(op.elementwise_);
  op_builder.add_linkable(op.linkable_);
  op_builder.add_check_src_channels_size(op.check_src_channels_size_);
  op_builder.add_definition(def_fb);
  op_builder.add_grid_dimension(op.grid_dimension_);
  op_builder.add_work_group_launch_order(work_group_launch_order_fb);
  op_builder.add_grid_size(grid_size_fb);
  op_builder.add_src_tensors_names(src_names_fb_vec);
  op_builder.add_dst_tensors_names(dst_names_fb_vec);
  op_builder.add_work_groups_count(work_groups_count_fb);
  op_builder.add_linkable_count(op.linkable_count_);
  return op_builder.Finish();
}

namespace cl {

flatbuffers::Offset<data::TensorDescWithId> Encode(
    const TensorDescriptor& desc, const ValueId& id,
    flatbuffers::FlatBufferBuilder* builder) {
  auto desc_fb = Encode(desc, builder);
  data::TensorDescWithIdBuilder desc_builder(*builder);
  desc_builder.add_desc(desc_fb);
  desc_builder.add_id(id);
  return desc_builder.Finish();
}

void Decode(const data::TensorDescWithId* fb_desc, TensorDescriptor* desc,
            ValueId* id) {
  Decode(fb_desc->desc(), desc);
  *id = fb_desc->id();
}

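// A CLNode is serialized as its GPUOperation together with the kernel
// fingerprint (used to look up the compiled program binary on load) and its
// input/output tensor ids.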
flatbuffers::Offset<data::CLNode> Encode(
    const CLNode& node, flatbuffers::FlatBufferBuilder* builder) {
  auto op_fb = Encode(node.cl_operation.GetGpuOperation(), builder);
  std::vector<int32_t> in_ids(node.inputs.size());
  for (int i = 0; i < in_ids.size(); ++i) {
    in_ids[i] = node.inputs[i];
  }
  std::vector<int32_t> out_ids(node.outputs.size());
  for (int i = 0; i < out_ids.size(); ++i) {
    out_ids[i] = node.outputs[i];
  }
  auto in_ids_fb = builder->CreateVector(in_ids);
  auto out_ids_fb = builder->CreateVector(out_ids);
  auto name_fb = builder->CreateString(node.name);
  data::CLNodeBuilder node_builder(*builder);
  node_builder.add_gpu_op(op_fb);
  node_builder.add_fingerprint(node.cl_operation.GetKernelFingerprint());
  node_builder.add_input_ids(in_ids_fb);
  node_builder.add_output_ids(out_ids_fb);
  node_builder.add_name(name_fb);
  return node_builder.Finish();
}

absl::Status Decode(const ProgramCache& program_cache,
                    const data::CLNode* fb_node, CLNode* node) {
  GPUOperation op;
  RETURN_IF_ERROR(Decode(fb_node->gpu_op(), &op));
  node->cl_operation.Init(absl::make_unique<GPUOperation>(std::move(op)));
  RETURN_IF_ERROR(
      node->cl_operation.InitFromCache(fb_node->fingerprint(), program_cache));
  for (auto in_fb : *fb_node->input_ids()) {
    node->inputs.push_back(in_fb);
  }
  for (auto out_fb : *fb_node->output_ids()) {
    node->outputs.push_back(out_fb);
  }
  node->name = std::string(fb_node->name()->c_str(), fb_node->name()->size());

  return absl::OkStatus();
}

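// Serializes a complete InferenceContext: nodes, tensor descriptors, input/
// output ids and refs, and one compiled program binary per distinct kernel
// fingerprint, along with the OpenCL platform version string used to validate
// the cache when it is loaded back.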
flatbuffers::Offset<data::InferenceContext> Encode(
    const CLDevice& device, const InferenceContext& inference,
    const ProgramCache& program_cache, const std::vector<int64_t>& in_refs,
    std::vector<int64_t>& out_refs, flatbuffers::FlatBufferBuilder* builder) {
  std::vector<int32_t> in_ids(inference.input_ids_.size());
  for (int i = 0; i < in_ids.size(); ++i) {
    in_ids[i] = inference.input_ids_[i];
  }
  std::vector<int32_t> out_ids(inference.output_ids_.size());
  for (int i = 0; i < out_ids.size(); ++i) {
    out_ids[i] = inference.output_ids_[i];
  }
  auto in_ids_fb = builder->CreateVector(in_ids);
  auto out_ids_fb = builder->CreateVector(out_ids);

  auto in_refs_fb = builder->CreateVector(in_refs);
  auto out_refs_fb = builder->CreateVector(out_refs);

  std::vector<flatbuffers::Offset<data::CLNode>> nodes_fb;
  for (int i = 0; i < inference.nodes_.size(); ++i) {
    auto node_fb = Encode(inference.nodes_[i], builder);
    nodes_fb.push_back(node_fb);
  }
  auto nodes_fb_vec = builder->CreateVector(nodes_fb);
  std::set<uint64_t> fingerprints;
  for (const auto& node : inference.nodes_) {
    fingerprints.insert(node.cl_operation.GetKernelFingerprint());
  }
  std::vector<flatbuffers::Offset<data::BinaryProgram>> binary_programs_fb;
  for (auto fingerprint : fingerprints) {
    std::vector<uint8_t> program_binary;
    program_cache.GetProgramBinary(fingerprint, &program_binary).IgnoreError();
    auto binary_fb = builder->CreateVector(program_binary);
    data::BinaryProgramBuilder program_builder(*builder);
    program_builder.add_fingerprint(fingerprint);
    program_builder.add_binary(binary_fb);
    binary_programs_fb.push_back(program_builder.Finish());
  }
  auto binary_programs_fb_vec = builder->CreateVector(binary_programs_fb);

  std::vector<flatbuffers::Offset<data::TensorDescWithId>> tensors_fb;
  auto tensors = inference.tensor_reserver_.GetTensorDescs();
  for (const auto& tensor : tensors) {
    auto tensor_fb = Encode(tensor.second, tensor.first, builder);
    tensors_fb.push_back(tensor_fb);
  }
  auto tensors_fb_vec = builder->CreateVector(tensors_fb);

  std::vector<flatbuffers::Offset<data::TensorDescWithId>> const_tensors_fb;
  for (const auto& tensor : inference.const_tensors_descs_) {
    auto tensor_fb = Encode(tensor.second, tensor.first, builder);
    const_tensors_fb.push_back(tensor_fb);
  }
  auto const_tensors_fb_vec = builder->CreateVector(const_tensors_fb);

  std::vector<flatbuffers::Offset<data::PairOfValueIds>>
      variable_ids_and_refs_fb;
  for (auto& pair : inference.variable_ids_and_refs_) {
    data::PairOfValueIdsBuilder pair_builder(*builder);
    pair_builder.add_first(pair.first);
    pair_builder.add_second(pair.second);
    variable_ids_and_refs_fb.push_back(pair_builder.Finish());
  }
  auto variable_ids_and_refs_fb_vec =
      builder->CreateVector(variable_ids_and_refs_fb);
  auto driver_version = builder->CreateString(device.GetPlatformVersion());

  data::InferenceContextBuilder inf_builder(*builder);
  inf_builder.add_driver_version(driver_version);
  inf_builder.add_binary_programs(binary_programs_fb_vec);
  inf_builder.add_need_flush(inference.need_flush_);
  inf_builder.add_flush_periodically(inference.flush_periodically_);
  inf_builder.add_flush_period(inference.flush_period_);
  inf_builder.add_need_manual_release(inference.need_manual_release_);
  inf_builder.add_precision(ToFB(inference.precision_));
  inf_builder.add_storage_type(tflite::gpu::ToFB(inference.storage_type_));
  inf_builder.add_nodes(nodes_fb_vec);
  inf_builder.add_tensors(tensors_fb_vec);
  inf_builder.add_const_tensors(const_tensors_fb_vec);
  inf_builder.add_input_ids(in_ids_fb);
  inf_builder.add_output_ids(out_ids_fb);
  inf_builder.add_variable_ids_and_refs(variable_ids_and_refs_fb_vec);
  inf_builder.add_input_refs(in_refs_fb);
  inf_builder.add_output_refs(out_refs_fb);
  return inf_builder.Finish();
}

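// Restores an InferenceContext from its serialized form. Fails if the OpenCL
// platform version no longer matches the one the model was serialized with.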
absl::Status Decode(const CLContext& context, const CLDevice& device,
                    ProgramCache* program_cache,
                    const data::InferenceContext* fb_inference,
                    InferenceContext* inference) {
  std::string platform_version(fb_inference->driver_version()->c_str(),
                               fb_inference->driver_version()->size());
  if (device.GetPlatformVersion() != platform_version) {
    return absl::InvalidArgumentError(
        "OpenCL driver changed, model representation invalid, must be "
        "regenerated.");
  }
  inference->need_flush_ = fb_inference->need_flush();
  inference->flush_periodically_ = fb_inference->flush_periodically();
  inference->flush_period_ = fb_inference->flush_period();
  inference->need_manual_release_ = fb_inference->need_manual_release();
  inference->precision_ = ToEnum(fb_inference->precision());
  inference->storage_type_ = tflite::gpu::ToEnum(fb_inference->storage_type());

  for (auto binary_program_fb : *fb_inference->binary_programs()) {
    RETURN_IF_ERROR(program_cache->AddProgramBinary(
        context, device, binary_program_fb->fingerprint(),
        absl::MakeSpan(binary_program_fb->binary()->data(),
                       binary_program_fb->binary()->size())));
  }

  inference->nodes_.resize(fb_inference->nodes()->size());
  int counter = 0;
  for (auto node_fb : *fb_inference->nodes()) {
    RETURN_IF_ERROR(
        Decode(*program_cache, node_fb, &inference->nodes_[counter]));
    counter++;
  }

  std::vector<std::pair<ValueId, TensorDescriptor>> tensors;
  for (const auto& tensor_fb : *fb_inference->tensors()) {
    TensorDescriptor desc;
    Decode(tensor_fb->desc(), &desc);
    tensors.push_back({tensor_fb->id(), std::move(desc)});
  }
  inference->tensor_reserver_.Add(tensors);
  for (const auto& tensor_fb : *fb_inference->const_tensors()) {
    TensorDescriptor desc;
    Decode(tensor_fb->desc(), &desc);
    inference->const_tensors_descs_[tensor_fb->id()] = std::move(desc);
  }
  for (auto in_fb : *fb_inference->input_ids()) {
    inference->input_ids_.push_back(in_fb);
  }
  for (auto out_fb : *fb_inference->output_ids()) {
    inference->output_ids_.push_back(out_fb);
  }

  for (auto variable_id : *fb_inference->variable_ids_and_refs()) {
    inference->variable_ids_and_refs_[variable_id->first()] =
        variable_id->second();
  }
  return absl::OkStatus();
}

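// Reads only the input/output tensor reference ids from a serialized model,
// verifying the FlatBuffers buffer before touching it.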
absl::Status GetInOutRefs(const absl::Span<const uint8_t> serialized_model,
                          std::vector<int64_t>* in_refs,
                          std::vector<int64_t>* out_refs) {
  flatbuffers::Verifier verifier(serialized_model.data(),
                                 serialized_model.size());
  if (!data::VerifyInferenceContextBuffer(verifier)) {
    return absl::DataLossError("Deserialization failed.");
  }
  auto fb_inference = data::GetInferenceContext(serialized_model.data());
  if (in_refs) {
    in_refs->clear();
    for (auto in_fb : *fb_inference->input_refs()) {
      in_refs->push_back(in_fb);
    }
  }
  if (out_refs) {
    out_refs->clear();
    for (auto out_fb : *fb_inference->output_refs()) {
      out_refs->push_back(out_fb);
    }
  }
  return absl::OkStatus();
}

}  // namespace cl
}  // namespace gpu
}  // namespace tflite