Home
last modified time | relevance | path

Searched refs:fb_inference (Results 1 – 3 of 3) sorted by relevance

/external/tensorflow/tensorflow/lite/delegates/gpu/cl/
serialization.cc:1021 absl::Status Decode(const data::InferenceContext* fb_inference, in Decode() argument
1023 inference->need_flush_ = fb_inference->need_flush(); in Decode()
1024 inference->flush_periodically_ = fb_inference->flush_periodically(); in Decode()
1025 inference->flush_period_ = fb_inference->flush_period(); in Decode()
1026 inference->need_manual_release_ = fb_inference->need_manual_release(); in Decode()
1027 inference->precision_ = ToEnum(fb_inference->precision()); in Decode()
1028 inference->storage_type_ = tflite::gpu::ToEnum(fb_inference->storage_type()); in Decode()
1030 inference->nodes_.resize(fb_inference->nodes()->size()); in Decode()
1032 for (auto node_fb : *fb_inference->nodes()) { in Decode()
1038 for (const auto& tensor_fb : *fb_inference->tensors()) { in Decode()
[all …]
serialization.h:35 const data::InferenceContext* fb_inference,
inference_context.h:110 friend absl::Status Decode(const data::InferenceContext* fb_inference,