Home
last modified time | relevance | path

Searched refs:allow_fp32_relax_to_fp16 (Results 1 – 12 of 12) sorted by relevance

/external/tensorflow/tensorflow/lite/delegates/nnapi/
nnapi_delegate_errno_test.cc:57 bool allow_fp32_relax_to_fp16 = false) in FloatAddOpModel() argument
59 Init(input1, input2, output, activation_type, allow_fp32_relax_to_fp16); in FloatAddOpModel()
76 bool allow_fp32_relax_to_fp16 = false) { in Init() argument
83 allow_fp32_relax_to_fp16); in Init()
nnapi_delegate_device_selection_test.cc:60 bool allow_fp32_relax_to_fp16 = false) { in Init() argument
68 allow_fp32_relax_to_fp16); in Init()
375 bool allow_fp32_relax_to_fp16 = false) in AddSubOpsAcceleratedModel() argument
382 allow_fp32_relax_to_fp16); in AddSubOpsAcceleratedModel()
402 bool allow_fp32_relax_to_fp16 = false) { in Init() argument
415 allow_fp32_relax_to_fp16); in Init()
nnapi_delegate_test.cc:138 bool allow_fp32_relax_to_fp16 = false) { in FloatAddOpModel() argument
139 Init(input1, input2, output, activation_type, allow_fp32_relax_to_fp16); in FloatAddOpModel()
146 bool allow_fp32_relax_to_fp16 = false) in FloatAddOpModel() argument
148 Init(input1, input2, output, activation_type, allow_fp32_relax_to_fp16); in FloatAddOpModel()
165 bool allow_fp32_relax_to_fp16 = false) { in Init() argument
172 allow_fp32_relax_to_fp16); in Init()
nnapi_delegate.cc:3791 nn_model_.get(), context->allow_fp32_relax_to_fp16), in BuildGraph()
/external/tensorflow/tensorflow/lite/kernels/
test_util.cc:148 bool allow_fp32_relax_to_fp16, in BuildInterpreter() argument
188 interpreter_->SetAllowFp16PrecisionForFp32(allow_fp32_relax_to_fp16); in BuildInterpreter()
224 bool allow_fp32_relax_to_fp16, in BuildInterpreter() argument
226 BuildInterpreter(input_shapes, /*num_threads=*/-1, allow_fp32_relax_to_fp16, in BuildInterpreter()
test_util.h:298 int num_threads, bool allow_fp32_relax_to_fp16,
305 bool allow_fp32_relax_to_fp16, bool apply_delegate);
/external/tensorflow/tensorflow/lite/
interpreter.h:348 return context_->allow_fp32_relax_to_fp16; in GetAllowFp16PrecisionForFp32()
interpreter.cc:284 subgraph->context()->allow_fp32_relax_to_fp16 = allow; in SetAllowFp16PrecisionForFp32()
/external/tensorflow/tensorflow/lite/c/
common.h:548 bool allow_fp32_relax_to_fp16; member
/external/tensorflow/tensorflow/lite/tools/benchmark/experimental/c/
c_api_types.h:548 bool allow_fp32_relax_to_fp16; member
/external/tensorflow/tensorflow/lite/core/
subgraph.h:251 return context_.allow_fp32_relax_to_fp16; in GetAllowFp16PrecisionForFp32()
subgraph.cc:195 context_.allow_fp32_relax_to_fp16 = false; in Subgraph()