/external/tensorflow/tensorflow/lite/delegates/gpu/common/testing/

  tflite_model_reader.cc
      38  const TfLiteDelegateParams* delegate_params) {   [in Init(), argument]
      40  reinterpret_cast<GraphFloat32*>(delegate_params->delegate->data_);   [in Init()]
      42  ? BuildModel(context, delegate_params, denormalized_graph).ok()   [in Init()]

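The tflite_model_reader.cc hits show one recurring pattern: the code that installs the delegate parks a pointer in TfLiteDelegate::data_, and the kernel's Init() recovers it through delegate_params->delegate->data_. A minimal sketch of that hand-off, assuming a hypothetical ReaderState payload (the real file stores a GraphFloat32*):

    #include "tensorflow/lite/c/common.h"

    // Hypothetical payload; tflite_model_reader.cc stores a GraphFloat32* here.
    struct ReaderState {
      void* denormalized_graph = nullptr;
    };

    // Registration side: stash the payload on the delegate itself.
    void AttachState(TfLiteDelegate* delegate, ReaderState* state) {
      delegate->data_ = state;
    }

    // Kernel side: TfLiteDelegateParams carries the owning delegate, so the
    // kernel's Init() can cast data_ back to the payload type.
    void* Init(TfLiteContext* /*context*/, const TfLiteDelegateParams* params) {
      auto* state = reinterpret_cast<ReaderState*>(params->delegate->data_);
      return state;  // becomes the delegate node's user data
    }
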
/external/tensorflow/tensorflow/lite/delegates/coreml/

  coreml_delegate_kernel.mm
      62  const TfLiteDelegateParams* delegate_params) {
      65  TF_LITE_ENSURE_STATUS(BuildModel(context, delegate_params));
     117  const TfLiteDelegateParams* delegate_params) {
     122  AddInputTensors(delegate_params->input_tensors, context);
     124  for (int node_index : TfLiteIntArrayView(delegate_params->nodes_to_replace)) {
     149  AddOutputTensors(delegate_params->output_tensors, context);
     151  for (int i = 0; i < delegate_params->input_tensors->size; ++i) {
     152  const int tensor_id = delegate_params->input_tensors->data[i];

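Read together, the coreml_delegate_kernel.mm hits trace the usual shape of a delegate kernel's build step: declare the partition's inputs, convert every node in nodes_to_replace, then declare the outputs. A hedged C++ sketch of that flow; AddInputTensors, AddOutputTensors and ProcessNode are stubs standing in for the real CoreML graph-builder calls:

    #include "tensorflow/lite/c/common.h"
    #include "tensorflow/lite/context_util.h"  // tflite::TfLiteIntArrayView

    // Stubs standing in for the CoreML graph-builder helpers.
    TfLiteStatus AddInputTensors(const TfLiteIntArray*, TfLiteContext*) { return kTfLiteOk; }
    TfLiteStatus AddOutputTensors(const TfLiteIntArray*, TfLiteContext*) { return kTfLiteOk; }
    TfLiteStatus ProcessNode(TfLiteContext*, int /*node_index*/) { return kTfLiteOk; }

    TfLiteStatus BuildModel(TfLiteContext* context,
                            const TfLiteDelegateParams* delegate_params) {
      // 1. Declare the partition's input tensors to the backend graph.
      TF_LITE_ENSURE_STATUS(AddInputTensors(delegate_params->input_tensors, context));
      // 2. Convert every TFLite node handed to this partition.
      for (int node_index :
           tflite::TfLiteIntArrayView(delegate_params->nodes_to_replace)) {
        TF_LITE_ENSURE_STATUS(ProcessNode(context, node_index));
      }
      // 3. Declare the partition's output tensors.
      TF_LITE_ENSURE_STATUS(AddOutputTensors(delegate_params->output_tensors, context));
      return kTfLiteOk;
    }
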
/external/tensorflow/tensorflow/lite/delegates/gpu/

  gl_delegate.cc
     132  const TfLiteDelegateParams* delegate_params) {   [in Prepare(), argument]
     136  RETURN_IF_ERROR(BuildModel(context, delegate_params, &graph));   [in Prepare()]
     171  inputs_.reserve(delegate_params->input_tensors->size);   [in Prepare()]
     172  for (int i = 0; i < delegate_params->input_tensors->size; ++i) {   [in Prepare()]
     173  const int tensor_index = delegate_params->input_tensors->data[i];   [in Prepare()]
     213  outputs_.reserve(delegate_params->output_tensors->size);   [in Prepare()]
     214  for (int i = 0; i < delegate_params->output_tensors->size; ++i) {   [in Prepare()]
     215  const int tensor_index = delegate_params->output_tensors->data[i];   [in Prepare()]

  delegate.cc
     122  const TfLiteDelegateParams* delegate_params) {   [in Prepare(), argument]
     130  RETURN_IF_ERROR(InitializeGraph(context, delegate_params, &graph,   [in Prepare()]
     152  RETURN_IF_ERROR(InitializeGraph(context, delegate_params, &graph2,   [in Prepare()]
     258  const TfLiteDelegateParams* delegate_params,   [in InitializeGraph(), argument]
     264  RETURN_IF_ERROR(BuildFinalModel(context, delegate_params, graph,   [in InitializeGraph()]
     267  RETURN_IF_ERROR(BuildFinalModel(context, delegate_params, graph));   [in InitializeGraph()]

  metal_delegate.mm
     265  absl::Status Prepare(TfLiteContext* context, const TfLiteDelegateParams* delegate_params) {
     270  RETURN_IF_ERROR(BuildFinalModel(context, delegate_params, &graph, &quant_conversion_map_));
     272  RETURN_IF_ERROR(BuildFinalModel(context, delegate_params, &graph));
     296  for (int tensor_index : TfLiteIntArrayView(delegate_params->input_tensors)) {
     321  for (int tensor_index : TfLiteIntArrayView(delegate_params->output_tensors)) {

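gl_delegate.cc walks input_tensors/output_tensors with raw index loops, while metal_delegate.mm uses the TfLiteIntArrayView adaptor over the same TfLiteIntArray fields. Both forms are equivalent; a small sketch for comparison (CollectInputs/CollectOutputs are illustrative names, not functions from these files):

    #include <vector>

    #include "tensorflow/lite/c/common.h"
    #include "tensorflow/lite/context_util.h"

    // Index-based form, as in gl_delegate.cc.
    void CollectInputs(const TfLiteDelegateParams* params, std::vector<int>* out) {
      out->reserve(params->input_tensors->size);
      for (int i = 0; i < params->input_tensors->size; ++i) {
        out->push_back(params->input_tensors->data[i]);
      }
    }

    // Range-based form via TfLiteIntArrayView, as in metal_delegate.mm.
    void CollectOutputs(const TfLiteDelegateParams* params, std::vector<int>* out) {
      for (int tensor_index : tflite::TfLiteIntArrayView(params->output_tensors)) {
        out->push_back(tensor_index);
      }
    }
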
/external/tensorflow/tensorflow/lite/delegates/gpu/common/

  model_builder.h
      48  TfLiteContext* context, const TfLiteDelegateParams* delegate_params,
      62  TfLiteContext* context, const TfLiteDelegateParams* delegate_params,

  model_builder_test.cc
     173  TfLiteDelegateParams* delegate_params() { return &delegate_params_.front(); }   [function delegate_params() in tflite::gpu::__anona5beb0730111::DelegatedInterpreter]
     329  *partition_params_array = interpreter_fp16_add_op->delegate_params();   [in TEST()]
     389  interpreter_fp16_non_constant->delegate_params();   [in TEST()]
     562  *partition_params_array = interpreter_fp32->delegate_params();   [in TEST()]
     738  *partition_params_array = interpreter2_fp32->delegate_params();   [in TEST()]
     953  *partition_params_array = interpreter_mn->delegate_params();   [in TEST()]
    1038  *partition_params_array = interpreter_mn2->delegate_params();   [in TEST()]
    1229  *partition_params_array = interpreter_quant->delegate_params();   [in TEST()]

  model_builder.cc
    2596  const TfLiteDelegateParams* delegate_params,   [in BuildModel(), argument]
    2601  for (int i = 0; i < delegate_params->nodes_to_replace->size; ++i) {   [in BuildModel()]
    2605  context, delegate_params->nodes_to_replace->data[i], &tflite_node,   [in BuildModel()]
    2627  delegate_params->input_tensors,   [in BuildModel()]
    2630  delegate_params->output_tensors,   [in BuildModel()]
    2636  context, delegate_params->nodes_to_replace->data[tflite_nodes[i]],   [in BuildModel()]
    2668  TfLiteContext* context, const TfLiteDelegateParams* delegate_params,   [in BuildFinalModel(), argument]
    2671  BuildModel(context, delegate_params, graph, quant_conversion_map));   [in BuildFinalModel()]

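The call sites in delegate.cc, metal_delegate.mm and model_builder.cc suggest the division of labour: BuildModel translates the partition described by delegate_params into a GraphFloat32, and BuildFinalModel does the same plus the final graph transformations, optionally filling a quant_conversion_map when quantized tensors are rewritten. A hedged sketch of a Prepare step driving it; the parameter list is inferred from these hits rather than quoted from model_builder.h:

    #include "absl/container/flat_hash_map.h"
    #include "absl/status/status.h"
    #include "tensorflow/lite/c/common.h"
    #include "tensorflow/lite/delegates/gpu/common/model.h"
    #include "tensorflow/lite/delegates/gpu/common/model_builder.h"

    // Sketch of the Prepare pattern seen in delegate.cc / metal_delegate.mm.
    absl::Status PrepareGraph(TfLiteContext* context,
                              const TfLiteDelegateParams* delegate_params,
                              bool enable_quant_conversion,
                              tflite::gpu::GraphFloat32* graph,
                              absl::flat_hash_map<int, int>* quant_conversion_map) {
      if (enable_quant_conversion) {
        // The map records how quantized tensor ids were remapped to float ones.
        return tflite::gpu::BuildFinalModel(context, delegate_params, graph,
                                            quant_conversion_map);
      }
      return tflite::gpu::BuildFinalModel(context, delegate_params, graph);
    }
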
/external/tensorflow/tensorflow/lite/delegates/nnapi/

  nnapi_delegate_disabled.cc
      62  const TfLiteDelegateParams* delegate_params,   [in CacheDelegateKernel(), argument]
      66  const TfLiteDelegateParams* delegate_params) {   [in MaybeGetCachedDelegateKernel(), argument]

  nnapi_delegate.h
     259  void CacheDelegateKernel(const TfLiteDelegateParams* delegate_params,
     264  const TfLiteDelegateParams* delegate_params);

  nnapi_delegate.cc
    4793  const TfLiteDelegateParams* delegate_params,   [in CacheDelegateKernel(), argument]
    4795  const int cache_key = delegate_params->nodes_to_replace->data[0];   [in CacheDelegateKernel()]
    4800  const TfLiteDelegateParams* delegate_params) {   [in MaybeGetCachedDelegateKernel(), argument]
    4801  const int cache_key = delegate_params->nodes_to_replace->data[0];   [in MaybeGetCachedDelegateKernel()]

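nnapi_delegate.cc caches a compiled delegate kernel per partition and keys the cache on nodes_to_replace->data[0], the first node owned by the partition, which stays stable for the same execution plan. A simplified sketch of that scheme; the DelegateKernel class and map type are stand-ins, not the real StatefulNnApiDelegate internals:

    #include <map>
    #include <memory>
    #include <utility>

    #include "tensorflow/lite/c/common.h"

    class DelegateKernel {};  // stand-in for the NNAPI kernel object

    class KernelCache {
     public:
      void Cache(const TfLiteDelegateParams* delegate_params,
                 std::unique_ptr<DelegateKernel> kernel) {
        const int cache_key = delegate_params->nodes_to_replace->data[0];
        cache_[cache_key] = std::move(kernel);
      }

      DelegateKernel* MaybeGet(const TfLiteDelegateParams* delegate_params) {
        const int cache_key = delegate_params->nodes_to_replace->data[0];
        auto it = cache_.find(cache_key);
        return it == cache_.end() ? nullptr : it->second.get();
      }

     private:
      std::map<int, std::unique_ptr<DelegateKernel>> cache_;
    };
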
/external/tensorflow/tensorflow/lite/kernels/

  test_util.cc
     266  const auto& delegate_params =   [in GetForceUseNnapi(), local]
     270  return delegate_params.HasParam("use_nnapi") &&   [in GetForceUseNnapi()]
     271  delegate_params.Get<bool>("use_nnapi");   [in GetForceUseNnapi()]

/external/tensorflow/tensorflow/lite/delegates/

  utils_test.cc
     128  TfLiteDelegateParams* delegate_params() { return &delegate_params_.front(); }   [function delegate_params() in tflite::delegates::__anonf41d3e3a0111::MockTfLiteContext]
     152  *partition_params_array = mock->delegate_params();   [in MockPreviewDelegatePartitioning()]

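utils_test.cc and model_builder_test.cc use the same trick to fake graph partitioning: the mock owns a std::vector<TfLiteDelegateParams>, its delegate_params() accessor exposes the backing array, and the stubbed PreviewDelegatePartitioning callback hands that array straight back to the code under test. A hedged sketch of the pattern; FakePartitioner is a made-up name, and the real tests route this through a mock TfLiteContext callback:

    #include <vector>

    #include "tensorflow/lite/c/common.h"

    struct FakePartitioner {
      std::vector<TfLiteDelegateParams> delegate_params_;

      // Same accessor shape as MockTfLiteContext / DelegatedInterpreter above.
      TfLiteDelegateParams* delegate_params() { return &delegate_params_.front(); }

      // Stub for the PreviewDelegatePartitioning callback: report the prebuilt
      // partitions instead of running the real partitioner.
      TfLiteStatus PreviewDelegatePartitioning(
          const TfLiteIntArray* /*nodes_to_replace*/,
          TfLiteDelegateParams** partition_params_array, int* num_partitions) {
        *partition_params_array = delegate_params();
        *num_partitions = static_cast<int>(delegate_params_.size());
        return kTfLiteOk;
      }
    };
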
/external/tensorflow/tensorflow/lite/delegates/gpu/cl/

  gpu_api_delegate.cc
      91  const TfLiteDelegateParams* delegate_params) {   [in Prepare(), argument]
      95  RETURN_IF_ERROR(BuildModel(context, delegate_params, &graph));   [in Prepare()]