/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/interpreter.h"

#include <cstdint>
#include <memory>
#include <utility>

#include "ruy/denormal.h"  // from @ruy
#include "tensorflow/lite/allocation.h"
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/core/api/profiler.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/external_cpu_backend_context.h"
#include "tensorflow/lite/minimal_logging.h"
#include "tensorflow/lite/stderr_reporter.h"
#include "tensorflow/lite/util.h"

namespace tflite {

TfLiteStatus Interpreter::SetCustomAllocationForTensor(
    int tensor_index, const TfLiteCustomAllocation& allocation,
    int64_t flags) {
  return primary_subgraph().SetCustomAllocationForTensor(tensor_index,
                                                         allocation, flags);
}

TfLiteStatus Interpreter::ReleaseNonPersistentMemory() {
  // TODO(b/138790287): We could do this for all subgraphs whose tensors have
  // been allocated. However, AllocateTensors() relies on Control Flow ops to
  // allocate tensors on 'children' subgraphs. Revisit this if required.
  return primary_subgraph().ReleaseNonPersistentMemory();
}

TfLiteStatus Interpreter::ResetVariableTensors() {
  for (auto& subgraph : subgraphs_) {
    TF_LITE_ENSURE_STATUS(subgraph->ResetVariableTensors());
  }
  return kTfLiteOk;
}

void Interpreter::SetAllowFp16PrecisionForFp32(bool allow) {
  for (auto& subgraph : subgraphs_) {
    subgraph->context()->allow_fp32_relax_to_fp16 = allow;
  }
}

// TODO(b/121264966): Subgraphs added after cancellation is set will not get
// the cancellation function added to their context.
void Interpreter::SetCancellationFunction(void* data,
                                          bool (*check_cancelled_func)(void*)) {
  for (auto& subgraph : subgraphs_) {
    subgraph->SetCancellationFunction(data, check_cancelled_func);
  }
}

bool Interpreter::IsCancelled() { return primary_subgraph().IsCancelled(); }

TfLiteStatus Interpreter::ModifyGraphWithDelegate(TfLiteDelegate* delegate) {
  TfLiteStatus status = kTfLiteOk;
  for (auto& subgraph : subgraphs_) {
    if (IsValidationSubgraph(subgraph->GetName().c_str())) {
      continue;
    }
    status = subgraph->ModifyGraphWithDelegate(delegate);
    if (status != kTfLiteOk) {
      break;
    }
  }
  // Delegate-specific errors can be recovered from by restoring Interpreter
  // to its original state.
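  // RemoveAllDelegates() undoes delegation on every subgraph, so after a
  // kTfLiteDelegateError the caller can still invoke the original,
  // undelegated graph.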
  if (status == kTfLiteDelegateError) {
    TF_LITE_ENSURE_STATUS(RemoveAllDelegates());
  }
  return status;
}

TfLiteStatus Interpreter::RemoveAllDelegates() {
  for (auto& subgraph : subgraphs_) {
    TF_LITE_ENSURE_STATUS(subgraph->RemoveAllDelegates());
  }
  return kTfLiteOk;
}

bool Interpreter::HasDelegates() { return primary_subgraph().HasDelegates(); }

TfLiteStatus Interpreter::SetBufferHandle(int tensor_index,
                                          TfLiteBufferHandle buffer_handle,
                                          TfLiteDelegate* delegate) {
  TF_LITE_ENSURE(context_, tensor_index < tensors_size());
  TfLiteTensor* tensor = primary_subgraph().tensor(tensor_index);

  TF_LITE_ENSURE(context_,
                 tensor->delegate == nullptr || tensor->delegate == delegate);
  tensor->delegate = delegate;
  if (tensor->buffer_handle != kTfLiteNullBufferHandle) {
    TF_LITE_ENSURE(context_, tensor->delegate->FreeBufferHandle != nullptr);
    tensor->delegate->FreeBufferHandle(context_, tensor->delegate,
                                       &tensor->buffer_handle);
  }
  tensor->buffer_handle = buffer_handle;

  return kTfLiteOk;
}

TfLiteStatus Interpreter::GetBufferHandle(int tensor_index,
                                          TfLiteBufferHandle* buffer_handle,
                                          TfLiteDelegate** delegate) {
  TF_LITE_ENSURE(context_, tensor_index < tensors_size());
  TfLiteTensor* tensor = primary_subgraph().tensor(tensor_index);

  *delegate = tensor->delegate;
  *buffer_handle = tensor->buffer_handle;

  return kTfLiteOk;
}

void Interpreter::SetProfiler(Profiler* profiler) {
  // Release resources occupied by owned_profiler_ which is replaced by
  // caller-owned profiler.
  owned_profiler_.reset(nullptr);
  installed_profiler_ = profiler;
  SetSubgraphProfiler();
}

void Interpreter::SetProfiler(std::unique_ptr<Profiler> profiler) {
  owned_profiler_ = std::move(profiler);
  installed_profiler_ = owned_profiler_.get();
  SetSubgraphProfiler();
}

void Interpreter::SetSubgraphProfiler() {
  for (int subgraph_index = 0; subgraph_index < subgraphs_.size();
       ++subgraph_index) {
    subgraphs_[subgraph_index]->SetProfiler(installed_profiler_,
                                            subgraph_index);
  }
}

Profiler* Interpreter::GetProfiler() {
  return primary_subgraph().GetProfiler();
}

TfLiteStatus Interpreter::PreserveAllTensorsExperimental() {
  for (int subgraph_index = 0; subgraph_index < subgraphs_.size();
       ++subgraph_index) {
    TF_LITE_ENSURE_STATUS(
        subgraphs_[subgraph_index]->PreserveAllTensorsExperimental());
  }
  return kTfLiteOk;
}

}  // namespace tflite